/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>

#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen time.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner).  The stolen times are measured in units of
 * timebase ticks.  (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock(&vcpu->arch.tbacct_lock);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock(&vcpu->arch.tbacct_lock);
}
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}
struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}
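
/*
 * Record a request to register or unregister a VPA-family area at
 * guest physical address 'addr'.  The actual pinning of the guest
 * page is deferred to kvmppc_update_vpa(), which runs the next time
 * the vcpu enters the guest.
 */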
static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};
static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}
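
/*
 * Handle the H_REGISTER_VPA hcall for a target vcpu: decode the
 * subfunction, check the alignment and length of the guest buffer,
 * then record the pending (de)registration under the target vcpu's
 * vpa_update_lock.
 */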
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}
static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
	vpap->pinned_addr = va;
	if (va)
		vpap->pinned_end = va + vpap->len;
}
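
/*
 * Pin (or unpin) any of this vcpu's registered areas whose state
 * changed since the last guest entry.  Pinning can sleep, so this
 * must not be called while holding a spinlock.
 */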
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}
/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}
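
/*
 * Write an entry into the guest's dispatch trace log, recording how
 * long the vcpu waited to be dispatched (in timebase ticks), and
 * advance the dtl_idx counter in the VPA.
 */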
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = now;
	dt->enqueue_to_dispatch_time = stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
}
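
/*
 * Handle the hypercalls that we emulate in the kernel; anything we
 * don't handle here is passed up to userspace by the caller.
 */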
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;
	int idx;

	switch (req) {
	case H_ENTER:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}
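
/*
 * Handle a trap taken while the vcpu was in the guest.  Returns
 * RESUME_GUEST to re-enter the guest, or RESUME_HOST/RESUME_PAGE_FAULT
 * when the host (or the page fault handler) needs to do work first.
 */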
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			val->vsxval[0] = vcpu->arch.vsr[2 * i];
			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
		} else {
			r = -EINVAL;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			vcpu->arch.vsr[2 * i] = val->vsxval[0];
			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
		} else {
			r = -EINVAL;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = TB_NIL;
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.dtl.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
	if (vcpu->arch.slb_shadow.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
	if (vcpu->arch.vpa.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
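
/*
 * Arm the hrtimer that emulates the guest decrementer.  If the
 * decrementer has already expired, queue the dec interrupt now;
 * otherwise sleep until vcpu->arch.dec_expires.
 */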
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}
extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}
static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}
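
/*
 * Load this vcpu into the PACA of the hardware thread that will run
 * it and, for secondary threads, wake that thread out of nap so it
 * enters the guest.
 */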
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}
static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}
/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_core; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}
/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i, need_vpa_update;
	int srcu_idx;
	struct kvm_vcpu *vcpus_to_update[threads_per_core];

	/* don't start if any threads have a signal pending */
	need_vpa_update = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			return;
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			vcpus_to_update[need_vpa_update++] = vcpu;
	}

	/*
	 * Initialize *vc, in particular vc->vcore_state, so we can
	 * drop the vcore lock if necessary.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_STARTING;
	vc->in_guest = 0;
	vc->napping_threads = 0;

	/*
	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
	 * which can't be called with any spinlocks held.
	 */
	if (need_vpa_update) {
		spin_unlock(&vc->lock);
		for (i = 0; i < need_vpa_update; ++i)
			kvmppc_update_vpas(vcpus_to_update[i]);
		spin_lock(&vc->lock);
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		goto out;	/* nothing to run; should never happen */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);

	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	spin_lock(&vc->lock);
	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}
/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}
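
/*
 * Run a vcpu as part of its virtual core.  Each vcpu has its own
 * userspace task, but only one task per vcore actually calls
 * kvmppc_run_core(); the other tasks sleep on vcpu->arch.cpu_run
 * until the runner wakes them to handle something in the host.
 */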
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			wake_up(&vc->wq);
		}

	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			if (!v->arch.pending_exceptions)
				n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING)) {
		spin_unlock(&vc->lock);
		kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		v = list_first_entry(&vc->runnable_threads,
				     struct kvm_vcpu, arch.run_list);
		wake_up(&v->arch.cpu_run);
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();

	/* On the first time here, set up HTAB and VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		}
	} while (r == RESUME_GUEST);

 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
}
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};
static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}
static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_linear_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap		= kvm_rma_mmap,
	.release	= kvm_rma_release,
};
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_linear_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	(*sps)->enc[0].pte_enc = def->penc;
	(*sps)++;
}
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);

	return 0;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
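
/*
 * Work out the SLB L/LP page-size encoding for a given base page
 * size (0 for 4kB pages; large-page encodings for 64kB and 16MB).
 */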
static unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
static void unpin_slot(struct kvm_memory_slot *memslot)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = memslot->arch.slot_phys;
	npages = memslot->npages;
	if (!physp)
		return;
	for (j = 0; j < npages; j++) {
		if (!(physp[j] & KVMPPC_GOT_PAGE))
			continue;
		pfn = physp[j] >> PAGE_SHIFT;
		page = pfn_to_page(pfn);
		SetPageDirty(page);
		put_page(page);
	}
}
void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
		unpin_slot(free);
		vfree(free->arch.slot_phys);
		free->arch.slot_phys = NULL;
	}
}
int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;
	slot->arch.slot_phys = NULL;

	return 0;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	unsigned long *phys;

	/* Allocate a slot_phys array if needed */
	phys = memslot->arch.slot_phys;
	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
		phys = vzalloc(memslot->npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		memslot->arch.slot_phys = phys;
	}

	return 0;
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      struct kvm_memory_slot old)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	if (npages && old.npages) {
		/*
		 * If modifying a memslot, reset all the rmap dirty bits.
		 * If this is a new memslot, we don't need to do anything
		 * since the rmap array starts out as all zeroes,
		 * i.e. no pages are dirty.
		 */
		memslot = id_to_memslot(kvm->memslots, mem->slot);
		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
	}
}
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	long rmls;
	unsigned long *physp;
	unsigned long i, npages;
	int srcu_idx;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt_virt) {
		err = kvmppc_alloc_hpt(kvm, NULL);
		if (err) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out_srcu;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out_srcu;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
		lpcr |= senc << (LPCR_VRMASD_SH - 4);
		kvm->arch.lpcr = lpcr;

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if (rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out_srcu;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		porder = __ilog2(npages);
		physp = memslot->arch.slot_phys;
		if (physp) {
			if (npages > memslot->npages)
				npages = memslot->npages;
			spin_lock(&kvm->arch.slot_phys_lock);
			for (i = 0; i < npages; ++i)
				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
					porder;
			spin_unlock(&kvm->arch.slot_phys_lock);
		}
	}

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out_srcu;
}
int kvmppc_core_init_vm(struct kvm *kvm)
{
	unsigned long lpcr, lpid;

	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
	if ((long)lpid < 0)
		return -ENOMEM;
	kvm->arch.lpid = lpid;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);

	/*
	 * Don't allow secondary CPU threads to come online
	 * while any KVM VMs exist.
	 */
	inhibit_secondary_onlining();

	return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	uninhibit_secondary_onlining();

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}
/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	return EMULATE_FAIL;
}
static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);