/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/opal.h>

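/*
 * The (63 - n) expressions below translate the Power ISA convention,
 * which numbers register bits from the most-significant end (bit 0 is
 * the MSB of a 64-bit register), into ordinary C shift counts; e.g.
 * architected SRR1 bit 42 is (1ul << (63 - 42)).
 */
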
/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR		(1ul << (63-42))
#define SRR1_MC_IFETCH_SH	(63-45)
#define SRR1_MC_IFETCH_MASK	0x7
#define SRR1_MC_IFETCH_SLBPAR		2	/* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI		3	/* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI	4	/* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI		5	/* I-TLB multi-hit */

/* DSISR bits for machine check on POWER7 */
#define DSISR_MC_DERAT_MULTI	0x800	/* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI	0x400	/* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY	0x100	/* SLB parity error */
#define DSISR_MC_SLB_MULTI	0x080	/* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI	0x040	/* SLB parity + multi-hit */

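/*
 * SLB parity and multi-hit errors don't destroy any architected state:
 * the guest's entries are preserved in the SLB shadow buffer it
 * registered with the hypervisor, so recovery is a matter of discarding
 * the hardware SLB contents and reloading them from the shadow.
 */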
/* POWER7 SLB flush and reload */
static void reload_slb(struct kvm_vcpu *vcpu)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/* First clear out SLB */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

	/* Do they have an SLB shadow buffer registered? */
	slb = vcpu->arch.slb_shadow.pinned_addr;
	if (!slb)
		return;

	/* Sanity check */
	n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
		return;

	/* Load up the SLB from that */
	for (i = 0; i < n; ++i) {
		unsigned long rb = slb->save_area[i].esid;
		unsigned long rs = slb->save_area[i].vsid;

		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}

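/*
 * tlbiel invalidates a single TLB congruence class (set) on the
 * executing thread, so stepping the set index through all
 * POWER7_TLB_SETS sets flushes the whole TLB.
 */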
/* POWER7 TLB flush */
static void flush_tlb_power7(struct kvm_vcpu *vcpu)
{
	unsigned long i, rb;

	rb = TLBIEL_INVAL_SET_LPID;
	for (i = 0; i < POWER7_TLB_SETS; ++i) {
		asm volatile("tlbiel %0" : : "r" (rb));
		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
	}
}

/*
 * On POWER7, see if we can handle a machine check that occurred inside
 * the guest in real mode, without switching to the host partition.
 *
 * Returns: 0 => exit guest, 1 => deliver machine check to guest
 */
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
	unsigned long srr1 = vcpu->arch.shregs.msr;
#ifdef CONFIG_PPC_POWERNV
	struct opal_machine_check_event *opal_evt;
#endif
	long handled = 1;

	if (srr1 & SRR1_MC_LDSTERR) {
		/* error on load/store */
		unsigned long dsisr = vcpu->arch.shregs.dsisr;

		if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
			     DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
			/* flush and reload SLB; flushes D-ERAT too */
			reload_slb(vcpu);
			dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
				   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
		}
		if (dsisr & DSISR_MC_TLB_MULTI) {
			flush_tlb_power7(vcpu);
			dsisr &= ~DSISR_MC_TLB_MULTI;
		}
		/* Any other errors we don't understand? */
		if (dsisr & 0xffffffffUL)
			handled = 0;
	}

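	/*
	 * Instruction-side errors are reported in an SRR1 field rather
	 * than in DSISR; decode it the same way, flushing whichever
	 * translation structure took the error.
	 */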
	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
	case 0:
		break;
	case SRR1_MC_IFETCH_SLBPAR:
	case SRR1_MC_IFETCH_SLBMULTI:
	case SRR1_MC_IFETCH_SLBPARMULTI:
		reload_slb(vcpu);
		break;
	case SRR1_MC_IFETCH_TLBMULTI:
		flush_tlb_power7(vcpu);
		break;
	default:
		handled = 0;
	}

#ifdef CONFIG_PPC_POWERNV
	/*
	 * See if OPAL has already handled the condition.
	 * We assume that if the condition is recovered then OPAL
	 * will have generated an error log event that we will pick
	 * up and log later.
	 */
	opal_evt = local_paca->opal_mc_evt;
	if (opal_evt->version == OpalMCE_V1 &&
	    (opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
	     opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
		handled = 1;

	if (handled)
		opal_evt->in_use = 0;
#endif

	return handled;
}

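/*
 * Entry point, called in real mode when a machine check is taken while
 * a guest is running.  CPU_FTR_ARCH_206 covers POWER7 here; on anything
 * else we make no recovery attempt and simply force an exit to the host.
 */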
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
	if (cpu_has_feature(CPU_FTR_ARCH_206))
		return kvmppc_realmode_mc_power7(vcpu);

	return 0;
}