memblock: Reimplement __memblock_remove() using memblock_isolate_range()
[linux-2.6.git] / arch / powerpc / kvm / book3s_emulate.c
blob0c9dc62532d02c78c2480d569fc014cb637713b1
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 * Copyright SUSE Linux Products GmbH 2009
17 * Authors: Alexander Graf <agraf@suse.de>
20 #include <asm/kvm_ppc.h>
21 #include <asm/disassemble.h>
22 #include <asm/kvm_book3s.h>
23 #include <asm/reg.h>
/* Extended opcodes (primary opcode 19 and 31) this emulator understands. */
#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

/* Primary opcodes for FP loads/stores (used for alignment fixups). */
#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

/* Gekko/Broadway graphics quantization registers. */
#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin
/* Privilege level an SPR access is attempted from / required for. */
enum priv_level {
	PRIV_PROBLEM = 0,	/* problem state (user mode) */
	PRIV_SUPER = 1,		/* supervisor state */
	PRIV_HYPER = 2,		/* hypervisor state */
};
72 static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
74 /* PAPR VMs only access supervisor SPRs */
75 if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
76 return false;
78 /* Limit user space to its own small SPR set */
79 if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM)
80 return false;
82 return true;
85 int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
86 unsigned int inst, int *advance)
88 int emulated = EMULATE_DONE;
90 switch (get_op(inst)) {
91 case 19:
92 switch (get_xop(inst)) {
93 case OP_19_XOP_RFID:
94 case OP_19_XOP_RFI:
95 kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
96 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
97 *advance = 0;
98 break;
100 default:
101 emulated = EMULATE_FAIL;
102 break;
104 break;
105 case 31:
106 switch (get_xop(inst)) {
107 case OP_31_XOP_MFMSR:
108 kvmppc_set_gpr(vcpu, get_rt(inst),
109 vcpu->arch.shared->msr);
110 break;
111 case OP_31_XOP_MTMSRD:
113 ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
114 if (inst & 0x10000) {
115 vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
116 vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
117 } else
118 kvmppc_set_msr(vcpu, rs);
119 break;
121 case OP_31_XOP_MTMSR:
122 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
123 break;
124 case OP_31_XOP_MFSR:
126 int srnum;
128 srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
129 if (vcpu->arch.mmu.mfsrin) {
130 u32 sr;
131 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
132 kvmppc_set_gpr(vcpu, get_rt(inst), sr);
134 break;
136 case OP_31_XOP_MFSRIN:
138 int srnum;
140 srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
141 if (vcpu->arch.mmu.mfsrin) {
142 u32 sr;
143 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
144 kvmppc_set_gpr(vcpu, get_rt(inst), sr);
146 break;
148 case OP_31_XOP_MTSR:
149 vcpu->arch.mmu.mtsrin(vcpu,
150 (inst >> 16) & 0xf,
151 kvmppc_get_gpr(vcpu, get_rs(inst)));
152 break;
153 case OP_31_XOP_MTSRIN:
154 vcpu->arch.mmu.mtsrin(vcpu,
155 (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
156 kvmppc_get_gpr(vcpu, get_rs(inst)));
157 break;
158 case OP_31_XOP_TLBIE:
159 case OP_31_XOP_TLBIEL:
161 bool large = (inst & 0x00200000) ? true : false;
162 ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
163 vcpu->arch.mmu.tlbie(vcpu, addr, large);
164 break;
166 case OP_31_XOP_EIOIO:
167 break;
168 case OP_31_XOP_SLBMTE:
169 if (!vcpu->arch.mmu.slbmte)
170 return EMULATE_FAIL;
172 vcpu->arch.mmu.slbmte(vcpu,
173 kvmppc_get_gpr(vcpu, get_rs(inst)),
174 kvmppc_get_gpr(vcpu, get_rb(inst)));
175 break;
176 case OP_31_XOP_SLBIE:
177 if (!vcpu->arch.mmu.slbie)
178 return EMULATE_FAIL;
180 vcpu->arch.mmu.slbie(vcpu,
181 kvmppc_get_gpr(vcpu, get_rb(inst)));
182 break;
183 case OP_31_XOP_SLBIA:
184 if (!vcpu->arch.mmu.slbia)
185 return EMULATE_FAIL;
187 vcpu->arch.mmu.slbia(vcpu);
188 break;
189 case OP_31_XOP_SLBMFEE:
190 if (!vcpu->arch.mmu.slbmfee) {
191 emulated = EMULATE_FAIL;
192 } else {
193 ulong t, rb;
195 rb = kvmppc_get_gpr(vcpu, get_rb(inst));
196 t = vcpu->arch.mmu.slbmfee(vcpu, rb);
197 kvmppc_set_gpr(vcpu, get_rt(inst), t);
199 break;
200 case OP_31_XOP_SLBMFEV:
201 if (!vcpu->arch.mmu.slbmfev) {
202 emulated = EMULATE_FAIL;
203 } else {
204 ulong t, rb;
206 rb = kvmppc_get_gpr(vcpu, get_rb(inst));
207 t = vcpu->arch.mmu.slbmfev(vcpu, rb);
208 kvmppc_set_gpr(vcpu, get_rt(inst), t);
210 break;
211 case OP_31_XOP_DCBA:
212 /* Gets treated as NOP */
213 break;
214 case OP_31_XOP_DCBZ:
216 ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
217 ulong ra = 0;
218 ulong addr, vaddr;
219 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
220 u32 dsisr;
221 int r;
223 if (get_ra(inst))
224 ra = kvmppc_get_gpr(vcpu, get_ra(inst));
226 addr = (ra + rb) & ~31ULL;
227 if (!(vcpu->arch.shared->msr & MSR_SF))
228 addr &= 0xffffffff;
229 vaddr = addr;
231 r = kvmppc_st(vcpu, &addr, 32, zeros, true);
232 if ((r == -ENOENT) || (r == -EPERM)) {
233 *advance = 0;
234 vcpu->arch.shared->dar = vaddr;
235 to_svcpu(vcpu)->fault_dar = vaddr;
237 dsisr = DSISR_ISSTORE;
238 if (r == -ENOENT)
239 dsisr |= DSISR_NOHPTE;
240 else if (r == -EPERM)
241 dsisr |= DSISR_PROTFAULT;
243 vcpu->arch.shared->dsisr = dsisr;
244 to_svcpu(vcpu)->fault_dsisr = dsisr;
246 kvmppc_book3s_queue_irqprio(vcpu,
247 BOOK3S_INTERRUPT_DATA_STORAGE);
250 break;
252 default:
253 emulated = EMULATE_FAIL;
255 break;
256 default:
257 emulated = EMULATE_FAIL;
260 if (emulated == EMULATE_FAIL)
261 emulated = kvmppc_emulate_paired_single(run, vcpu);
263 return emulated;
266 void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
267 u32 val)
269 if (upper) {
270 /* Upper BAT */
271 u32 bl = (val >> 2) & 0x7ff;
272 bat->bepi_mask = (~bl << 17);
273 bat->bepi = val & 0xfffe0000;
274 bat->vs = (val & 2) ? 1 : 0;
275 bat->vp = (val & 1) ? 1 : 0;
276 bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
277 } else {
278 /* Lower BAT */
279 bat->brpn = val & 0xfffe0000;
280 bat->wimg = (val >> 3) & 0xf;
281 bat->pp = val & 3;
282 bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
286 static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
288 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
289 struct kvmppc_bat *bat;
291 switch (sprn) {
292 case SPRN_IBAT0U ... SPRN_IBAT3L:
293 bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
294 break;
295 case SPRN_IBAT4U ... SPRN_IBAT7L:
296 bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
297 break;
298 case SPRN_DBAT0U ... SPRN_DBAT3L:
299 bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
300 break;
301 case SPRN_DBAT4U ... SPRN_DBAT7L:
302 bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
303 break;
304 default:
305 BUG();
308 return bat;
311 int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
313 int emulated = EMULATE_DONE;
314 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
316 switch (sprn) {
317 case SPRN_SDR1:
318 if (!spr_allowed(vcpu, PRIV_HYPER))
319 goto unprivileged;
320 to_book3s(vcpu)->sdr1 = spr_val;
321 break;
322 case SPRN_DSISR:
323 vcpu->arch.shared->dsisr = spr_val;
324 break;
325 case SPRN_DAR:
326 vcpu->arch.shared->dar = spr_val;
327 break;
328 case SPRN_HIOR:
329 to_book3s(vcpu)->hior = spr_val;
330 break;
331 case SPRN_IBAT0U ... SPRN_IBAT3L:
332 case SPRN_IBAT4U ... SPRN_IBAT7L:
333 case SPRN_DBAT0U ... SPRN_DBAT3L:
334 case SPRN_DBAT4U ... SPRN_DBAT7L:
336 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
338 kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
339 /* BAT writes happen so rarely that we're ok to flush
340 * everything here */
341 kvmppc_mmu_pte_flush(vcpu, 0, 0);
342 kvmppc_mmu_flush_segments(vcpu);
343 break;
345 case SPRN_HID0:
346 to_book3s(vcpu)->hid[0] = spr_val;
347 break;
348 case SPRN_HID1:
349 to_book3s(vcpu)->hid[1] = spr_val;
350 break;
351 case SPRN_HID2:
352 to_book3s(vcpu)->hid[2] = spr_val;
353 break;
354 case SPRN_HID2_GEKKO:
355 to_book3s(vcpu)->hid[2] = spr_val;
356 /* HID2.PSE controls paired single on gekko */
357 switch (vcpu->arch.pvr) {
358 case 0x00080200: /* lonestar 2.0 */
359 case 0x00088202: /* lonestar 2.2 */
360 case 0x70000100: /* gekko 1.0 */
361 case 0x00080100: /* gekko 2.0 */
362 case 0x00083203: /* gekko 2.3a */
363 case 0x00083213: /* gekko 2.3b */
364 case 0x00083204: /* gekko 2.4 */
365 case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */
366 case 0x00087200: /* broadway */
367 if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
368 /* Native paired singles */
369 } else if (spr_val & (1 << 29)) { /* HID2.PSE */
370 vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
371 kvmppc_giveup_ext(vcpu, MSR_FP);
372 } else {
373 vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
375 break;
377 break;
378 case SPRN_HID4:
379 case SPRN_HID4_GEKKO:
380 to_book3s(vcpu)->hid[4] = spr_val;
381 break;
382 case SPRN_HID5:
383 to_book3s(vcpu)->hid[5] = spr_val;
384 /* guest HID5 set can change is_dcbz32 */
385 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
386 (mfmsr() & MSR_HV))
387 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
388 break;
389 case SPRN_GQR0:
390 case SPRN_GQR1:
391 case SPRN_GQR2:
392 case SPRN_GQR3:
393 case SPRN_GQR4:
394 case SPRN_GQR5:
395 case SPRN_GQR6:
396 case SPRN_GQR7:
397 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
398 break;
399 case SPRN_ICTC:
400 case SPRN_THRM1:
401 case SPRN_THRM2:
402 case SPRN_THRM3:
403 case SPRN_CTRLF:
404 case SPRN_CTRLT:
405 case SPRN_L2CR:
406 case SPRN_MMCR0_GEKKO:
407 case SPRN_MMCR1_GEKKO:
408 case SPRN_PMC1_GEKKO:
409 case SPRN_PMC2_GEKKO:
410 case SPRN_PMC3_GEKKO:
411 case SPRN_PMC4_GEKKO:
412 case SPRN_WPAR_GEKKO:
413 break;
414 unprivileged:
415 default:
416 printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
417 #ifndef DEBUG_SPR
418 emulated = EMULATE_FAIL;
419 #endif
420 break;
423 return emulated;
426 int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
428 int emulated = EMULATE_DONE;
430 switch (sprn) {
431 case SPRN_IBAT0U ... SPRN_IBAT3L:
432 case SPRN_IBAT4U ... SPRN_IBAT7L:
433 case SPRN_DBAT0U ... SPRN_DBAT3L:
434 case SPRN_DBAT4U ... SPRN_DBAT7L:
436 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
438 if (sprn % 2)
439 kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
440 else
441 kvmppc_set_gpr(vcpu, rt, bat->raw);
443 break;
445 case SPRN_SDR1:
446 if (!spr_allowed(vcpu, PRIV_HYPER))
447 goto unprivileged;
448 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
449 break;
450 case SPRN_DSISR:
451 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
452 break;
453 case SPRN_DAR:
454 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
455 break;
456 case SPRN_HIOR:
457 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
458 break;
459 case SPRN_HID0:
460 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
461 break;
462 case SPRN_HID1:
463 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
464 break;
465 case SPRN_HID2:
466 case SPRN_HID2_GEKKO:
467 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
468 break;
469 case SPRN_HID4:
470 case SPRN_HID4_GEKKO:
471 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
472 break;
473 case SPRN_HID5:
474 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
475 break;
476 case SPRN_CFAR:
477 case SPRN_PURR:
478 kvmppc_set_gpr(vcpu, rt, 0);
479 break;
480 case SPRN_GQR0:
481 case SPRN_GQR1:
482 case SPRN_GQR2:
483 case SPRN_GQR3:
484 case SPRN_GQR4:
485 case SPRN_GQR5:
486 case SPRN_GQR6:
487 case SPRN_GQR7:
488 kvmppc_set_gpr(vcpu, rt,
489 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
490 break;
491 case SPRN_THRM1:
492 case SPRN_THRM2:
493 case SPRN_THRM3:
494 case SPRN_CTRLF:
495 case SPRN_CTRLT:
496 case SPRN_L2CR:
497 case SPRN_MMCR0_GEKKO:
498 case SPRN_MMCR1_GEKKO:
499 case SPRN_PMC1_GEKKO:
500 case SPRN_PMC2_GEKKO:
501 case SPRN_PMC3_GEKKO:
502 case SPRN_PMC4_GEKKO:
503 case SPRN_WPAR_GEKKO:
504 kvmppc_set_gpr(vcpu, rt, 0);
505 break;
506 default:
507 unprivileged:
508 printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
509 #ifndef DEBUG_SPR
510 emulated = EMULATE_FAIL;
511 #endif
512 break;
515 return emulated;
518 u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
520 u32 dsisr = 0;
523 * This is what the spec says about DSISR bits (not mentioned = 0):
525 * 12:13 [DS] Set to bits 30:31
526 * 15:16 [X] Set to bits 29:30
527 * 17 [X] Set to bit 25
528 * [D/DS] Set to bit 5
529 * 18:21 [X] Set to bits 21:24
530 * [D/DS] Set to bits 1:4
531 * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS)
532 * 27:31 Set to bits 11:15 (RA)
535 switch (get_op(inst)) {
536 /* D-form */
537 case OP_LFS:
538 case OP_LFD:
539 case OP_STFD:
540 case OP_STFS:
541 dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
542 dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
543 break;
544 /* X-form */
545 case 31:
546 dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
547 dsisr |= (inst << 8) & 0x04000; /* bit 17 */
548 dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */
549 break;
550 default:
551 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
552 break;
555 dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
557 return dsisr;
560 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
562 ulong dar = 0;
563 ulong ra;
565 switch (get_op(inst)) {
566 case OP_LFS:
567 case OP_LFD:
568 case OP_STFD:
569 case OP_STFS:
570 ra = get_ra(inst);
571 if (ra)
572 dar = kvmppc_get_gpr(vcpu, ra);
573 dar += (s32)((s16)inst);
574 break;
575 case 31:
576 ra = get_ra(inst);
577 if (ra)
578 dar = kvmppc_get_gpr(vcpu, ra);
579 dar += kvmppc_get_gpr(vcpu, get_rb(inst));
580 break;
581 default:
582 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
583 break;
586 return dar;