/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"

/* #define DUMP_PAGE_TABLES */

void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
    assert(!cpu->env.has_hv_mode || !cpu->vhyp);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
        target_ulong htabsize = value & SDR_64_HTABSIZE;

        if (value & ~sdr_mask) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid bits 0x"TARGET_FMT_lx
                          " set in SDR1", value & ~sdr_mask);
            value &= sdr_mask;
        }
        if (htabsize > 28) {
            qemu_log_mask(LOG_GUEST_ERROR, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                          " stored in SDR1", htabsize);
            return;
        }
    }
#endif /* defined(TARGET_PPC64) */
    /* FIXME: Should check for valid HTABMASK values in 32-bit case */
    env->spr[SPR_SDR1] = value;
}

/*****************************************************************************/
/* PowerPC MMU emulation */

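/*
 * Compute the PAGE_READ/WRITE/EXEC rights granted by a PTE from its PP
 * bits and the segment key.  With key 0, PP values 0-2 allow read/write
 * and PP 3 is read-only; with key 1, PP 0 grants no access, PP 1/3 are
 * read-only and PP 2 is read/write.  The no-execute flag removes
 * PAGE_EXEC from the result.
 */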
static int pp_check(int key, int pp, int nx)
{
    int access;

    /* Compute access rights */
    access = 0;
    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            access |= PAGE_WRITE;
            /* fall through */
        case 0x3:
            access |= PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }

    return access;
}

static int check_prot(int prot, MMUAccessType access_type)
{
    return prot & prot_for_access_type(access_type) ? 0 : -2;
}

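/*
 * Return the index of the software TLB entry that can map @eaddr in the
 * given @way: the set is selected by the low bits of the effective page
 * number, a way offset is added, and code entries live in the upper half
 * of the array when the MMU has split I/D TLBs (env->id_tlbs == 1).
 */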
int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
                      int way, int is_code)
{
    int nr;

    /* Select TLB num in a way from address */
    nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
    /* Select TLB way */
    nr += env->tlb_per_way * way;
    /* 6xx have separate TLBs for instructions and data */
    if (is_code && env->id_tlbs == 1) {
        nr += env->nb_tlb;
    }

    return nr;
}

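/*
 * Match one software TLB entry against the context built by
 * get_segment_6xx_tlb().  Returns 0 when the entry maps the access,
 * -2 for a protection violation, -3 when two matching entries disagree
 * on RPN/WIMG/PP, and -1 when the entry simply does not match.
 */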
static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
                                target_ulong pte1, int h,
                                MMUAccessType access_type)
{
    target_ulong ptem, mmask;
    int access, ret, pteh, ptev, pp;

    ret = -1;
    /* Check validity and table match */
    ptev = pte_is_valid(pte0);
    pteh = (pte0 >> 6) & 1;
    if (ptev && h == pteh) {
        /* Check vsid & api */
        ptem = pte0 & PTE_PTEM_MASK;
        mmask = PTE_CHECK_MASK;
        pp = pte1 & 0x00000003;
        if (ptem == ctx->ptem) {
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = pp_check(ctx->key, pp, ctx->nx);
            /* Keep the matching PTE information */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = check_prot(ctx->prot, access_type);
            if (ret == 0) {
                /* Access granted */
                qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
            } else {
                /* Access right violation */
                qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
            }
        }
    }

    return ret;
}

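/*
 * Update the R (0x100) and C (0x80) status bits of the matching PTE.
 * The C bit is only set on a permitted store; otherwise PAGE_WRITE is
 * dropped from ctx->prot so that the first write access faults and the
 * changed bit can be set at that point.  Returns 1 if the PTE was
 * modified and would need to be written back.
 */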
static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
                            int ret, MMUAccessType access_type)
{
    int store = 0;

    /* Update page flags */
    if (!(*pte1p & 0x00000100)) {
        /* Update accessed flag */
        *pte1p |= 0x00000100;
        store = 1;
    }
    if (!(*pte1p & 0x00000080)) {
        if (access_type == MMU_DATA_STORE && ret == 0) {
            /* Update changed flag */
            *pte1p |= 0x00000080;
            store = 1;
        } else {
            /* Force page fault for first write access */
            ctx->prot &= ~PAGE_WRITE;
        }
    }

    return store;
}

/* Software driven TLB helpers */

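/*
 * Look up @eaddr in every way of the 6xx software TLB and, on a hit,
 * fill ctx->raddr/ctx->prot and update the R/C bits of the winning
 * entry.  Returns 0 on success, -2 on a protection violation and -1
 * when no entry matches.
 */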
static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
                            target_ulong eaddr, MMUAccessType access_type)
{
    ppc6xx_tlb_t *tlb;
    int nr, best, way;
    int ret;

    best = -1;
    ret = -1; /* No TLB found */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
        tlb = &env->tlb.tlb6[nr];
        /* This test "emulates" the PTE index match for hardware TLBs */
        if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
            qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
                          " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
                          nr, env->nb_tlb,
                          pte_is_valid(tlb->pte0) ? "valid" : "inval",
                          tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
            continue;
        }
        qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
                      TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
                      nr, env->nb_tlb,
                      pte_is_valid(tlb->pte0) ? "valid" : "inval",
                      tlb->EPN, eaddr, tlb->pte1,
                      access_type == MMU_DATA_STORE ? 'S' : 'L',
                      access_type == MMU_INST_FETCH ? 'I' : 'D');
        switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
                                     0, access_type)) {
        case -3:
            /* TLB inconsistency */
            return -1;
        case -2:
            /* Access violation */
            ret = -2;
            best = nr;
            break;
        case -1:
        default:
            /* No match */
            break;
        case 0:
            /* access granted */
            /*
             * XXX: we should go on looping to check all TLBs
             *      consistency but we can speed-up the whole thing as
             *      the result would be undefined if TLBs are not
             *      consistent.
             */
            ret = 0;
            best = nr;
            goto done;
        }
    }
    if (best != -1) {
done:
        qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx
                      " prot=%01x ret=%d\n",
                      ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
        /* Update page flags */
        pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
    }

    return ret;
}

/* Perform BAT hit & translation */

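/*
 * Decode one BAT register pair: the BL field of BATu gives the block
 * length mask, the Vs/Vp bits say whether the entry is valid for
 * supervisor or user accesses, and the PP bits of BATl give the rights
 * (0 = no access, 0b10 = read/write/execute, other values read/execute).
 */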
static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
                                 int *validp, int *protp, target_ulong *BATu,
                                 target_ulong *BATl)
{
    target_ulong bl;
    int pp, valid, prot;

    bl = (*BATu & 0x00001FFC) << 15;
    valid = 0;
    prot = 0;
    if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) ||
        (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) {
        valid = 1;
        pp = *BATl & 0x00000003;
        if (pp != 0) {
            prot = PAGE_READ | PAGE_EXEC;
            if (pp == 0x2) {
                prot |= PAGE_WRITE;
            }
        }
    }
    *blp = bl;
    *validp = valid;
    *protp = prot;
}

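/*
 * Scan the I- or D-BAT array for a block that covers @virtual.  A BAT
 * matches when the upper EA bits equal BEPI after applying the block
 * length mask; the physical address is then BRPN combined with the EA
 * offset inside the block.  Returns 0 on a hit with sufficient rights,
 * -2 on a protection violation and -1 when no valid BAT matches.
 */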
static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                           target_ulong virtual, MMUAccessType access_type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i, valid, prot;
    int ret = -1;
    bool ifetch = access_type == MMU_INST_FETCH;

    qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
                  ifetch ? 'I' : 'D', virtual);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
        qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
                      TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
                      ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
        if ((virtual & 0xF0000000) == BEPIu &&
            ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
            /* BAT matches */
            if (valid != 0) {
                /* Get physical address */
                ctx->raddr = (*BATl & 0xF0000000) |
                    ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
                    (virtual & 0x0001F000);
                /* Compute access rights */
                ctx->prot = prot;
                ret = check_prot(ctx->prot, access_type);
                if (ret == 0) {
                    qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx
                                  " prot=%c%c\n", i, ctx->raddr,
                                  ctx->prot & PAGE_READ ? 'R' : '-',
                                  ctx->prot & PAGE_WRITE ? 'W' : '-');
                }
                break;
            }
        }
    }
    if (ret < 0) {
        if (qemu_log_enabled()) {
            qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
                          TARGET_FMT_lx ":\n", virtual);
            for (i = 0; i < 4; i++) {
                BATu = &BATut[i];
                BATl = &BATlt[i];
                BEPIu = *BATu & 0xF0000000;
                BEPIl = *BATu & 0x0FFE0000;
                bl = (*BATu & 0x00001FFC) << 15;
                qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v "
                              TARGET_FMT_lx " BATu " TARGET_FMT_lx
                              " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                              TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                              __func__, ifetch ? 'I' : 'D', i, virtual,
                              *BATu, *BATl, BEPIu, BEPIl, bl);
            }
        }
    }
    /* No hit */
    return ret;
}

/* Perform segment based translation */
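/*
 * The segment register selected by the top four EA bits supplies the
 * protection key (Ks/Kp), the no-execute flag, the direct-store bit T
 * and a 24-bit VSID.  For ordinary segments the primary hash is
 * VSID XOR page index and the secondary hash is its complement;
 * direct-store segments are only partially emulated.
 */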
static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
                               target_ulong eaddr, MMUAccessType access_type,
                               int type)
{
    PowerPCCPU *cpu = env_archcpu(env);
    hwaddr hash;
    target_ulong vsid;
    int ds, target_page_bits;
    bool pr;
    int ret;
    target_ulong sr, pgidx;

    pr = FIELD_EX64(env->msr, MSR, PR);
    ctx->eaddr = eaddr;

    sr = env->sr[eaddr >> 28];
    ctx->key = (((sr & 0x20000000) && pr) ||
                ((sr & 0x40000000) && !pr)) ? 1 : 0;
    ds = sr & 0x80000000 ? 1 : 0;
    ctx->nx = sr & 0x10000000 ? 1 : 0;
    vsid = sr & 0x00FFFFFF;
    target_page_bits = TARGET_PAGE_BITS;
    qemu_log_mask(CPU_LOG_MMU,
                  "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
                  " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
                  " ir=%d dr=%d pr=%d %d t=%d\n",
                  eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr,
                  (int)FIELD_EX64(env->msr, MSR, IR),
                  (int)FIELD_EX64(env->msr, MSR, DR), pr ? 1 : 0,
                  access_type == MMU_DATA_STORE, type);
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
    hash = vsid ^ pgidx;
    ctx->ptem = (vsid << 7) | (pgidx >> 10);

    qemu_log_mask(CPU_LOG_MMU,
                  "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
                  ctx->key, ds, ctx->nx, vsid);
    ret = -1;
    if (!ds) {
        /* Check if instruction fetch is allowed, if needed */
        if (type != ACCESS_CODE || ctx->nx == 0) {
            /* Page address translation */
            qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx
                          " htab_mask " HWADDR_FMT_plx
                          " hash " HWADDR_FMT_plx "\n",
                          ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                          hash);
            ctx->hash[0] = hash;
            ctx->hash[1] = ~hash;

            /* Initialize real address with an invalid value */
            ctx->raddr = (hwaddr)-1ULL;
            /* Software TLB search */
            ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
#if defined(DUMP_PAGE_TABLES)
            if (qemu_loglevel_mask(CPU_LOG_MMU)) {
                CPUState *cs = env_cpu(env);
                hwaddr curaddr;
                uint32_t a0, a1, a2, a3;

                qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx
                         "\n", ppc_hash32_hpt_base(cpu),
                         ppc_hash32_hpt_mask(cpu) + 0x80);
                for (curaddr = ppc_hash32_hpt_base(cpu);
                     curaddr < (ppc_hash32_hpt_base(cpu)
                                + ppc_hash32_hpt_mask(cpu) + 0x80);
                     curaddr += 16) {
                    a0 = ldl_phys(cs->as, curaddr);
                    a1 = ldl_phys(cs->as, curaddr + 4);
                    a2 = ldl_phys(cs->as, curaddr + 8);
                    a3 = ldl_phys(cs->as, curaddr + 12);
                    if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
                        qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n",
                                 curaddr, a0, a1, a2, a3);
                    }
                }
            }
#endif
        } else {
            qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
            ret = -3;
        }
    } else {
        qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
        /* Direct-store segment : absolutely *BUGGY* for now */
        switch (type) {
        case ACCESS_INT:
            /* Integer load/store : only access allowed */
            break;
        case ACCESS_CODE:
            /* No code fetch is allowed in direct-store areas */
            return -4;
        case ACCESS_FLOAT:
            /* Floating point load/store */
            return -4;
        case ACCESS_RES:
            /* lwarx, ldarx or stwcx. */
            return -4;
        case ACCESS_CACHE:
            /*
             * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
             *
             * Should make the instruction do a no-op.  As it already does
             * a no-op, it's quite easy :-)
             */
            ctx->raddr = eaddr;
            return 0;
        case ACCESS_EXT:
            /* eciwx or ecowx */
            return -4;
        default:
            qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
                          "address translation\n");
            return -4;
        }
        if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
            (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
            ctx->raddr = eaddr;
            ret = 2;
        } else {
            ret = -2;
        }
    }

    return ret;
}

/* Generic TLB check function for embedded PowerPC implementations */
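/*
 * Returns 0 when @tlb is valid, its PID matches @pid (PID 0 entries are
 * global) and the effective address falls inside the entry, in which
 * case *raddrp is set to the translated address.  When @ext is set the
 * RPN is extended with four extra bits to form a 36-bit physical
 * address.  Returns -1 otherwise.
 */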
int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
                     hwaddr *raddrp,
                     target_ulong address, uint32_t pid, int ext,
                     int i)
{
    target_ulong mask;

    /* Check valid flag */
    if (!(tlb->prot & PAGE_VALID)) {
        return -1;
    }
    mask = ~(tlb->size - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
                  " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
                  __func__, i, address, pid, tlb->EPN,
                  mask, (uint32_t)tlb->PID, tlb->prot);
    /* Check PID */
    if (tlb->PID != 0 && tlb->PID != pid) {
        return -1;
    }
    /* Check effective address */
    if ((address & mask) != tlb->EPN) {
        return -1;
    }
    *raddrp = (tlb->RPN & mask) | (address & ~mask);
    if (ext) {
        /* Extend the physical address to 36 bits */
        *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
    }

    return 0;
}

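/*
 * 40x translation: each TLB entry selects one of 16 protection zones
 * via its attribute field.  The two ZPR bits for that zone either grant
 * full access (0b11, or 0b10 in supervisor mode), defer to the entry's
 * own permission bits (0b01, 0b10 in user mode, 0b00 in supervisor
 * mode), or raise a zone protection fault (0b00 in user mode).
 */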
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                       target_ulong address,
                                       MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret, zsel, zpr, pr;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    pr = FIELD_EX64(env->msr, MSR, PR);
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        if (ppcemb_tlb_check(env, tlb, &raddr, address,
                             env->spr[SPR_40x_PID], 0, i) < 0) {
            continue;
        }
        zsel = (tlb->attr >> 4) & 0xF;
        zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
                      __func__, i, zsel, zpr, access_type, tlb->attr);
        /* Check execute enable bit */
        switch (zpr) {
        case 0x2:
            if (pr != 0) {
                goto check_perms;
            }
            /* fall through */
        case 0x3:
            /* All accesses granted */
            ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            ret = 0;
            break;
        case 0x0:
            if (pr != 0) {
                /* Raise Zone protection fault. */
                env->spr[SPR_40x_ESR] = 1 << 22;
                ctx->prot = 0;
                ret = -2;
                break;
            }
            /* fall through */
        case 0x1:
        check_perms:
            /* Check from TLB entry */
            ctx->prot = tlb->prot;
            ret = check_prot(ctx->prot, access_type);
            if (ret == -2) {
                env->spr[SPR_40x_ESR] = 0;
            }
            break;
        }
        if (ret >= 0) {
            ctx->raddr = raddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                          " => " HWADDR_FMT_plx
                          " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
                          ret);
            return 0;
        }
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                  " => " HWADDR_FMT_plx
                  " %d %d\n", __func__, address, raddr, ctx->prot, ret);

    return ret;
}

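/*
 * BookE check: an entry matches if its PID equals PID, PID1 or PID2
 * (or is 0, i.e. global), and its address-space bit (attr bit 0) agrees
 * with the MSR IR/DR bit for the access.  The low nibble of tlb->prot
 * holds the user permissions and the high nibble the supervisor ones.
 */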
static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
                              hwaddr *raddr, int *prot, target_ulong address,
                              MMUAccessType access_type, int i)
{
    int prot2;

    if (ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID],
                         !env->nb_pids, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID1] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
        goto found_tlb;
    }

    if (env->spr[SPR_BOOKE_PID2] &&
        ppcemb_tlb_check(env, tlb, raddr, address,
                         env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
        goto found_tlb;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
    return -1;

found_tlb:

    if (FIELD_EX64(env->msr, MSR, PR)) {
        prot2 = tlb->prot & 0xF;
    } else {
        prot2 = (tlb->prot >> 4) & 0xF;
    }

    /* Check the address space */
    if ((access_type == MMU_INST_FETCH ?
         FIELD_EX64(env->msr, MSR, IR) :
         FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                         target_ulong address,
                                         MMUAccessType access_type)
{
    ppcemb_tlb_t *tlb;
    hwaddr raddr;
    int i, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;
    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                 access_type, i);
        if (ret != -1) {
            break;
        }
    }

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__,
                      address, ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__,
                      address, raddr, ctx->prot, ret);
    }

    return ret;
}

hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
                                 ppcmas_tlb_t *tlb)
{
    int tlbm_size;

    tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;

    return 1024ULL << tlbm_size;
}

/* TLB check function for MAS based SoftTLBs */
int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
                     hwaddr *raddrp, target_ulong address,
                     uint32_t pid)
{
    hwaddr mask;
    uint32_t tlb_pid;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /* In 32bit mode we can only address 32bit EAs */
        address = (uint32_t)address;
    }

    /* Check valid flag */
    if (!(tlb->mas1 & MAS1_VALID)) {
        return -1;
    }

    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
                  " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
                  HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
                  __func__, address, pid, tlb->mas1, tlb->mas2, mask,
                  tlb->mas7_3, tlb->mas8);

    /* Check PID */
    tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
    if (tlb_pid != 0 && tlb_pid != pid) {
        return -1;
    }

    /* Check effective address */
    if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
        return -1;
    }

    if (raddrp) {
        *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
    }

    return 0;
}

static bool is_epid_mmu(int mmu_idx)
{
    return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
}

static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
{
    uint32_t esr = 0;
    if (access_type == MMU_DATA_STORE) {
        esr |= ESR_ST;
    }
    if (is_epid_mmu(mmu_idx)) {
        esr |= ESR_EPID;
    }
    return esr;
}

/*
 * Get EPID register given the mmu_idx. If this is regular load,
 * construct the EPID access bits from current processor state.
 *
 * Get the effective AS and PR bits and the PID. The PID is returned
 * only if EPID load is requested, otherwise the caller must detect
 * the correct EPID.  Return true if valid EPID is returned.
 */
static bool mmubooke206_get_as(CPUPPCState *env,
                               int mmu_idx, uint32_t *epid_out,
                               bool *as_out, bool *pr_out)
{
    if (is_epid_mmu(mmu_idx)) {
        uint32_t epidr;
        if (mmu_idx == PPC_TLB_EPID_STORE) {
            epidr = env->spr[SPR_BOOKE_EPSC];
        } else {
            epidr = env->spr[SPR_BOOKE_EPLC];
        }
        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
        *as_out = !!(epidr & EPID_EAS);
        *pr_out = !!(epidr & EPID_EPR);
        return true;
    } else {
        *as_out = FIELD_EX64(env->msr, MSR, DS);
        *pr_out = FIELD_EX64(env->msr, MSR, PR);
        return false;
    }
}

/* Check if the tlb found by hashing really matches */
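/*
 * Permissions come from MAS7_3: the UR/UW/UX bits apply in user mode
 * (or when an external PID access runs with EPR set) and SR/SW/SX in
 * supervisor mode.  The TS bit of MAS1 must also match the address
 * space of the access.
 */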
static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
                                 hwaddr *raddr, int *prot,
                                 target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int prot2 = 0;
    uint32_t epid;
    bool as, pr;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (!use_epid) {
        if (ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID1] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID1]) >= 0) {
            goto found_tlb;
        }

        if (env->spr[SPR_BOOKE_PID2] &&
            ppcmas_tlb_check(env, tlb, raddr, address,
                             env->spr[SPR_BOOKE_PID2]) >= 0) {
            goto found_tlb;
        }
    } else {
        if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
            goto found_tlb;
        }
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address "
                  "0x" TARGET_FMT_lx "\n", __func__, address);
    return -1;

found_tlb:

    if (pr) {
        if (tlb->mas7_3 & MAS3_UR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_UW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_UX) {
            prot2 |= PAGE_EXEC;
        }
    } else {
        if (tlb->mas7_3 & MAS3_SR) {
            prot2 |= PAGE_READ;
        }
        if (tlb->mas7_3 & MAS3_SW) {
            prot2 |= PAGE_WRITE;
        }
        if (tlb->mas7_3 & MAS3_SX) {
            prot2 |= PAGE_EXEC;
        }
    }

    /* Check the address space and permissions */
    if (access_type == MMU_INST_FETCH) {
        /* There is no way to fetch code using epid load */
        assert(!use_epid);
        as = FIELD_EX64(env->msr, MSR, IR);
    }

    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
        return -1;
    }

    *prot = prot2;
    if (prot2 & prot_for_access_type(access_type)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
        return 0;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
    return access_type == MMU_INST_FETCH ? -3 : -2;
}

static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
                                            target_ulong address,
                                            MMUAccessType access_type,
                                            int mmu_idx)
{
    ppcmas_tlb_t *tlb;
    hwaddr raddr;
    int i, j, ret;

    ret = -1;
    raddr = (hwaddr)-1ULL;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
                                        access_type, mmu_idx);
            if (ret != -1) {
                goto found_tlb;
            }
        }
    }

found_tlb:

    if (ret >= 0) {
        ctx->raddr = raddr;
        qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                      ctx->raddr, ctx->prot, ret);
    } else {
        qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
                      " => " HWADDR_FMT_plx " %d %d\n", __func__, address,
                      raddr, ctx->prot, ret);
    }

    return ret;
}

static const char *book3e_tsize_to_str[32] = {
    "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
    "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
    "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
    "1T", "2T"
};

static void mmubooke_dump_mmu(CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }

    qemu_printf("\nTLB:\n");
    qemu_printf("Effective          Physical           Size PID   Prot     "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        hwaddr ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
        /* Extend the physical address to 36 bits */
        pa |= (hwaddr)(entry->RPN & 0xF) << 32;
        if (size >= 1 * MiB) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
        }
        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }
}

static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    qemu_printf("\nTLB%d:\n", tlbn);
    qemu_printf("Effective          Physical           Size TID  TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        hwaddr ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
                    " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}

static void mmubooke206_dump_mmu(CPUPPCState *env)
{
    int offset = 0;
    int i;

    if (kvm_enabled() && !env->kvm_sw_tlb) {
        qemu_printf("Cannot access KVM TLB\n");
        return;
    }

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(env, i, offset, size);
        offset += size;
    }
}

static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
{
    target_ulong *BATlt, *BATut, *BATu, *BATl;
    target_ulong BEPIl, BEPIu, bl;
    int i;

    switch (type) {
    case ACCESS_CODE:
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
        break;
    default:
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
        break;
    }

    for (i = 0; i < env->nb_BATs; i++) {
        BATu = &BATut[i];
        BATl = &BATlt[i];
        BEPIu = *BATu & 0xF0000000;
        BEPIl = *BATu & 0x0FFE0000;
        bl = (*BATu & 0x00001FFC) << 15;
        qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
                    " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                    TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                    type == ACCESS_CODE ? "code" : "data", i,
                    *BATu, *BATl, BEPIu, BEPIl, bl);
    }
}

static void mmu6xx_dump_mmu(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc6xx_tlb_t *tlb;
    target_ulong sr;
    int type, way, entry, i;

    qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
    qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));

    qemu_printf("\nSegment registers:\n");
    for (i = 0; i < 32; i++) {
        sr = env->sr[i];
        if (sr & 0x80000000) {
            qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
                        "CNTLR_SPEC=0x%05x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
                        (uint32_t)(sr & 0xFFFFF));
        } else {
            qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
                        sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
                        sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
                        (uint32_t)(sr & 0x00FFFFFF));
        }
    }

    qemu_printf("\nBATs:\n");
    mmu6xx_dump_BATs(env, ACCESS_INT);
    mmu6xx_dump_BATs(env, ACCESS_CODE);

    if (env->id_tlbs != 1) {
        qemu_printf("ERROR: 6xx MMU should have separate TLBs"
                    " for code and data\n");
    }

    qemu_printf("\nTLBs [EPN EPN + SIZE]\n");

    for (type = 0; type < 2; type++) {
        for (way = 0; way < env->nb_ways; way++) {
            for (entry = env->nb_tlb * type + env->tlb_per_way * way;
                 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
                 entry++) {

                tlb = &env->tlb.tlb6[entry];
                qemu_printf("%s TLB %02d/%02d way:%d %s ["
                            TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
                            type ? "code" : "data", entry % env->nb_tlb,
                            env->nb_tlb, way,
                            pte_is_valid(tlb->pte0) ? "valid" : "inval",
                            tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
            }
        }
    }
}

void dump_mmu(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(env);
        break;
    case POWERPC_MMU_SOFT_6xx:
        mmu6xx_dump_mmu(env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        dump_slb(env_archcpu(env));
        break;
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(env_archcpu(env))) {
            qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
                          __func__);
        } else {
            dump_slb(env_archcpu(env));
        }
        break;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}

static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                          MMUAccessType access_type)
{
    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        ctx->prot |= PAGE_WRITE;
        break;

    default:
        /* Caller's checks mean we should never get here for other models */
        g_assert_not_reached();
    }

    return 0;
}

int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
                              target_ulong eaddr,
                              MMUAccessType access_type, int type,
                              int mmu_idx)
{
    int ret = -1;
    bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) ||
                     (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR));

    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);
            }
            if (ret < 0) {
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);
            }
        }
        break;

    case POWERPC_MMU_SOFT_4xx:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            ret = mmu40x_get_physical_address(env, ctx, eaddr, access_type);
        }
        break;
    case POWERPC_MMU_BOOKE:
        ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type);
        break;
    case POWERPC_MMU_BOOKE206:
        ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
                                               mmu_idx);
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_REAL:
        if (real_mode) {
            ret = check_physical(env, ctx, eaddr, access_type);
        } else {
            cpu_abort(env_cpu(env),
                      "PowerPC in real mode does not do any translation\n");
        }
        return -1;
    default:
        cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}

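/*
 * On a BookE 2.06 TLB miss, preload the MAS registers with the defaults
 * from MAS4 (TLB select, page size, WIMGE) plus the faulting EPN, TID
 * and address space, so that the guest miss handler typically only has
 * to fill in the RPN and issue tlbwe.  MAS0 ESEL/NV implement a simple
 * round-robin victim selection across the ways of TLB0.
 */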
static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
                                         MMUAccessType access_type, int mmu_idx)
{
    uint32_t epid;
    bool as, pr;
    uint32_t missed_tid = 0;
    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);

    if (access_type == MMU_INST_FETCH) {
        as = FIELD_EX64(env->msr, MSR, IR);
    }
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS */
    if (as) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    if (!use_epid) {
        switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
        case MAS4_TIDSELD_PID0:
            missed_tid = env->spr[SPR_BOOKE_PID];
            break;
        case MAS4_TIDSELD_PID1:
            missed_tid = env->spr[SPR_BOOKE_PID1];
            break;
        case MAS4_TIDSELD_PID2:
            missed_tid = env->spr[SPR_BOOKE_PID2];
            break;
        }
        env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
    } else {
        missed_tid = epid;
        env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
    }
    env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

/* Perform address translation */
/* TODO: Split this by mmu_model. */
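/*
 * Legacy translation path shared by the 6xx, 4xx, 8xx and BookE MMU
 * models.  get_physical_address_wtlb() returns 0 on success; negative
 * values encode the fault type (-1 no match, -2 protection, -3 no
 * execute, -4 direct store), which this function turns into the
 * model-specific exception state when the access is guest visible.
 */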
static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr,
                            MMUAccessType access_type,
                            hwaddr *raddrp, int *psizep, int *protp,
                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    mmu_ctx_t ctx;
    int type;
    int ret;

    if (access_type == MMU_INST_FETCH) {
        /* code access */
        type = ACCESS_CODE;
    } else if (guest_visible) {
        /* data access */
        type = env->access_type;
    } else {
        type = ACCESS_INT;
    }

    ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type,
                                    type, mmu_idx);
    if (ret == 0) {
        *raddrp = ctx.raddr;
        *protp = ctx.prot;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    if (guest_visible) {
        log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
        if (type == ACCESS_CODE) {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    cs->exception_index = POWERPC_EXCP_IFTLB;
                    env->error_code = 1 << 18;
                    env->spr[SPR_IMISS] = eaddr;
                    env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
                    goto tlb_miss;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    env->spr[SPR_40x_ESR] = 0x00000000;
                    break;
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_ITLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
                    break;
                case POWERPC_MMU_MPC8xx:
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;
                } else {
                    env->error_code = 0x08000000;
                }
                break;
            case -3:
                /* No execute protection violation */
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_ESR] = 0x00000000;
                    env->error_code = 0;
                } else {
                    env->error_code = 0x10000000;
                }
                cs->exception_index = POWERPC_EXCP_ISI;
                break;
            case -4:
                /* Direct store exception */
                /* No code fetch is allowed in direct-store areas */
                cs->exception_index = POWERPC_EXCP_ISI;
                if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                    (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->error_code = 0;
                } else {
                    env->error_code = 0x10000000;
                }
                break;
            }
        } else {
            switch (ret) {
            case -1:
                /* No matches in page tables or TLB */
                switch (env->mmu_model) {
                case POWERPC_MMU_SOFT_6xx:
                    if (access_type == MMU_DATA_STORE) {
                        cs->exception_index = POWERPC_EXCP_DSTLB;
                        env->error_code = 1 << 16;
                    } else {
                        cs->exception_index = POWERPC_EXCP_DLTLB;
                        env->error_code = 0;
                    }
                    env->spr[SPR_DMISS] = eaddr;
                    env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
                tlb_miss:
                    env->error_code |= ctx.key << 19;
                    env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
                                          get_pteg_offset32(cpu, ctx.hash[0]);
                    env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
                                          get_pteg_offset32(cpu, ctx.hash[1]);
                    break;
                case POWERPC_MMU_SOFT_4xx:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] = 0x00800000;
                    } else {
                        env->spr[SPR_40x_ESR] = 0x00000000;
                    }
                    break;
                case POWERPC_MMU_MPC8xx:
                    /* XXX: TODO */
                    cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
                case POWERPC_MMU_BOOKE206:
                    booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx);
                    /* fall through */
                case POWERPC_MMU_BOOKE:
                    cs->exception_index = POWERPC_EXCP_DTLB;
                    env->error_code = 0;
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
                    break;
                case POWERPC_MMU_REAL:
                    cpu_abort(cs, "PowerPC in real mode should never raise "
                              "any MMU exceptions\n");
                default:
                    cpu_abort(cs, "Unknown or invalid MMU model\n");
                }
                break;
            case -2:
                /* Access rights violation */
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                if (env->mmu_model == POWERPC_MMU_SOFT_4xx) {
                    env->spr[SPR_40x_DEAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_40x_ESR] |= 0x00800000;
                    }
                } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
                           (env->mmu_model == POWERPC_MMU_BOOKE206)) {
                    env->spr[SPR_BOOKE_DEAR] = eaddr;
                    env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
                } else {
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0A000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                break;
            case -4:
                /* Direct store exception */
                switch (type) {
                case ACCESS_FLOAT:
                    /* Floating point load/store */
                    cs->exception_index = POWERPC_EXCP_ALIGN;
                    env->error_code = POWERPC_EXCP_ALIGN_FP;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                case ACCESS_RES:
                    /* lwarx, ldarx or stwcx. */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04000000;
                    }
                    break;
                case ACCESS_EXT:
                    /* eciwx or ecowx */
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x06100000;
                    } else {
                        env->spr[SPR_DSISR] = 0x04100000;
                    }
                    break;
                default:
                    printf("DSI: invalid exception (%d)\n", ret);
                    cs->exception_index = POWERPC_EXCP_PROGRAM;
                    env->error_code =
                        POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
                    env->spr[SPR_DAR] = eaddr;
                    break;
                }
                break;
            }
        }
    }
    return false;
}

/*****************************************************************************/

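/*
 * Top-level translation dispatch: the 64-bit hash and radix MMUs and the
 * 32-bit hash MMU have dedicated translators, everything else goes
 * through the legacy ppc_jumbo_xlate() path above.
 */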
bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
               hwaddr *raddrp, int *psizep, int *protp,
               int mmu_idx, bool guest_visible)
{
    switch (cpu->env.mmu_model) {
#if defined(TARGET_PPC64)
    case POWERPC_MMU_3_00:
        if (ppc64_v3_radix(cpu)) {
            return ppc_radix64_xlate(cpu, eaddr, access_type, raddrp,
                                     psizep, protp, mmu_idx, guest_visible);
        }
        /* fall through */
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_03:
    case POWERPC_MMU_2_06:
    case POWERPC_MMU_2_07:
        return ppc_hash64_xlate(cpu, eaddr, access_type,
                                raddrp, psizep, protp, mmu_idx, guest_visible);
#endif

    case POWERPC_MMU_32B:
        return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp,
                                psizep, protp, mmu_idx, guest_visible);

    default:
        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
                               psizep, protp, mmu_idx, guest_visible);
    }
}

hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int s, p;

    /*
     * Some MMUs have separate TLBs for code and data. If we only
     * try an MMU_DATA_LOAD, we may not be able to read instructions
     * mapped by code TLBs, so we also try a MMU_INST_FETCH.
     */
    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, false), false) ||
        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p,
                  cpu_mmu_index(&cpu->env, true), false)) {
        return raddr & TARGET_PAGE_MASK;
    }
    return -1;
}