1 /*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "exec/helper-proto.h"
24 #include "sysemu/kvm.h"
25 #include "kvm_ppc.h"
26 #include "mmu-hash64.h"
27 #include "mmu-hash32.h"
28 #include "exec/exec-all.h"
29 #include "exec/cpu_ldst.h"
30 #include "exec/log.h"
31 #include "helper_regs.h"
32 #include "qemu/error-report.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/qemu-print.h"
35 #include "mmu-book3s-v3.h"
36 #include "mmu-radix64.h"
38 /* #define DEBUG_MMU */
39 /* #define DEBUG_BATS */
40 /* #define DEBUG_SOFTWARE_TLB */
41 /* #define DUMP_PAGE_TABLES */
42 /* #define FLUSH_ALL_TLBS */
44 #ifdef DEBUG_MMU
45 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
46 #else
47 # define LOG_MMU_STATE(cpu) do { } while (0)
48 #endif
50 #ifdef DEBUG_SOFTWARE_TLB
51 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
52 #else
53 # define LOG_SWTLB(...) do { } while (0)
54 #endif
56 #ifdef DEBUG_BATS
57 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
58 #else
59 # define LOG_BATS(...) do { } while (0)
60 #endif
62 /*****************************************************************************/
63 /* PowerPC MMU emulation */
65 /* Context used internally during MMU translations */
66 typedef struct mmu_ctx_t mmu_ctx_t;
67 struct mmu_ctx_t {
68 hwaddr raddr; /* Real address */
69 hwaddr eaddr; /* Effective address */
70 int prot; /* Protection bits */
71 hwaddr hash[2]; /* Pagetable hash values */
72 target_ulong ptem; /* Virtual segment ID | API */
73 int key; /* Access key */
74 int nx; /* Non-execute area */
77 /* Common routines used by software and hardware TLB emulation */
78 static inline int pte_is_valid(target_ulong pte0)
80 return pte0 & 0x80000000 ? 1 : 0;
83 static inline void pte_invalidate(target_ulong *pte0)
85 *pte0 &= ~0x80000000;
88 #define PTE_PTEM_MASK 0x7FFFFFBF
89 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
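/*
 * Compute the page protection implied by a hash PTE: 'key' comes from the
 * segment register Ks/Kp bit selected by MSR[PR], 'pp' are the PTE PP bits
 * and 'nx' is the segment no-execute bit. key=0: PP 0/1/2 grant read/write,
 * PP 3 is read-only; key=1: PP 0 grants nothing, PP 1/3 are read-only and
 * PP 2 is read/write. PAGE_EXEC is added whenever the segment allows
 * instruction fetches.
 */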
91 static int pp_check(int key, int pp, int nx)
93 int access;
95 /* Compute access rights */
96 access = 0;
97 if (key == 0) {
98 switch (pp) {
99 case 0x0:
100 case 0x1:
101 case 0x2:
102 access |= PAGE_WRITE;
103 /* fall through */
104 case 0x3:
105 access |= PAGE_READ;
106 break;
108 } else {
109 switch (pp) {
110 case 0x0:
111 access = 0;
112 break;
113 case 0x1:
114 case 0x3:
115 access = PAGE_READ;
116 break;
117 case 0x2:
118 access = PAGE_READ | PAGE_WRITE;
119 break;
122 if (nx == 0) {
123 access |= PAGE_EXEC;
126 return access;
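/*
 * Check the computed protection against the requested access: returns 0 when
 * the access is allowed and -2 on a protection violation (rw distinguishes
 * stores from loads for data accesses, ACCESS_CODE selects instruction
 * fetches).
 */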
129 static int check_prot(int prot, int rw, int access_type)
131 int ret;
133 if (access_type == ACCESS_CODE) {
134 if (prot & PAGE_EXEC) {
135 ret = 0;
136 } else {
137 ret = -2;
139 } else if (rw) {
140 if (prot & PAGE_WRITE) {
141 ret = 0;
142 } else {
143 ret = -2;
145 } else {
146 if (prot & PAGE_READ) {
147 ret = 0;
148 } else {
149 ret = -2;
153 return ret;
156 static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
157 target_ulong pte1, int h,
158 int rw, int type)
160 target_ulong ptem, mmask;
161 int access, ret, pteh, ptev, pp;
163 ret = -1;
164 /* Check validity and table match */
165 ptev = pte_is_valid(pte0);
166 pteh = (pte0 >> 6) & 1;
167 if (ptev && h == pteh) {
168 /* Check vsid & api */
169 ptem = pte0 & PTE_PTEM_MASK;
170 mmask = PTE_CHECK_MASK;
171 pp = pte1 & 0x00000003;
172 if (ptem == ctx->ptem) {
173 if (ctx->raddr != (hwaddr)-1ULL) {
174 /* all matches should have equal RPN, WIMG & PP */
175 if ((ctx->raddr & mmask) != (pte1 & mmask)) {
176 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
177 return -3;
180 /* Compute access rights */
181 access = pp_check(ctx->key, pp, ctx->nx);
182 /* Keep the matching PTE information */
183 ctx->raddr = pte1;
184 ctx->prot = access;
185 ret = check_prot(ctx->prot, rw, type);
186 if (ret == 0) {
187 /* Access granted */
188 qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
189 } else {
190 /* Access right violation */
191 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
196 return ret;
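/*
 * Update the PTE reference (R, 0x00000100) and change (C, 0x00000080) bits.
 * Returns 1 when the in-memory PTE must be written back. C is only set on a
 * permitted store; otherwise PAGE_WRITE is dropped from the context so the
 * first store to the page faults again and sets it.
 */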
199 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
200 int ret, int rw)
202 int store = 0;
204 /* Update page flags */
205 if (!(*pte1p & 0x00000100)) {
206 /* Update accessed flag */
207 *pte1p |= 0x00000100;
208 store = 1;
210 if (!(*pte1p & 0x00000080)) {
211 if (rw == 1 && ret == 0) {
212 /* Update changed flag */
213 *pte1p |= 0x00000080;
214 store = 1;
215 } else {
216 /* Force page fault for first write access */
217 ctx->prot &= ~PAGE_WRITE;
221 return store;
224 /* Software driven TLB helpers */
225 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
226 int way, int is_code)
228 int nr;
230 /* Select TLB num in a way from address */
231 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
232 /* Select TLB way */
233 nr += env->tlb_per_way * way;
234 /* 6xx have separate TLBs for instructions and data */
235 if (is_code && env->id_tlbs == 1) {
236 nr += env->nb_tlb;
239 return nr;
242 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
244 ppc6xx_tlb_t *tlb;
245 int nr, max;
247 /* LOG_SWTLB("Invalidate all TLBs\n"); */
248 /* Invalidate all defined software TLB */
249 max = env->nb_tlb;
250 if (env->id_tlbs == 1) {
251 max *= 2;
253 for (nr = 0; nr < max; nr++) {
254 tlb = &env->tlb.tlb6[nr];
255 pte_invalidate(&tlb->pte0);
257 tlb_flush(env_cpu(env));
260 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
261 target_ulong eaddr,
262 int is_code, int match_epn)
264 #if !defined(FLUSH_ALL_TLBS)
265 CPUState *cs = env_cpu(env);
266 ppc6xx_tlb_t *tlb;
267 int way, nr;
269 /* Invalidate ITLB + DTLB, all ways */
270 for (way = 0; way < env->nb_ways; way++) {
271 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
272 tlb = &env->tlb.tlb6[nr];
273 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
274 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
275 env->nb_tlb, eaddr);
276 pte_invalidate(&tlb->pte0);
277 tlb_flush_page(cs, tlb->EPN);
280 #else
281 /* XXX: the PowerPC specification says this is valid as well */
282 ppc6xx_tlb_invalidate_all(env);
283 #endif
286 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
287 target_ulong eaddr, int is_code)
289 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
292 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
293 int is_code, target_ulong pte0, target_ulong pte1)
295 ppc6xx_tlb_t *tlb;
296 int nr;
298 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
299 tlb = &env->tlb.tlb6[nr];
300 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
301 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
302 /* Invalidate any pending reference in QEMU for this virtual address */
303 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
304 tlb->pte0 = pte0;
305 tlb->pte1 = pte1;
306 tlb->EPN = EPN;
307 /* Store last way for LRU mechanism */
308 env->last_way = way;
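/*
 * Look up 'eaddr' in the 6xx software TLB: probe every way, let
 * ppc6xx_tlb_pte_check() validate the match and the access rights, and
 * update the R/C bits of the entry that was hit.
 */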
311 static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
312 target_ulong eaddr, int rw, int access_type)
314 ppc6xx_tlb_t *tlb;
315 int nr, best, way;
316 int ret;
318 best = -1;
319 ret = -1; /* No TLB found */
320 for (way = 0; way < env->nb_ways; way++) {
321 nr = ppc6xx_tlb_getnum(env, eaddr, way,
322 access_type == ACCESS_CODE ? 1 : 0);
323 tlb = &env->tlb.tlb6[nr];
324 /* This test "emulates" the PTE index match for hardware TLBs */
325 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
326 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
327 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
328 pte_is_valid(tlb->pte0) ? "valid" : "inval",
329 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
330 continue;
332 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
333 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
334 pte_is_valid(tlb->pte0) ? "valid" : "inval",
335 tlb->EPN, eaddr, tlb->pte1,
336 rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
337 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
338 0, rw, access_type)) {
339 case -3:
340 /* TLB inconsistency */
341 return -1;
342 case -2:
343 /* Access violation */
344 ret = -2;
345 best = nr;
346 break;
347 case -1:
348 default:
349 /* No match */
350 break;
351 case 0:
352 /* access granted */
354 * XXX: we should keep looping to check all TLBs for
355 * consistency, but we can speed up the whole thing since
356 * the result would be undefined if the TLBs are not
357 * consistent.
359 ret = 0;
360 best = nr;
361 goto done;
364 if (best != -1) {
365 done:
366 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
367 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
368 /* Update page flags */
369 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
372 return ret;
375 /* Perform BAT hit & translation */
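/*
 * Decode one BAT pair: derive the block length mask from BATU[BL], check the
 * Vs/Vp valid bits against MSR[PR] and compute the protection from BATL[PP].
 */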
376 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
377 int *validp, int *protp, target_ulong *BATu,
378 target_ulong *BATl)
380 target_ulong bl;
381 int pp, valid, prot;
383 bl = (*BATu & 0x00001FFC) << 15;
384 valid = 0;
385 prot = 0;
386 if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
387 ((msr_pr != 0) && (*BATu & 0x00000001))) {
388 valid = 1;
389 pp = *BATl & 0x00000003;
390 if (pp != 0) {
391 prot = PAGE_READ | PAGE_EXEC;
392 if (pp == 0x2) {
393 prot |= PAGE_WRITE;
397 *blp = bl;
398 *validp = valid;
399 *protp = prot;
402 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
403 target_ulong virtual, int rw, int type)
405 target_ulong *BATlt, *BATut, *BATu, *BATl;
406 target_ulong BEPIl, BEPIu, bl;
407 int i, valid, prot;
408 int ret = -1;
410 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
411 type == ACCESS_CODE ? 'I' : 'D', virtual);
412 switch (type) {
413 case ACCESS_CODE:
414 BATlt = env->IBAT[1];
415 BATut = env->IBAT[0];
416 break;
417 default:
418 BATlt = env->DBAT[1];
419 BATut = env->DBAT[0];
420 break;
422 for (i = 0; i < env->nb_BATs; i++) {
423 BATu = &BATut[i];
424 BATl = &BATlt[i];
425 BEPIu = *BATu & 0xF0000000;
426 BEPIl = *BATu & 0x0FFE0000;
427 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
428 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
429 " BATl " TARGET_FMT_lx "\n", __func__,
430 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
431 if ((virtual & 0xF0000000) == BEPIu &&
432 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
433 /* BAT matches */
434 if (valid != 0) {
435 /* Get physical address */
436 ctx->raddr = (*BATl & 0xF0000000) |
437 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
438 (virtual & 0x0001F000);
439 /* Compute access rights */
440 ctx->prot = prot;
441 ret = check_prot(ctx->prot, rw, type);
442 if (ret == 0) {
443 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
444 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
445 ctx->prot & PAGE_WRITE ? 'W' : '-');
447 break;
451 if (ret < 0) {
452 #if defined(DEBUG_BATS)
453 if (qemu_log_enabled()) {
454 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
455 for (i = 0; i < 4; i++) {
456 BATu = &BATut[i];
457 BATl = &BATlt[i];
458 BEPIu = *BATu & 0xF0000000;
459 BEPIl = *BATu & 0x0FFE0000;
460 bl = (*BATu & 0x00001FFC) << 15;
461 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
462 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
463 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
464 __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
465 *BATu, *BATl, BEPIu, BEPIl, bl);
468 #endif
470 /* No hit */
471 return ret;
474 /* Perform segment based translation */
475 static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
476 target_ulong eaddr, int rw, int type)
478 PowerPCCPU *cpu = env_archcpu(env);
479 hwaddr hash;
480 target_ulong vsid;
481 int ds, pr, target_page_bits;
482 int ret;
483 target_ulong sr, pgidx;
485 pr = msr_pr;
486 ctx->eaddr = eaddr;
488 sr = env->sr[eaddr >> 28];
489 ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
490 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
491 ds = sr & 0x80000000 ? 1 : 0;
492 ctx->nx = sr & 0x10000000 ? 1 : 0;
493 vsid = sr & 0x00FFFFFF;
494 target_page_bits = TARGET_PAGE_BITS;
495 qemu_log_mask(CPU_LOG_MMU,
496 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
497 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
498 " ir=%d dr=%d pr=%d %d t=%d\n",
499 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
500 (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
501 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
502 hash = vsid ^ pgidx;
503 ctx->ptem = (vsid << 7) | (pgidx >> 10);
505 qemu_log_mask(CPU_LOG_MMU,
506 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
507 ctx->key, ds, ctx->nx, vsid);
508 ret = -1;
509 if (!ds) {
510 /* Check if instruction fetch is allowed, if needed */
511 if (type != ACCESS_CODE || ctx->nx == 0) {
512 /* Page address translation */
513 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
514 " htab_mask " TARGET_FMT_plx
515 " hash " TARGET_FMT_plx "\n",
516 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
517 ctx->hash[0] = hash;
518 ctx->hash[1] = ~hash;
520 /* Initialize real address with an invalid value */
521 ctx->raddr = (hwaddr)-1ULL;
522 /* Software TLB search */
523 ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
524 #if defined(DUMP_PAGE_TABLES)
525 if (qemu_loglevel_mask(CPU_LOG_MMU)) {
526 CPUState *cs = env_cpu(env);
527 hwaddr curaddr;
528 uint32_t a0, a1, a2, a3;
530 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
531 "\n", ppc_hash32_hpt_base(cpu),
532 ppc_hash32_hpt_mask(env) + 0x80);
533 for (curaddr = ppc_hash32_hpt_base(cpu);
534 curaddr < (ppc_hash32_hpt_base(cpu)
535 + ppc_hash32_hpt_mask(cpu) + 0x80);
536 curaddr += 16) {
537 a0 = ldl_phys(cs->as, curaddr);
538 a1 = ldl_phys(cs->as, curaddr + 4);
539 a2 = ldl_phys(cs->as, curaddr + 8);
540 a3 = ldl_phys(cs->as, curaddr + 12);
541 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
542 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
543 curaddr, a0, a1, a2, a3);
547 #endif
548 } else {
549 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
550 ret = -3;
552 } else {
553 target_ulong sr;
555 qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
556 /* Direct-store segment: absolutely *BUGGY* for now */
559 * Direct-store implies a 32-bit MMU.
560 * Check the Segment Register's bus unit ID (BUID).
562 sr = env->sr[eaddr >> 28];
563 if ((sr & 0x1FF00000) >> 20 == 0x07f) {
565 * Memory-forced I/O controller interface access
567 * If T=1 and BUID=x'07F', the 601 performs a memory
568 * access to SR[28-31] LA[4-31], bypassing all protection
569 * mechanisms.
571 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
572 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
573 return 0;
576 switch (type) {
577 case ACCESS_INT:
578 /* Integer load/store: the only access type allowed */
579 break;
580 case ACCESS_CODE:
581 /* No code fetch is allowed in direct-store areas */
582 return -4;
583 case ACCESS_FLOAT:
584 /* Floating point load/store */
585 return -4;
586 case ACCESS_RES:
587 /* lwarx, ldarx or stwcx. */
588 return -4;
589 case ACCESS_CACHE:
591 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
593 * Should make the instruction a no-op. As it already is a
594 * no-op, it's quite easy :-)
596 ctx->raddr = eaddr;
597 return 0;
598 case ACCESS_EXT:
599 /* eciwx or ecowx */
600 return -4;
601 default:
602 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
603 "address translation\n");
604 return -4;
606 if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
607 ctx->raddr = eaddr;
608 ret = 2;
609 } else {
610 ret = -2;
614 return ret;
617 /* Generic TLB check function for embedded PowerPC implementations */
618 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
619 hwaddr *raddrp,
620 target_ulong address, uint32_t pid, int ext,
621 int i)
623 target_ulong mask;
625 /* Check valid flag */
626 if (!(tlb->prot & PAGE_VALID)) {
627 return -1;
629 mask = ~(tlb->size - 1);
630 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
631 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
632 mask, (uint32_t)tlb->PID, tlb->prot);
633 /* Check PID */
634 if (tlb->PID != 0 && tlb->PID != pid) {
635 return -1;
637 /* Check effective address */
638 if ((address & mask) != tlb->EPN) {
639 return -1;
641 *raddrp = (tlb->RPN & mask) | (address & ~mask);
642 if (ext) {
643 /* Extend the physical address to 36 bits */
644 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
647 return 0;
650 /* Generic TLB search function for PowerPC embedded implementations */
651 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
652 uint32_t pid)
654 ppcemb_tlb_t *tlb;
655 hwaddr raddr;
656 int i, ret;
658 /* Default return value is no match */
659 ret = -1;
660 for (i = 0; i < env->nb_tlb; i++) {
661 tlb = &env->tlb.tlbe[i];
662 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
663 ret = i;
664 break;
668 return ret;
671 /* Helpers specific to PowerPC 40x implementations */
672 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
674 ppcemb_tlb_t *tlb;
675 int i;
677 for (i = 0; i < env->nb_tlb; i++) {
678 tlb = &env->tlb.tlbe[i];
679 tlb->prot &= ~PAGE_VALID;
681 tlb_flush(env_cpu(env));
684 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
685 target_ulong address, int rw,
686 int access_type)
688 ppcemb_tlb_t *tlb;
689 hwaddr raddr;
690 int i, ret, zsel, zpr, pr;
692 ret = -1;
693 raddr = (hwaddr)-1ULL;
694 pr = msr_pr;
695 for (i = 0; i < env->nb_tlb; i++) {
696 tlb = &env->tlb.tlbe[i];
697 if (ppcemb_tlb_check(env, tlb, &raddr, address,
698 env->spr[SPR_40x_PID], 0, i) < 0) {
699 continue;
701 zsel = (tlb->attr >> 4) & 0xF;
702 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
703 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
704 __func__, i, zsel, zpr, rw, tlb->attr);
705 /* Check execute enable bit */
706 switch (zpr) {
707 case 0x2:
708 if (pr != 0) {
709 goto check_perms;
711 /* fall through */
712 case 0x3:
713 /* All accesses granted */
714 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
715 ret = 0;
716 break;
717 case 0x0:
718 if (pr != 0) {
719 /* Raise Zone protection fault. */
720 env->spr[SPR_40x_ESR] = 1 << 22;
721 ctx->prot = 0;
722 ret = -2;
723 break;
725 /* fall through */
726 case 0x1:
727 check_perms:
728 /* Check from TLB entry */
729 ctx->prot = tlb->prot;
730 ret = check_prot(ctx->prot, rw, access_type);
731 if (ret == -2) {
732 env->spr[SPR_40x_ESR] = 0;
734 break;
736 if (ret >= 0) {
737 ctx->raddr = raddr;
738 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
739 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
740 ret);
741 return 0;
744 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
745 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
747 return ret;
750 void store_40x_sler(CPUPPCState *env, uint32_t val)
752 /* XXX: TO BE FIXED */
753 if (val != 0x00000000) {
754 cpu_abort(env_cpu(env),
755 "Little-endian regions are not supported yet\n");
757 env->spr[SPR_405_SLER] = val;
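/*
 * Match one BookE TLB entry against the access: try the current PID as well
 * as PID1/PID2, check the address-space bit against MSR[IR]/MSR[DR] and then
 * the user or supervisor RWX permissions.
 */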
760 static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
761 hwaddr *raddr, int *prot,
762 target_ulong address, int rw,
763 int access_type, int i)
765 int ret, prot2;
767 if (ppcemb_tlb_check(env, tlb, raddr, address,
768 env->spr[SPR_BOOKE_PID],
769 !env->nb_pids, i) >= 0) {
770 goto found_tlb;
773 if (env->spr[SPR_BOOKE_PID1] &&
774 ppcemb_tlb_check(env, tlb, raddr, address,
775 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
776 goto found_tlb;
779 if (env->spr[SPR_BOOKE_PID2] &&
780 ppcemb_tlb_check(env, tlb, raddr, address,
781 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
782 goto found_tlb;
785 LOG_SWTLB("%s: TLB entry not found\n", __func__);
786 return -1;
788 found_tlb:
790 if (msr_pr != 0) {
791 prot2 = tlb->prot & 0xF;
792 } else {
793 prot2 = (tlb->prot >> 4) & 0xF;
796 /* Check the address space */
797 if (access_type == ACCESS_CODE) {
798 if (msr_ir != (tlb->attr & 1)) {
799 LOG_SWTLB("%s: AS doesn't match\n", __func__);
800 return -1;
803 *prot = prot2;
804 if (prot2 & PAGE_EXEC) {
805 LOG_SWTLB("%s: good TLB!\n", __func__);
806 return 0;
809 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
810 ret = -3;
811 } else {
812 if (msr_dr != (tlb->attr & 1)) {
813 LOG_SWTLB("%s: AS doesn't match\n", __func__);
814 return -1;
817 *prot = prot2;
818 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
819 LOG_SWTLB("%s: found TLB!\n", __func__);
820 return 0;
823 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
824 ret = -2;
827 return ret;
830 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
831 target_ulong address, int rw,
832 int access_type)
834 ppcemb_tlb_t *tlb;
835 hwaddr raddr;
836 int i, ret;
838 ret = -1;
839 raddr = (hwaddr)-1ULL;
840 for (i = 0; i < env->nb_tlb; i++) {
841 tlb = &env->tlb.tlbe[i];
842 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
843 access_type, i);
844 if (ret != -1) {
845 break;
849 if (ret >= 0) {
850 ctx->raddr = raddr;
851 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
852 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
853 ret);
854 } else {
855 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
856 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
859 return ret;
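/*
 * Invalidate the BookE 2.06 TLB arrays selected by 'flags' (one bit per TLB);
 * when check_iprot is set, entries protected by MAS1[IPROT] are preserved.
 */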
862 static void booke206_flush_tlb(CPUPPCState *env, int flags,
863 const int check_iprot)
865 int tlb_size;
866 int i, j;
867 ppcmas_tlb_t *tlb = env->tlb.tlbm;
869 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
870 if (flags & (1 << i)) {
871 tlb_size = booke206_tlb_size(env, i);
872 for (j = 0; j < tlb_size; j++) {
873 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
874 tlb[j].mas1 &= ~MAS1_VALID;
878 tlb += booke206_tlb_size(env, i);
881 tlb_flush(env_cpu(env));
884 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
885 ppcmas_tlb_t *tlb)
887 int tlbm_size;
889 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
891 return 1024ULL << tlbm_size;
894 /* TLB check function for MAS based SoftTLBs */
895 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
896 hwaddr *raddrp, target_ulong address,
897 uint32_t pid)
899 hwaddr mask;
900 uint32_t tlb_pid;
902 if (!msr_cm) {
903 /* In 32bit mode we can only address 32bit EAs */
904 address = (uint32_t)address;
907 /* Check valid flag */
908 if (!(tlb->mas1 & MAS1_VALID)) {
909 return -1;
912 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
913 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
914 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
915 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
916 tlb->mas7_3, tlb->mas8);
918 /* Check PID */
919 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
920 if (tlb_pid != 0 && tlb_pid != pid) {
921 return -1;
924 /* Check effective address */
925 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
926 return -1;
929 if (raddrp) {
930 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
933 return 0;
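/*
 * The EPID MMU indexes are used for external-process-ID loads and stores,
 * which translate with the EPLC/EPSC context instead of the current PID/MSR
 * state; mmubooke206_esr() builds the ESR value reported on a fault.
 */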
936 static bool is_epid_mmu(int mmu_idx)
938 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
941 static uint32_t mmubooke206_esr(int mmu_idx, bool rw)
943 uint32_t esr = 0;
944 if (rw) {
945 esr |= ESR_ST;
947 if (is_epid_mmu(mmu_idx)) {
948 esr |= ESR_EPID;
950 return esr;
954 * Get the EPID register given the mmu_idx. If this is a regular load,
955 * construct the EPID access bits from the current processor state.
957 * Get the effective AS and PR bits and the PID. The PID is returned
958 * only if an EPID load is requested; otherwise the caller must detect
959 * the correct EPID. Return true if a valid EPID is returned.
961 static bool mmubooke206_get_as(CPUPPCState *env,
962 int mmu_idx, uint32_t *epid_out,
963 bool *as_out, bool *pr_out)
965 if (is_epid_mmu(mmu_idx)) {
966 uint32_t epidr;
967 if (mmu_idx == PPC_TLB_EPID_STORE) {
968 epidr = env->spr[SPR_BOOKE_EPSC];
969 } else {
970 epidr = env->spr[SPR_BOOKE_EPLC];
972 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
973 *as_out = !!(epidr & EPID_EAS);
974 *pr_out = !!(epidr & EPID_EPR);
975 return true;
976 } else {
977 *as_out = msr_ds;
978 *pr_out = msr_pr;
979 return false;
983 /* Check if the tlb found by hashing really matches */
984 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
985 hwaddr *raddr, int *prot,
986 target_ulong address, int rw,
987 int access_type, int mmu_idx)
989 int ret;
990 int prot2 = 0;
991 uint32_t epid;
992 bool as, pr;
993 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
995 if (!use_epid) {
996 if (ppcmas_tlb_check(env, tlb, raddr, address,
997 env->spr[SPR_BOOKE_PID]) >= 0) {
998 goto found_tlb;
1001 if (env->spr[SPR_BOOKE_PID1] &&
1002 ppcmas_tlb_check(env, tlb, raddr, address,
1003 env->spr[SPR_BOOKE_PID1]) >= 0) {
1004 goto found_tlb;
1007 if (env->spr[SPR_BOOKE_PID2] &&
1008 ppcmas_tlb_check(env, tlb, raddr, address,
1009 env->spr[SPR_BOOKE_PID2]) >= 0) {
1010 goto found_tlb;
1012 } else {
1013 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
1014 goto found_tlb;
1018 LOG_SWTLB("%s: TLB entry not found\n", __func__);
1019 return -1;
1021 found_tlb:
1023 if (pr) {
1024 if (tlb->mas7_3 & MAS3_UR) {
1025 prot2 |= PAGE_READ;
1027 if (tlb->mas7_3 & MAS3_UW) {
1028 prot2 |= PAGE_WRITE;
1030 if (tlb->mas7_3 & MAS3_UX) {
1031 prot2 |= PAGE_EXEC;
1033 } else {
1034 if (tlb->mas7_3 & MAS3_SR) {
1035 prot2 |= PAGE_READ;
1037 if (tlb->mas7_3 & MAS3_SW) {
1038 prot2 |= PAGE_WRITE;
1040 if (tlb->mas7_3 & MAS3_SX) {
1041 prot2 |= PAGE_EXEC;
1045 /* Check the address space and permissions */
1046 if (access_type == ACCESS_CODE) {
1047 /* There is no way to fetch code using epid load */
1048 assert(!use_epid);
1049 if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1050 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1051 return -1;
1054 *prot = prot2;
1055 if (prot2 & PAGE_EXEC) {
1056 LOG_SWTLB("%s: good TLB!\n", __func__);
1057 return 0;
1060 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
1061 ret = -3;
1062 } else {
1063 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1064 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1065 return -1;
1068 *prot = prot2;
1069 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
1070 LOG_SWTLB("%s: found TLB!\n", __func__);
1071 return 0;
1074 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
1075 ret = -2;
1078 return ret;
1081 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1082 target_ulong address, int rw,
1083 int access_type, int mmu_idx)
1085 ppcmas_tlb_t *tlb;
1086 hwaddr raddr;
1087 int i, j, ret;
1089 ret = -1;
1090 raddr = (hwaddr)-1ULL;
1092 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1093 int ways = booke206_tlb_ways(env, i);
1095 for (j = 0; j < ways; j++) {
1096 tlb = booke206_get_tlbm(env, i, address, j);
1097 if (!tlb) {
1098 continue;
1100 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
1101 rw, access_type, mmu_idx);
1102 if (ret != -1) {
1103 goto found_tlb;
1108 found_tlb:
1110 if (ret >= 0) {
1111 ctx->raddr = raddr;
1112 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1113 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1114 ret);
1115 } else {
1116 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1117 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1120 return ret;
1123 static const char *book3e_tsize_to_str[32] = {
1124 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1125 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1126 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1127 "1T", "2T"
1130 static void mmubooke_dump_mmu(CPUPPCState *env)
1132 ppcemb_tlb_t *entry;
1133 int i;
1135 if (kvm_enabled() && !env->kvm_sw_tlb) {
1136 qemu_printf("Cannot access KVM TLB\n");
1137 return;
1140 qemu_printf("\nTLB:\n");
1141 qemu_printf("Effective Physical Size PID Prot "
1142 "Attr\n");
1144 entry = &env->tlb.tlbe[0];
1145 for (i = 0; i < env->nb_tlb; i++, entry++) {
1146 hwaddr ea, pa;
1147 target_ulong mask;
1148 uint64_t size = (uint64_t)entry->size;
1149 char size_buf[20];
1151 /* Check valid flag */
1152 if (!(entry->prot & PAGE_VALID)) {
1153 continue;
1156 mask = ~(entry->size - 1);
1157 ea = entry->EPN & mask;
1158 pa = entry->RPN & mask;
1159 /* Extend the physical address to 36 bits */
1160 pa |= (hwaddr)(entry->RPN & 0xF) << 32;
1161 if (size >= 1 * MiB) {
1162 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
1163 } else {
1164 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
1166 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
1167 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
1168 entry->prot, entry->attr);
1173 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
1174 int tlbsize)
1176 ppcmas_tlb_t *entry;
1177 int i;
1179 qemu_printf("\nTLB%d:\n", tlbn);
1180 qemu_printf("Effective Physical Size TID TS SRWX"
1181 " URWX WIMGE U0123\n");
1183 entry = &env->tlb.tlbm[offset];
1184 for (i = 0; i < tlbsize; i++, entry++) {
1185 hwaddr ea, pa, size;
1186 int tsize;
1188 if (!(entry->mas1 & MAS1_VALID)) {
1189 continue;
1192 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
1193 size = 1024ULL << tsize;
1194 ea = entry->mas2 & ~(size - 1);
1195 pa = entry->mas7_3 & ~(size - 1);
1197 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
1198 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1199 (uint64_t)ea, (uint64_t)pa,
1200 book3e_tsize_to_str[tsize],
1201 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
1202 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
1203 entry->mas7_3 & MAS3_SR ? 'R' : '-',
1204 entry->mas7_3 & MAS3_SW ? 'W' : '-',
1205 entry->mas7_3 & MAS3_SX ? 'X' : '-',
1206 entry->mas7_3 & MAS3_UR ? 'R' : '-',
1207 entry->mas7_3 & MAS3_UW ? 'W' : '-',
1208 entry->mas7_3 & MAS3_UX ? 'X' : '-',
1209 entry->mas2 & MAS2_W ? 'W' : '-',
1210 entry->mas2 & MAS2_I ? 'I' : '-',
1211 entry->mas2 & MAS2_M ? 'M' : '-',
1212 entry->mas2 & MAS2_G ? 'G' : '-',
1213 entry->mas2 & MAS2_E ? 'E' : '-',
1214 entry->mas7_3 & MAS3_U0 ? '0' : '-',
1215 entry->mas7_3 & MAS3_U1 ? '1' : '-',
1216 entry->mas7_3 & MAS3_U2 ? '2' : '-',
1217 entry->mas7_3 & MAS3_U3 ? '3' : '-');
1221 static void mmubooke206_dump_mmu(CPUPPCState *env)
1223 int offset = 0;
1224 int i;
1226 if (kvm_enabled() && !env->kvm_sw_tlb) {
1227 qemu_printf("Cannot access KVM TLB\n");
1228 return;
1231 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1232 int size = booke206_tlb_size(env, i);
1234 if (size == 0) {
1235 continue;
1238 mmubooke206_dump_one_tlb(env, i, offset, size);
1239 offset += size;
1243 static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
1245 target_ulong *BATlt, *BATut, *BATu, *BATl;
1246 target_ulong BEPIl, BEPIu, bl;
1247 int i;
1249 switch (type) {
1250 case ACCESS_CODE:
1251 BATlt = env->IBAT[1];
1252 BATut = env->IBAT[0];
1253 break;
1254 default:
1255 BATlt = env->DBAT[1];
1256 BATut = env->DBAT[0];
1257 break;
1260 for (i = 0; i < env->nb_BATs; i++) {
1261 BATu = &BATut[i];
1262 BATl = &BATlt[i];
1263 BEPIu = *BATu & 0xF0000000;
1264 BEPIl = *BATu & 0x0FFE0000;
1265 bl = (*BATu & 0x00001FFC) << 15;
1266 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1267 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
1268 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
1269 type == ACCESS_CODE ? "code" : "data", i,
1270 *BATu, *BATl, BEPIu, BEPIl, bl);
1274 static void mmu6xx_dump_mmu(CPUPPCState *env)
1276 PowerPCCPU *cpu = env_archcpu(env);
1277 ppc6xx_tlb_t *tlb;
1278 target_ulong sr;
1279 int type, way, entry, i;
1281 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
1282 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));
1284 qemu_printf("\nSegment registers:\n");
1285 for (i = 0; i < 32; i++) {
1286 sr = env->sr[i];
1287 if (sr & 0x80000000) {
1288 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1289 "CNTLR_SPEC=0x%05x\n", i,
1290 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1291 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
1292 (uint32_t)(sr & 0xFFFFF));
1293 } else {
1294 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
1295 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1296 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
1297 (uint32_t)(sr & 0x00FFFFFF));
1301 qemu_printf("\nBATs:\n");
1302 mmu6xx_dump_BATs(env, ACCESS_INT);
1303 mmu6xx_dump_BATs(env, ACCESS_CODE);
1305 if (env->id_tlbs != 1) {
1306 qemu_printf("ERROR: 6xx MMU should have separate TLBs"
1307 " for code and data\n");
1310 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1312 for (type = 0; type < 2; type++) {
1313 for (way = 0; way < env->nb_ways; way++) {
1314 for (entry = env->nb_tlb * type + env->tlb_per_way * way;
1315 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
1316 entry++) {
1318 tlb = &env->tlb.tlb6[entry];
1319 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1320 TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
1321 type ? "code" : "data", entry % env->nb_tlb,
1322 env->nb_tlb, way,
1323 pte_is_valid(tlb->pte0) ? "valid" : "inval",
1324 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
1330 void dump_mmu(CPUPPCState *env)
1332 switch (env->mmu_model) {
1333 case POWERPC_MMU_BOOKE:
1334 mmubooke_dump_mmu(env);
1335 break;
1336 case POWERPC_MMU_BOOKE206:
1337 mmubooke206_dump_mmu(env);
1338 break;
1339 case POWERPC_MMU_SOFT_6xx:
1340 case POWERPC_MMU_SOFT_74xx:
1341 mmu6xx_dump_mmu(env);
1342 break;
1343 #if defined(TARGET_PPC64)
1344 case POWERPC_MMU_64B:
1345 case POWERPC_MMU_2_03:
1346 case POWERPC_MMU_2_06:
1347 case POWERPC_MMU_2_07:
1348 dump_slb(env_archcpu(env));
1349 break;
1350 case POWERPC_MMU_3_00:
1351 if (ppc64_v3_radix(env_archcpu(env))) {
1352 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
1353 __func__);
1354 } else {
1355 dump_slb(env_archcpu(env));
1357 break;
1358 #endif
1359 default:
1360 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
1364 static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
1365 target_ulong eaddr, int rw)
1367 int in_plb, ret;
1369 ctx->raddr = eaddr;
1370 ctx->prot = PAGE_READ | PAGE_EXEC;
1371 ret = 0;
1372 switch (env->mmu_model) {
1373 case POWERPC_MMU_SOFT_6xx:
1374 case POWERPC_MMU_SOFT_74xx:
1375 case POWERPC_MMU_SOFT_4xx:
1376 case POWERPC_MMU_REAL:
1377 case POWERPC_MMU_BOOKE:
1378 ctx->prot |= PAGE_WRITE;
1379 break;
1381 case POWERPC_MMU_SOFT_4xx_Z:
1382 if (unlikely(msr_pe != 0)) {
1384 * The 403 family adds some particular protections, using the
1385 * PBL/PBU registers for accesses with no translation.
1387 in_plb =
1388 /* Check PLB validity */
1389 (env->pb[0] < env->pb[1] &&
1390 /* and address in plb area */
1391 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
1392 (env->pb[2] < env->pb[3] &&
1393 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
1394 if (in_plb ^ msr_px) {
1395 /* Access in protected area */
1396 if (rw == 1) {
1397 /* Access is not allowed */
1398 ret = -2;
1400 } else {
1401 /* Read-write access is allowed */
1402 ctx->prot |= PAGE_WRITE;
1405 break;
1407 default:
1408 /* Caller's checks mean we should never get here for other models */
1409 abort();
1410 return -1;
1413 return ret;
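/*
 * Top-level translation dispatch: pass addresses through untranslated when
 * MSR[IR] (code) or MSR[DR] (data) is clear on non-BookE models, otherwise
 * hand the access to the MMU-model specific translation routine.
 */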
1416 static int get_physical_address_wtlb(
1417 CPUPPCState *env, mmu_ctx_t *ctx,
1418 target_ulong eaddr, int rw, int access_type,
1419 int mmu_idx)
1421 int ret = -1;
1422 bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0)
1423 || (access_type != ACCESS_CODE && msr_dr == 0);
1425 switch (env->mmu_model) {
1426 case POWERPC_MMU_SOFT_6xx:
1427 case POWERPC_MMU_SOFT_74xx:
1428 if (real_mode) {
1429 ret = check_physical(env, ctx, eaddr, rw);
1430 } else {
1431 /* Try to find a BAT */
1432 if (env->nb_BATs != 0) {
1433 ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type);
1435 if (ret < 0) {
1436 /* We didn't match any BAT entry or don't have BATs */
1437 ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type);
1440 break;
1442 case POWERPC_MMU_SOFT_4xx:
1443 case POWERPC_MMU_SOFT_4xx_Z:
1444 if (real_mode) {
1445 ret = check_physical(env, ctx, eaddr, rw);
1446 } else {
1447 ret = mmu40x_get_physical_address(env, ctx, eaddr,
1448 rw, access_type);
1450 break;
1451 case POWERPC_MMU_BOOKE:
1452 ret = mmubooke_get_physical_address(env, ctx, eaddr,
1453 rw, access_type);
1454 break;
1455 case POWERPC_MMU_BOOKE206:
1456 ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
1457 access_type, mmu_idx);
1458 break;
1459 case POWERPC_MMU_MPC8xx:
1460 /* XXX: TODO */
1461 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
1462 break;
1463 case POWERPC_MMU_REAL:
1464 if (real_mode) {
1465 ret = check_physical(env, ctx, eaddr, rw);
1466 } else {
1467 cpu_abort(env_cpu(env),
1468 "PowerPC in real mode does not do any translation\n");
1470 return -1;
1471 default:
1472 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
1473 return -1;
1476 return ret;
1479 static int get_physical_address(
1480 CPUPPCState *env, mmu_ctx_t *ctx,
1481 target_ulong eaddr, int rw, int access_type)
1483 return get_physical_address_wtlb(env, ctx, eaddr, rw, access_type, 0);
1486 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1488 PowerPCCPU *cpu = POWERPC_CPU(cs);
1489 CPUPPCState *env = &cpu->env;
1490 mmu_ctx_t ctx;
1492 switch (env->mmu_model) {
1493 #if defined(TARGET_PPC64)
1494 case POWERPC_MMU_64B:
1495 case POWERPC_MMU_2_03:
1496 case POWERPC_MMU_2_06:
1497 case POWERPC_MMU_2_07:
1498 return ppc_hash64_get_phys_page_debug(cpu, addr);
1499 case POWERPC_MMU_3_00:
1500 return ppc64_v3_get_phys_page_debug(cpu, addr);
1501 #endif
1503 case POWERPC_MMU_32B:
1504 case POWERPC_MMU_601:
1505 return ppc_hash32_get_phys_page_debug(cpu, addr);
1507 default:
1511 if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
1514 * Some MMUs have separate TLBs for code and data. If we only
1515 * try an ACCESS_INT, we may not be able to read instructions
1516 * mapped by code TLBs, so we also try an ACCESS_CODE.
1518 if (unlikely(get_physical_address(env, &ctx, addr, 0,
1519 ACCESS_CODE) != 0)) {
1520 return -1;
1524 return ctx.raddr & TARGET_PAGE_MASK;
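/*
 * On a BookE 2.06 TLB miss, preload the MAS registers from the MAS4 defaults
 * and the faulting address, AS and PID so the guest miss handler can write
 * the new entry directly; the ESEL/NV fields implement a simple round-robin
 * victim selection.
 */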
1527 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
1528 int rw, int mmu_idx)
1530 uint32_t epid;
1531 bool as, pr;
1532 uint32_t missed_tid = 0;
1533 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
1534 if (rw == 2) {
1535 as = msr_ir;
1537 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
1538 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
1539 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
1540 env->spr[SPR_BOOKE_MAS3] = 0;
1541 env->spr[SPR_BOOKE_MAS6] = 0;
1542 env->spr[SPR_BOOKE_MAS7] = 0;
1544 /* AS */
1545 if (as) {
1546 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
1547 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
1550 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
1551 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;
1553 if (!use_epid) {
1554 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
1555 case MAS4_TIDSELD_PID0:
1556 missed_tid = env->spr[SPR_BOOKE_PID];
1557 break;
1558 case MAS4_TIDSELD_PID1:
1559 missed_tid = env->spr[SPR_BOOKE_PID1];
1560 break;
1561 case MAS4_TIDSELD_PID2:
1562 missed_tid = env->spr[SPR_BOOKE_PID2];
1563 break;
1565 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
1566 } else {
1567 missed_tid = epid;
1568 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
1570 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);
1573 /* next victim logic */
1574 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
1575 env->last_way++;
1576 env->last_way &= booke206_tlb_ways(env, 0) - 1;
1577 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
1580 /* Perform address translation */
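/*
 * On success the translation is installed in the QEMU TLB; on failure the
 * exception state (exception_index, error_code and the relevant SPRs) is
 * set up for the caller to raise.
 */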
1581 static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
1582 int rw, int mmu_idx)
1584 CPUState *cs = env_cpu(env);
1585 PowerPCCPU *cpu = POWERPC_CPU(cs);
1586 mmu_ctx_t ctx;
1587 int access_type;
1588 int ret = 0;
1590 if (rw == 2) {
1591 /* code access */
1592 rw = 0;
1593 access_type = ACCESS_CODE;
1594 } else {
1595 /* data access */
1596 access_type = env->access_type;
1598 ret = get_physical_address_wtlb(env, &ctx, address, rw,
1599 access_type, mmu_idx);
1600 if (ret == 0) {
1601 tlb_set_page(cs, address & TARGET_PAGE_MASK,
1602 ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
1603 mmu_idx, TARGET_PAGE_SIZE);
1604 ret = 0;
1605 } else if (ret < 0) {
1606 LOG_MMU_STATE(cs);
1607 if (access_type == ACCESS_CODE) {
1608 switch (ret) {
1609 case -1:
1610 /* No matches in page tables or TLB */
1611 switch (env->mmu_model) {
1612 case POWERPC_MMU_SOFT_6xx:
1613 cs->exception_index = POWERPC_EXCP_IFTLB;
1614 env->error_code = 1 << 18;
1615 env->spr[SPR_IMISS] = address;
1616 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
1617 goto tlb_miss;
1618 case POWERPC_MMU_SOFT_74xx:
1619 cs->exception_index = POWERPC_EXCP_IFTLB;
1620 goto tlb_miss_74xx;
1621 case POWERPC_MMU_SOFT_4xx:
1622 case POWERPC_MMU_SOFT_4xx_Z:
1623 cs->exception_index = POWERPC_EXCP_ITLB;
1624 env->error_code = 0;
1625 env->spr[SPR_40x_DEAR] = address;
1626 env->spr[SPR_40x_ESR] = 0x00000000;
1627 break;
1628 case POWERPC_MMU_BOOKE206:
1629 booke206_update_mas_tlb_miss(env, address, 2, mmu_idx);
1630 /* fall through */
1631 case POWERPC_MMU_BOOKE:
1632 cs->exception_index = POWERPC_EXCP_ITLB;
1633 env->error_code = 0;
1634 env->spr[SPR_BOOKE_DEAR] = address;
1635 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, 0);
1636 return -1;
1637 case POWERPC_MMU_MPC8xx:
1638 /* XXX: TODO */
1639 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1640 break;
1641 case POWERPC_MMU_REAL:
1642 cpu_abort(cs, "PowerPC in real mode should never raise "
1643 "any MMU exceptions\n");
1644 return -1;
1645 default:
1646 cpu_abort(cs, "Unknown or invalid MMU model\n");
1647 return -1;
1649 break;
1650 case -2:
1651 /* Access rights violation */
1652 cs->exception_index = POWERPC_EXCP_ISI;
1653 env->error_code = 0x08000000;
1654 break;
1655 case -3:
1656 /* No execute protection violation */
1657 if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1658 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1659 env->spr[SPR_BOOKE_ESR] = 0x00000000;
1661 cs->exception_index = POWERPC_EXCP_ISI;
1662 env->error_code = 0x10000000;
1663 break;
1664 case -4:
1665 /* Direct store exception */
1666 /* No code fetch is allowed in direct-store areas */
1667 cs->exception_index = POWERPC_EXCP_ISI;
1668 env->error_code = 0x10000000;
1669 break;
1671 } else {
1672 switch (ret) {
1673 case -1:
1674 /* No matches in page tables or TLB */
1675 switch (env->mmu_model) {
1676 case POWERPC_MMU_SOFT_6xx:
1677 if (rw == 1) {
1678 cs->exception_index = POWERPC_EXCP_DSTLB;
1679 env->error_code = 1 << 16;
1680 } else {
1681 cs->exception_index = POWERPC_EXCP_DLTLB;
1682 env->error_code = 0;
1684 env->spr[SPR_DMISS] = address;
1685 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
1686 tlb_miss:
1687 env->error_code |= ctx.key << 19;
1688 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
1689 get_pteg_offset32(cpu, ctx.hash[0]);
1690 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
1691 get_pteg_offset32(cpu, ctx.hash[1]);
1692 break;
1693 case POWERPC_MMU_SOFT_74xx:
1694 if (rw == 1) {
1695 cs->exception_index = POWERPC_EXCP_DSTLB;
1696 } else {
1697 cs->exception_index = POWERPC_EXCP_DLTLB;
1699 tlb_miss_74xx:
1700 /* Implement LRU algorithm */
1701 env->error_code = ctx.key << 19;
1702 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
1703 ((env->last_way + 1) & (env->nb_ways - 1));
1704 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
1705 break;
1706 case POWERPC_MMU_SOFT_4xx:
1707 case POWERPC_MMU_SOFT_4xx_Z:
1708 cs->exception_index = POWERPC_EXCP_DTLB;
1709 env->error_code = 0;
1710 env->spr[SPR_40x_DEAR] = address;
1711 if (rw) {
1712 env->spr[SPR_40x_ESR] = 0x00800000;
1713 } else {
1714 env->spr[SPR_40x_ESR] = 0x00000000;
1716 break;
1717 case POWERPC_MMU_MPC8xx:
1718 /* XXX: TODO */
1719 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1720 break;
1721 case POWERPC_MMU_BOOKE206:
1722 booke206_update_mas_tlb_miss(env, address, rw, mmu_idx);
1723 /* fall through */
1724 case POWERPC_MMU_BOOKE:
1725 cs->exception_index = POWERPC_EXCP_DTLB;
1726 env->error_code = 0;
1727 env->spr[SPR_BOOKE_DEAR] = address;
1728 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw);
1729 return -1;
1730 case POWERPC_MMU_REAL:
1731 cpu_abort(cs, "PowerPC in real mode should never raise "
1732 "any MMU exceptions\n");
1733 return -1;
1734 default:
1735 cpu_abort(cs, "Unknown or invalid MMU model\n");
1736 return -1;
1738 break;
1739 case -2:
1740 /* Access rights violation */
1741 cs->exception_index = POWERPC_EXCP_DSI;
1742 env->error_code = 0;
1743 if (env->mmu_model == POWERPC_MMU_SOFT_4xx
1744 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
1745 env->spr[SPR_40x_DEAR] = address;
1746 if (rw) {
1747 env->spr[SPR_40x_ESR] |= 0x00800000;
1749 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1750 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1751 env->spr[SPR_BOOKE_DEAR] = address;
1752 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, rw);
1753 } else {
1754 env->spr[SPR_DAR] = address;
1755 if (rw == 1) {
1756 env->spr[SPR_DSISR] = 0x0A000000;
1757 } else {
1758 env->spr[SPR_DSISR] = 0x08000000;
1761 break;
1762 case -4:
1763 /* Direct store exception */
1764 switch (access_type) {
1765 case ACCESS_FLOAT:
1766 /* Floating point load/store */
1767 cs->exception_index = POWERPC_EXCP_ALIGN;
1768 env->error_code = POWERPC_EXCP_ALIGN_FP;
1769 env->spr[SPR_DAR] = address;
1770 break;
1771 case ACCESS_RES:
1772 /* lwarx, ldarx or stwcx. */
1773 cs->exception_index = POWERPC_EXCP_DSI;
1774 env->error_code = 0;
1775 env->spr[SPR_DAR] = address;
1776 if (rw == 1) {
1777 env->spr[SPR_DSISR] = 0x06000000;
1778 } else {
1779 env->spr[SPR_DSISR] = 0x04000000;
1781 break;
1782 case ACCESS_EXT:
1783 /* eciwx or ecowx */
1784 cs->exception_index = POWERPC_EXCP_DSI;
1785 env->error_code = 0;
1786 env->spr[SPR_DAR] = address;
1787 if (rw == 1) {
1788 env->spr[SPR_DSISR] = 0x06100000;
1789 } else {
1790 env->spr[SPR_DSISR] = 0x04100000;
1792 break;
1793 default:
1794 printf("DSI: invalid exception (%d)\n", ret);
1795 cs->exception_index = POWERPC_EXCP_PROGRAM;
1796 env->error_code =
1797 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
1798 env->spr[SPR_DAR] = address;
1799 break;
1801 break;
1804 ret = 1;
1807 return ret;
1810 /*****************************************************************************/
1811 /* BATs management */
1812 #if !defined(FLUSH_ALL_TLBS)
1813 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
1814 target_ulong mask)
1816 CPUState *cs = env_cpu(env);
1817 target_ulong base, end, page;
1819 base = BATu & ~0x0001FFFF;
1820 end = base + mask + 0x00020000;
1821 if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
1822 /* Flushing 1024 4K pages is slower than a complete flush */
1823 LOG_BATS("Flush all BATs\n");
1824 tlb_flush(cs);
1825 LOG_BATS("Flush done\n");
1826 return;
1828 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
1829 TARGET_FMT_lx ")\n", base, end, mask);
1830 for (page = base; page != end; page += TARGET_PAGE_SIZE) {
1831 tlb_flush_page(cs, page);
1833 LOG_BATS("Flush done\n");
1835 #endif
1837 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
1838 target_ulong value)
1840 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
1841 nr, ul == 0 ? 'u' : 'l', value, env->nip);
1844 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1846 target_ulong mask;
1847 #if defined(FLUSH_ALL_TLBS)
1848 PowerPCCPU *cpu = env_archcpu(env);
1849 #endif
1851 dump_store_bat(env, 'I', 0, nr, value);
1852 if (env->IBAT[0][nr] != value) {
1853 mask = (value << 15) & 0x0FFE0000UL;
1854 #if !defined(FLUSH_ALL_TLBS)
1855 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1856 #endif
1858 * When storing valid upper BAT, mask BEPI and BRPN and
1859 * invalidate all TLBs covered by this BAT
1861 mask = (value << 15) & 0x0FFE0000UL;
1862 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1863 (value & ~0x0001FFFFUL & ~mask);
1864 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
1865 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
1866 #if !defined(FLUSH_ALL_TLBS)
1867 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1868 #else
1869 tlb_flush(env_cpu(env));
1870 #endif
1874 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1876 dump_store_bat(env, 'I', 1, nr, value);
1877 env->IBAT[1][nr] = value;
1880 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1882 target_ulong mask;
1883 #if defined(FLUSH_ALL_TLBS)
1884 PowerPCCPU *cpu = env_archcpu(env);
1885 #endif
1887 dump_store_bat(env, 'D', 0, nr, value);
1888 if (env->DBAT[0][nr] != value) {
1890 * When storing valid upper BAT, mask BEPI and BRPN and
1891 * invalidate all TLBs covered by this BAT
1893 mask = (value << 15) & 0x0FFE0000UL;
1894 #if !defined(FLUSH_ALL_TLBS)
1895 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1896 #endif
1897 mask = (value << 15) & 0x0FFE0000UL;
1898 env->DBAT[0][nr] = (value & 0x00001FFFUL) |
1899 (value & ~0x0001FFFFUL & ~mask);
1900 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
1901 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
1902 #if !defined(FLUSH_ALL_TLBS)
1903 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1904 #else
1905 tlb_flush(env_cpu(env));
1906 #endif
1910 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1912 dump_store_bat(env, 'D', 1, nr, value);
1913 env->DBAT[1][nr] = value;
1916 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
1918 target_ulong mask;
1919 #if defined(FLUSH_ALL_TLBS)
1920 PowerPCCPU *cpu = env_archcpu(env);
1921 int do_inval;
1922 #endif
1924 dump_store_bat(env, 'I', 0, nr, value);
1925 if (env->IBAT[0][nr] != value) {
1926 #if defined(FLUSH_ALL_TLBS)
1927 do_inval = 0;
1928 #endif
1929 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1930 if (env->IBAT[1][nr] & 0x40) {
1931 /* Invalidate BAT only if it is valid */
1932 #if !defined(FLUSH_ALL_TLBS)
1933 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1934 #else
1935 do_inval = 1;
1936 #endif
1939 * When storing valid upper BAT, mask BEPI and BRPN and
1940 * invalidate all TLBs covered by this BAT
1942 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1943 (value & ~0x0001FFFFUL & ~mask);
1944 env->DBAT[0][nr] = env->IBAT[0][nr];
1945 if (env->IBAT[1][nr] & 0x40) {
1946 #if !defined(FLUSH_ALL_TLBS)
1947 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1948 #else
1949 do_inval = 1;
1950 #endif
1952 #if defined(FLUSH_ALL_TLBS)
1953 if (do_inval) {
1954 tlb_flush(env_cpu(env));
1956 #endif
1960 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
1962 #if !defined(FLUSH_ALL_TLBS)
1963 target_ulong mask;
1964 #else
1965 PowerPCCPU *cpu = env_archcpu(env);
1966 int do_inval;
1967 #endif
1969 dump_store_bat(env, 'I', 1, nr, value);
1970 if (env->IBAT[1][nr] != value) {
1971 #if defined(FLUSH_ALL_TLBS)
1972 do_inval = 0;
1973 #endif
1974 if (env->IBAT[1][nr] & 0x40) {
1975 #if !defined(FLUSH_ALL_TLBS)
1976 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1977 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1978 #else
1979 do_inval = 1;
1980 #endif
1982 if (value & 0x40) {
1983 #if !defined(FLUSH_ALL_TLBS)
1984 mask = (value << 17) & 0x0FFE0000UL;
1985 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1986 #else
1987 do_inval = 1;
1988 #endif
1990 env->IBAT[1][nr] = value;
1991 env->DBAT[1][nr] = value;
1992 #if defined(FLUSH_ALL_TLBS)
1993 if (do_inval) {
1994 tlb_flush(env_cpu(env));
1996 #endif
2000 /*****************************************************************************/
2001 /* TLB management */
2002 void ppc_tlb_invalidate_all(CPUPPCState *env)
2004 #if defined(TARGET_PPC64)
2005 if (mmu_is_64bit(env->mmu_model)) {
2006 env->tlb_need_flush = 0;
2007 tlb_flush(env_cpu(env));
2008 } else
2009 #endif /* defined(TARGET_PPC64) */
2010 switch (env->mmu_model) {
2011 case POWERPC_MMU_SOFT_6xx:
2012 case POWERPC_MMU_SOFT_74xx:
2013 ppc6xx_tlb_invalidate_all(env);
2014 break;
2015 case POWERPC_MMU_SOFT_4xx:
2016 case POWERPC_MMU_SOFT_4xx_Z:
2017 ppc4xx_tlb_invalidate_all(env);
2018 break;
2019 case POWERPC_MMU_REAL:
2020 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
2021 break;
2022 case POWERPC_MMU_MPC8xx:
2023 /* XXX: TODO */
2024 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
2025 break;
2026 case POWERPC_MMU_BOOKE:
2027 tlb_flush(env_cpu(env));
2028 break;
2029 case POWERPC_MMU_BOOKE206:
2030 booke206_flush_tlb(env, -1, 0);
2031 break;
2032 case POWERPC_MMU_32B:
2033 case POWERPC_MMU_601:
2034 env->tlb_need_flush = 0;
2035 tlb_flush(env_cpu(env));
2036 break;
2037 default:
2038 /* XXX: TODO */
2039 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
2040 break;
2044 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
2046 #if !defined(FLUSH_ALL_TLBS)
2047 addr &= TARGET_PAGE_MASK;
2048 #if defined(TARGET_PPC64)
2049 if (mmu_is_64bit(env->mmu_model)) {
2050 /* tlbie invalidates TLBs for all segments */
2052 * XXX: given that there are too many segments to invalidate,
2053 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2054 * we just invalidate all TLBs
2056 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2057 } else
2058 #endif /* defined(TARGET_PPC64) */
2059 switch (env->mmu_model) {
2060 case POWERPC_MMU_SOFT_6xx:
2061 case POWERPC_MMU_SOFT_74xx:
2062 ppc6xx_tlb_invalidate_virt(env, addr, 0);
2063 if (env->id_tlbs == 1) {
2064 ppc6xx_tlb_invalidate_virt(env, addr, 1);
2066 break;
2067 case POWERPC_MMU_32B:
2068 case POWERPC_MMU_601:
2070 * Actual CPUs invalidate entire congruence classes based on
2071 * the geometry of their TLBs and some OSes take that into
2072 * account; we just mark the TLB to be flushed later (context-
2073 * synchronizing event or sync instruction on 32-bit).
2075 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2076 break;
2077 default:
2078 /* Should never reach here with other MMU models */
2079 assert(0);
2081 #else
2082 ppc_tlb_invalidate_all(env);
2083 #endif
2086 /*****************************************************************************/
2087 /* Special registers manipulation */
2088 void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
2090 PowerPCCPU *cpu = env_archcpu(env);
2091 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
2092 assert(!cpu->vhyp);
2093 #if defined(TARGET_PPC64)
2094 if (mmu_is_64bit(env->mmu_model)) {
2095 target_ulong sdr_mask = SDR_64_HTABORG | SDR_64_HTABSIZE;
2096 target_ulong htabsize = value & SDR_64_HTABSIZE;
2098 if (value & ~sdr_mask) {
2099 error_report("Invalid bits 0x"TARGET_FMT_lx" set in SDR1",
2100 value & ~sdr_mask);
2101 value &= sdr_mask;
2103 if (htabsize > 28) {
2104 error_report("Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
2105 htabsize);
2106 return;
2109 #endif /* defined(TARGET_PPC64) */
2110 /* FIXME: Should check for valid HTABMASK values in 32-bit case */
2111 env->spr[SPR_SDR1] = value;
2114 #if defined(TARGET_PPC64)
2115 void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
2117 PowerPCCPU *cpu = env_archcpu(env);
2118 target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
2119 target_ulong patbsize = value & PTCR_PATS;
2121 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
2123 assert(!cpu->vhyp);
2124 assert(env->mmu_model & POWERPC_MMU_3_00);
2126 if (value & ~ptcr_mask) {
2127 error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
2128 value & ~ptcr_mask);
2129 value &= ptcr_mask;
2132 if (patbsize > 24) {
2133 error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
2134 " stored in PTCR", patbsize);
2135 return;
2138 env->spr[SPR_PTCR] = value;
2141 #endif /* defined(TARGET_PPC64) */
2143 /* Segment registers load and store */
2144 target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
2146 #if defined(TARGET_PPC64)
2147 if (mmu_is_64bit(env->mmu_model)) {
2148 /* XXX */
2149 return 0;
2151 #endif
2152 return env->sr[sr_num];
2155 void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
2157 qemu_log_mask(CPU_LOG_MMU,
2158 "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
2159 (int)srnum, value, env->sr[srnum]);
2160 #if defined(TARGET_PPC64)
2161 if (mmu_is_64bit(env->mmu_model)) {
2162 PowerPCCPU *cpu = env_archcpu(env);
2163 uint64_t esid, vsid;
2165 /* ESID = srnum */
2166 esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
2168 /* VSID: low 28 bits of the SR value go into the SLB VSID field */
2169 vsid = (value & 0xfffffff) << 12;
2170 /* flags: SR key/protection bits go into the SLB VSID flag bits */
2171 vsid |= ((value >> 27) & 0xf) << 8;
2173 ppc_store_slb(cpu, srnum, esid, vsid);
2174 } else
2175 #endif
2176 if (env->sr[srnum] != value) {
2177 env->sr[srnum] = value;
2178 /*
2179 * Invalidating 256MB of virtual memory in 4kB pages takes way
2180 * longer than flushing the whole TLB.
2181 */
2182 #if !defined(FLUSH_ALL_TLBS) && 0
2184 target_ulong page, end;
2185 /* Invalidate 256 MB of virtual memory */
2186 page = (16 << 20) * srnum;
2187 end = page + (16 << 20);
2188 for (; page != end; page += TARGET_PAGE_SIZE) {
2189 tlb_flush_page(env_cpu(env), page);
2192 #else
2193 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2194 #endif
2198 /* TLB management */
2199 void helper_tlbia(CPUPPCState *env)
2201 ppc_tlb_invalidate_all(env);
2204 void helper_tlbie(CPUPPCState *env, target_ulong addr)
2206 ppc_tlb_invalidate_one(env, addr);
2209 void helper_tlbiva(CPUPPCState *env, target_ulong addr)
2211 /* tlbiva instruction only exists on BookE */
2212 assert(env->mmu_model == POWERPC_MMU_BOOKE);
2213 /* XXX: TODO */
2214 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
2217 /* Software driven TLBs management */
2218 /* PowerPC 602/603 software TLB load instructions helpers */
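/*
 * On a 602/603 software TLB miss the miss handler loads the [ID]CMP,
 * [ID]MISS and RPA SPRs and issues the TLB load instructions (tlbld/tlbli);
 * the helpers below then copy those SPRs into the selected way
 * ((SRR1 >> 17) & 1) of the software TLB.
 */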
2219 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2221 target_ulong RPN, CMP, EPN;
2222 int way;
2224 RPN = env->spr[SPR_RPA];
2225 if (is_code) {
2226 CMP = env->spr[SPR_ICMP];
2227 EPN = env->spr[SPR_IMISS];
2228 } else {
2229 CMP = env->spr[SPR_DCMP];
2230 EPN = env->spr[SPR_DMISS];
2232 way = (env->spr[SPR_SRR1] >> 17) & 1;
2233 (void)EPN; /* avoid a compiler warning */
2234 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2235 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2236 RPN, way);
2237 /* Store this TLB */
2238 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2239 way, is_code, CMP, RPN);
2242 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
2244 do_6xx_tlb(env, EPN, 0);
2247 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
2249 do_6xx_tlb(env, EPN, 1);
2252 /* PowerPC 74xx software TLB load instructions helpers */
2253 static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2255 target_ulong RPN, CMP, EPN;
2256 int way;
2258 RPN = env->spr[SPR_PTELO];
2259 CMP = env->spr[SPR_PTEHI];
2260 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2261 way = env->spr[SPR_TLBMISS] & 0x3;
2262 (void)EPN; /* avoid a compiler warning */
2263 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2264 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2265 RPN, way);
2266 /* Store this TLB */
2267 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2268 way, is_code, CMP, RPN);
2271 void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
2273 do_74xx_tlb(env, EPN, 0);
2276 void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
2278 do_74xx_tlb(env, EPN, 1);
2281 /*****************************************************************************/
2282 /* PowerPC 601 specific instructions (POWER bridge) */
2284 target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
2286 mmu_ctx_t ctx;
2287 int nb_BATs;
2288 target_ulong ret = 0;
2291 * We don't have to generate many instances of this instruction,
2292 * as rac is supervisor only.
2294 * XXX: FIX THIS: Pretend we have no BAT
2296 nb_BATs = env->nb_BATs;
2297 env->nb_BATs = 0;
2298 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
2299 ret = ctx.raddr;
2301 env->nb_BATs = nb_BATs;
2302 return ret;
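/*
 * BookE TLB size encoding used below: page size = 1KB << (2 * size),
 * i.e. size 0 -> 1KB, 1 -> 4KB, ... 0x9 -> 256MB, 0xA -> 1GB, and up to
 * 0xF -> 1TB on 64-bit targets.  booke_page_size_to_tlb() is the inverse
 * mapping and returns -1 for unsupported page sizes.
 */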
2305 static inline target_ulong booke_tlb_to_page_size(int size)
2307 return 1024 << (2 * size);
2310 static inline int booke_page_size_to_tlb(target_ulong page_size)
2312 int size;
2314 switch (page_size) {
2315 case 0x00000400UL:
2316 size = 0x0;
2317 break;
2318 case 0x00001000UL:
2319 size = 0x1;
2320 break;
2321 case 0x00004000UL:
2322 size = 0x2;
2323 break;
2324 case 0x00010000UL:
2325 size = 0x3;
2326 break;
2327 case 0x00040000UL:
2328 size = 0x4;
2329 break;
2330 case 0x00100000UL:
2331 size = 0x5;
2332 break;
2333 case 0x00400000UL:
2334 size = 0x6;
2335 break;
2336 case 0x01000000UL:
2337 size = 0x7;
2338 break;
2339 case 0x04000000UL:
2340 size = 0x8;
2341 break;
2342 case 0x10000000UL:
2343 size = 0x9;
2344 break;
2345 case 0x40000000UL:
2346 size = 0xA;
2347 break;
2348 #if defined(TARGET_PPC64)
2349 case 0x000100000000ULL:
2350 size = 0xB;
2351 break;
2352 case 0x000400000000ULL:
2353 size = 0xC;
2354 break;
2355 case 0x001000000000ULL:
2356 size = 0xD;
2357 break;
2358 case 0x004000000000ULL:
2359 size = 0xE;
2360 break;
2361 case 0x010000000000ULL:
2362 size = 0xF;
2363 break;
2364 #endif
2365 default:
2366 size = -1;
2367 break;
2370 return size;
2373 /* Helpers for 4xx TLB management */
2374 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2376 #define PPC4XX_TLBHI_V 0x00000040
2377 #define PPC4XX_TLBHI_E 0x00000020
2378 #define PPC4XX_TLBHI_SIZE_MIN 0
2379 #define PPC4XX_TLBHI_SIZE_MAX 7
2380 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2381 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2382 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2384 #define PPC4XX_TLBLO_EX 0x00000200
2385 #define PPC4XX_TLBLO_WR 0x00000100
2386 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2387 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
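/*
 * 40x TLBHI/TLBLO layout as used by the helpers below: TLBHI carries the
 * EPN, a 3-bit SIZE field at bits 9:7 and the V(alid)/E(ndian) bits; TLBLO
 * carries the RPN, the EX/WR permission bits and an 8-bit attribute field.
 * tlbre reads an entry back into this format and tlbwe parses it.
 */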
2389 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
2391 ppcemb_tlb_t *tlb;
2392 target_ulong ret;
2393 int size;
2395 entry &= PPC4XX_TLB_ENTRY_MASK;
2396 tlb = &env->tlb.tlbe[entry];
2397 ret = tlb->EPN;
2398 if (tlb->prot & PAGE_VALID) {
2399 ret |= PPC4XX_TLBHI_V;
2401 size = booke_page_size_to_tlb(tlb->size);
2402 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
2403 size = PPC4XX_TLBHI_SIZE_DEFAULT;
2405 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
2406 env->spr[SPR_40x_PID] = tlb->PID;
2407 return ret;
2410 target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
2412 ppcemb_tlb_t *tlb;
2413 target_ulong ret;
2415 entry &= PPC4XX_TLB_ENTRY_MASK;
2416 tlb = &env->tlb.tlbe[entry];
2417 ret = tlb->RPN;
2418 if (tlb->prot & PAGE_EXEC) {
2419 ret |= PPC4XX_TLBLO_EX;
2421 if (tlb->prot & PAGE_WRITE) {
2422 ret |= PPC4XX_TLBLO_WR;
2424 return ret;
2427 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
2428 target_ulong val)
2430 CPUState *cs = env_cpu(env);
2431 ppcemb_tlb_t *tlb;
2432 target_ulong page, end;
2434 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
2435 val);
2436 entry &= PPC4XX_TLB_ENTRY_MASK;
2437 tlb = &env->tlb.tlbe[entry];
2438 /* Invalidate previous TLB (if it's valid) */
2439 if (tlb->prot & PAGE_VALID) {
2440 end = tlb->EPN + tlb->size;
2441 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
2442 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2443 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2444 tlb_flush_page(cs, page);
2447 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
2448 & PPC4XX_TLBHI_SIZE_MASK);
2449 /*
2450 * We cannot handle TLB size < TARGET_PAGE_SIZE.
2451 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
2452 */
2453 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
2454 cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
2455 "are not supported (%d)\n"
2456 "Please implement TARGET_PAGE_BITS_VARY\n",
2457 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
2459 tlb->EPN = val & ~(tlb->size - 1);
2460 if (val & PPC4XX_TLBHI_V) {
2461 tlb->prot |= PAGE_VALID;
2462 if (val & PPC4XX_TLBHI_E) {
2463 /* XXX: TO BE FIXED */
2464 cpu_abort(cs,
2465 "Little-endian TLB entries are not supported by now\n");
2467 } else {
2468 tlb->prot &= ~PAGE_VALID;
2470 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2471 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2472 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2473 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2474 tlb->prot & PAGE_READ ? 'r' : '-',
2475 tlb->prot & PAGE_WRITE ? 'w' : '-',
2476 tlb->prot & PAGE_EXEC ? 'x' : '-',
2477 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2478 /* Invalidate new TLB (if valid) */
2479 if (tlb->prot & PAGE_VALID) {
2480 end = tlb->EPN + tlb->size;
2481 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
2482 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2483 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2484 tlb_flush_page(cs, page);
2489 void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
2490 target_ulong val)
2492 ppcemb_tlb_t *tlb;
2494 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
2495 val);
2496 entry &= PPC4XX_TLB_ENTRY_MASK;
2497 tlb = &env->tlb.tlbe[entry];
2498 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
2499 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
2500 tlb->prot = PAGE_READ;
2501 if (val & PPC4XX_TLBLO_EX) {
2502 tlb->prot |= PAGE_EXEC;
2504 if (val & PPC4XX_TLBLO_WR) {
2505 tlb->prot |= PAGE_WRITE;
2507 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2508 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2509 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2510 tlb->prot & PAGE_READ ? 'r' : '-',
2511 tlb->prot & PAGE_WRITE ? 'w' : '-',
2512 tlb->prot & PAGE_EXEC ? 'x' : '-',
2513 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2516 target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
2518 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
2521 /* PowerPC 440 TLB management */
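/*
 * The 440 exposes each TLB entry as three words: word 0 holds the EPN,
 * size and valid bit, word 1 the RPN, and word 2 the attribute and access
 * permission bits.  Permissions for the two privilege levels are packed
 * into separate nibbles of tlb->prot (PAGE_* and PAGE_* << 4), as the
 * encode/decode in tlbwe/tlbre below shows.
 */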
2522 void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
2523 target_ulong value)
2525 ppcemb_tlb_t *tlb;
2526 target_ulong EPN, RPN, size;
2527 int do_flush_tlbs;
2529 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
2530 __func__, word, (int)entry, value);
2531 do_flush_tlbs = 0;
2532 entry &= 0x3F;
2533 tlb = &env->tlb.tlbe[entry];
2534 switch (word) {
2535 default:
2536 /* Just here to please gcc */
2537 case 0:
2538 EPN = value & 0xFFFFFC00;
2539 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
2540 do_flush_tlbs = 1;
2542 tlb->EPN = EPN;
2543 size = booke_tlb_to_page_size((value >> 4) & 0xF);
2544 if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
2545 do_flush_tlbs = 1;
2547 tlb->size = size;
2548 tlb->attr &= ~0x1;
2549 tlb->attr |= (value >> 8) & 1;
2550 if (value & 0x200) {
2551 tlb->prot |= PAGE_VALID;
2552 } else {
2553 if (tlb->prot & PAGE_VALID) {
2554 tlb->prot &= ~PAGE_VALID;
2555 do_flush_tlbs = 1;
2558 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2559 if (do_flush_tlbs) {
2560 tlb_flush(env_cpu(env));
2562 break;
2563 case 1:
2564 RPN = value & 0xFFFFFC0F;
2565 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
2566 tlb_flush(env_cpu(env));
2568 tlb->RPN = RPN;
2569 break;
2570 case 2:
2571 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
2572 tlb->prot = tlb->prot & PAGE_VALID;
2573 if (value & 0x1) {
2574 tlb->prot |= PAGE_READ << 4;
2576 if (value & 0x2) {
2577 tlb->prot |= PAGE_WRITE << 4;
2579 if (value & 0x4) {
2580 tlb->prot |= PAGE_EXEC << 4;
2582 if (value & 0x8) {
2583 tlb->prot |= PAGE_READ;
2585 if (value & 0x10) {
2586 tlb->prot |= PAGE_WRITE;
2588 if (value & 0x20) {
2589 tlb->prot |= PAGE_EXEC;
2591 break;
2595 target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
2596 target_ulong entry)
2598 ppcemb_tlb_t *tlb;
2599 target_ulong ret;
2600 int size;
2602 entry &= 0x3F;
2603 tlb = &env->tlb.tlbe[entry];
2604 switch (word) {
2605 default:
2606 /* Just here to please gcc */
2607 case 0:
2608 ret = tlb->EPN;
2609 size = booke_page_size_to_tlb(tlb->size);
2610 if (size < 0 || size > 0xF) {
2611 size = 1;
2613 ret |= size << 4;
2614 if (tlb->attr & 0x1) {
2615 ret |= 0x100;
2617 if (tlb->prot & PAGE_VALID) {
2618 ret |= 0x200;
2620 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2621 env->spr[SPR_440_MMUCR] |= tlb->PID;
2622 break;
2623 case 1:
2624 ret = tlb->RPN;
2625 break;
2626 case 2:
2627 ret = tlb->attr & ~0x1;
2628 if (tlb->prot & (PAGE_READ << 4)) {
2629 ret |= 0x1;
2631 if (tlb->prot & (PAGE_WRITE << 4)) {
2632 ret |= 0x2;
2634 if (tlb->prot & (PAGE_EXEC << 4)) {
2635 ret |= 0x4;
2637 if (tlb->prot & PAGE_READ) {
2638 ret |= 0x8;
2640 if (tlb->prot & PAGE_WRITE) {
2641 ret |= 0x10;
2643 if (tlb->prot & PAGE_EXEC) {
2644 ret |= 0x20;
2646 break;
2648 return ret;
2651 target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
2653 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
2656 /* PowerPC BookE 2.06 TLB management */
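/*
 * BookE 2.06 MMU assist (MAS) registers drive the helpers below: MAS0
 * selects the TLB array (TLBSEL) and entry (ESEL), MAS1 holds the
 * valid/IPROT/TID/TSIZE fields, MAS2 the EPN and WIMGE attributes, and
 * MAS7:MAS3 form the 64-bit mas7_3 word holding the RPN and permissions.
 * booke206_cur_tlb() resolves MAS0/MAS2 to the ppcmas_tlb_t being operated
 * on.
 */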
2658 static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
2660 uint32_t tlbncfg = 0;
2661 int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
2662 int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
2663 int tlb;
2665 tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
2666 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
2668 if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
2669 cpu_abort(env_cpu(env), "we don't support HES yet\n");
2672 return booke206_get_tlbm(env, tlb, ea, esel);
2675 void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
2677 env->spr[pidn] = pid;
2678 /* changing PIDs means we're in a different address space now */
2679 tlb_flush(env_cpu(env));
2682 void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
2684 env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
2685 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
2687 void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
2689 env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
2690 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
2693 static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
2695 if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
2696 tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
2697 } else {
2698 tlb_flush(env_cpu(env));
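/*
 * tlbwe flow implemented below: honour the MAS0 write-qualifier, reject
 * page sizes the targeted TLB array does not support (program interrupt),
 * flush the victim entry if it was valid, then latch MAS1/MAS2/MAS3/MAS7
 * into the entry and flush the new page so stale QEMU translations are
 * dropped.
 */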
2702 void helper_booke206_tlbwe(CPUPPCState *env)
2704 uint32_t tlbncfg, tlbn;
2705 ppcmas_tlb_t *tlb;
2706 uint32_t size_tlb, size_ps;
2707 target_ulong mask;
2710 switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
2711 case MAS0_WQ_ALWAYS:
2712 /* good to go, write that entry */
2713 break;
2714 case MAS0_WQ_COND:
2715 /* XXX check if reserved */
2716 if (0) {
2717 return;
2719 break;
2720 case MAS0_WQ_CLR_RSRV:
2721 /* XXX clear entry */
2722 return;
2723 default:
2724 /* no idea what to do */
2725 return;
2728 if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
2729 !msr_gs) {
2730 /* XXX we don't support direct LRAT setting yet */
2731 fprintf(stderr, "cpu: don't support LRAT setting yet\n");
2732 return;
2735 tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
2736 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
2738 tlb = booke206_cur_tlb(env);
2740 if (!tlb) {
2741 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2742 POWERPC_EXCP_INVAL |
2743 POWERPC_EXCP_INVAL_INVAL, GETPC());
2746 /* check that we support the targeted size */
2747 size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2748 size_ps = booke206_tlbnps(env, tlbn);
2749 if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
2750 !(size_ps & (1 << size_tlb))) {
2751 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2752 POWERPC_EXCP_INVAL |
2753 POWERPC_EXCP_INVAL_INVAL, GETPC());
2756 if (msr_gs) {
2757 cpu_abort(env_cpu(env), "missing HV implementation\n");
2760 if (tlb->mas1 & MAS1_VALID) {
2761 /*
2762 * Invalidate the page in QEMU TLB if it was a valid entry.
2763 *
2764 * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
2765 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
2766 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
2767 *
2768 * "Note that when an L2 TLB entry is written, it may be displacing an
2769 * already valid entry in the same L2 TLB location (a victim). If a
2770 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
2771 * TLB entry is automatically invalidated."
2772 */
2773 flush_page(env, tlb);
2776 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
2777 env->spr[SPR_BOOKE_MAS3];
2778 tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
2780 if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
2781 /* For a TLB that has a fixed size, TSIZE is ignored with MAV2 */
2782 booke206_fixed_size_tlbn(env, tlbn, tlb);
2783 } else {
2784 if (!(tlbncfg & TLBnCFG_AVAIL)) {
2785 /* force !AVAIL TLB entries to the correct page size */
2786 tlb->mas1 &= ~MAS1_TSIZE_MASK;
2787 /* XXX can be configured in MMUCSR0 */
2788 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
2792 /* Make a mask from TLB size to discard invalid bits in EPN field */
2793 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
2794 /* Add a mask for page attributes */
2795 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
2797 if (!msr_cm) {
2798 /*
2799 * Executing a tlbwe instruction in 32-bit mode will set bits
2800 * 0:31 of the TLB EPN field to zero.
2801 */
2802 mask &= 0xffffffff;
2805 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;
2807 if (!(tlbncfg & TLBnCFG_IPROT)) {
2808 /* no IPROT supported by TLB */
2809 tlb->mas1 &= ~MAS1_IPROT;
2812 flush_page(env, tlb);
2815 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
2817 int tlbn = booke206_tlbm_to_tlbn(env, tlb);
2818 int way = booke206_tlbm_to_way(env, tlb);
2820 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
2821 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
2822 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
2824 env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
2825 env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
2826 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
2827 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
2830 void helper_booke206_tlbre(CPUPPCState *env)
2832 ppcmas_tlb_t *tlb = NULL;
2834 tlb = booke206_cur_tlb(env);
2835 if (!tlb) {
2836 env->spr[SPR_BOOKE_MAS1] = 0;
2837 } else {
2838 booke206_tlb_to_mas(env, tlb);
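/*
 * tlbsx below searches every TLB array and way for an entry matching the
 * given address under the MAS6 search PID/AS; on a hit the entry is copied
 * back into the MAS registers, otherwise defaults from MAS4 plus a
 * round-robin "next victim" hint are loaded instead.
 */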
2842 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
2844 ppcmas_tlb_t *tlb = NULL;
2845 int i, j;
2846 hwaddr raddr;
2847 uint32_t spid, sas;
2849 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
2850 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
2852 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2853 int ways = booke206_tlb_ways(env, i);
2855 for (j = 0; j < ways; j++) {
2856 tlb = booke206_get_tlbm(env, i, address, j);
2858 if (!tlb) {
2859 continue;
2862 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
2863 continue;
2866 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
2867 continue;
2870 booke206_tlb_to_mas(env, tlb);
2871 return;
2875 /* no entry found, fill with defaults */
2876 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
2877 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
2878 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
2879 env->spr[SPR_BOOKE_MAS3] = 0;
2880 env->spr[SPR_BOOKE_MAS7] = 0;
2882 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
2883 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
2886 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
2887 << MAS1_TID_SHIFT;
2889 /* next victim logic */
2890 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
2891 env->last_way++;
2892 env->last_way &= booke206_tlb_ways(env, 0) - 1;
2893 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
2896 static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
2897 uint32_t ea)
2899 int i;
2900 int ways = booke206_tlb_ways(env, tlbn);
2901 target_ulong mask;
2903 for (i = 0; i < ways; i++) {
2904 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
2905 if (!tlb) {
2906 continue;
2908 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
2909 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
2910 !(tlb->mas1 & MAS1_IPROT)) {
2911 tlb->mas1 &= ~MAS1_VALID;
2916 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
2918 CPUState *cs;
2920 if (address & 0x4) {
2921 /* flush all entries */
2922 if (address & 0x8) {
2923 /* flush all of TLB1 */
2924 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
2925 } else {
2926 /* flush all of TLB0 */
2927 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
2929 return;
2932 if (address & 0x8) {
2933 /* flush TLB1 entries */
2934 booke206_invalidate_ea_tlb(env, 1, address);
2935 CPU_FOREACH(cs) {
2936 tlb_flush(cs);
2938 } else {
2939 /* flush TLB0 entries */
2940 booke206_invalidate_ea_tlb(env, 0, address);
2941 CPU_FOREACH(cs) {
2942 tlb_flush_page(cs, address & MAS2_EPN_MASK);
2947 void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
2949 /* XXX missing LPID handling */
2950 booke206_flush_tlb(env, -1, 1);
2953 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
2955 int i, j;
2956 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
2957 ppcmas_tlb_t *tlb = env->tlb.tlbm;
2958 int tlb_size;
2960 /* XXX missing LPID handling */
2961 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2962 tlb_size = booke206_tlb_size(env, i);
2963 for (j = 0; j < tlb_size; j++) {
2964 if (!(tlb[j].mas1 & MAS1_IPROT) &&
2965 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
2966 tlb[j].mas1 &= ~MAS1_VALID;
2969 tlb += booke206_tlb_size(env, i);
2971 tlb_flush(env_cpu(env));
2974 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
2976 int i, j;
2977 ppcmas_tlb_t *tlb;
2978 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
2979 int pid = tid >> MAS6_SPID_SHIFT;
2980 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
2981 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
2982 /* XXX check for unsupported isize and raise an invalid opcode exception */
2983 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
2984 /* XXX implement MAV2 handling */
2985 bool mav2 = false;
2987 /* XXX missing LPID handling */
2988 /* flush by pid and ea */
2989 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2990 int ways = booke206_tlb_ways(env, i);
2992 for (j = 0; j < ways; j++) {
2993 tlb = booke206_get_tlbm(env, i, address, j);
2994 if (!tlb) {
2995 continue;
2997 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
2998 (tlb->mas1 & MAS1_IPROT) ||
2999 ((tlb->mas1 & MAS1_IND) != ind) ||
3000 ((tlb->mas8 & MAS8_TGS) != sgs)) {
3001 continue;
3003 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
3004 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
3005 continue;
3007 /* XXX e500mc doesn't match SAS, but other cores might */
3008 tlb->mas1 &= ~MAS1_VALID;
3011 tlb_flush(env_cpu(env));
3014 void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
3016 int flags = 0;
3018 if (type & 2) {
3019 flags |= BOOKE206_FLUSH_TLB1;
3022 if (type & 4) {
3023 flags |= BOOKE206_FLUSH_TLB0;
3026 booke206_flush_tlb(env, flags, 1);
3030 void helper_check_tlb_flush_local(CPUPPCState *env)
3032 check_tlb_flush(env, false);
3035 void helper_check_tlb_flush_global(CPUPPCState *env)
3037 check_tlb_flush(env, true);
3040 /*****************************************************************************/
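/*
 * TCG tlb_fill hook: translation is delegated to the CPU-class
 * handle_mmu_fault callback when one is installed, otherwise to the generic
 * cpu_ppc_handle_mmu_fault().  On failure a probe access simply reports
 * false, while a real access raises the exception recorded in
 * cs->exception_index, unwinding guest state via retaddr.
 */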
3042 bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
3043 MMUAccessType access_type, int mmu_idx,
3044 bool probe, uintptr_t retaddr)
3046 PowerPCCPU *cpu = POWERPC_CPU(cs);
3047 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
3048 CPUPPCState *env = &cpu->env;
3049 int ret;
3051 if (pcc->handle_mmu_fault) {
3052 ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx);
3053 } else {
3054 ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
3056 if (unlikely(ret != 0)) {
3057 if (probe) {
3058 return false;
3060 raise_exception_err_ra(env, cs->exception_index, env->error_code,
3061 retaddr);
3063 return true;