target/ppc: Remove type argument from get_bat_6xx_tlb
[qemu/ar7.git] / target/ppc/mmu_helper.c
blob 0eba8302ee326dcf11782161892d29105dcd4e6f
1 /*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "cpu.h"
23 #include "exec/helper-proto.h"
24 #include "sysemu/kvm.h"
25 #include "kvm_ppc.h"
26 #include "mmu-hash64.h"
27 #include "mmu-hash32.h"
28 #include "exec/exec-all.h"
29 #include "exec/cpu_ldst.h"
30 #include "exec/log.h"
31 #include "helper_regs.h"
32 #include "qemu/error-report.h"
33 #include "qemu/main-loop.h"
34 #include "qemu/qemu-print.h"
35 #include "internal.h"
36 #include "mmu-book3s-v3.h"
37 #include "mmu-radix64.h"
39 /* #define DEBUG_MMU */
40 /* #define DEBUG_BATS */
41 /* #define DEBUG_SOFTWARE_TLB */
42 /* #define DUMP_PAGE_TABLES */
43 /* #define FLUSH_ALL_TLBS */
45 #ifdef DEBUG_MMU
46 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
47 #else
48 # define LOG_MMU_STATE(cpu) do { } while (0)
49 #endif
51 #ifdef DEBUG_SOFTWARE_TLB
52 # define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
53 #else
54 # define LOG_SWTLB(...) do { } while (0)
55 #endif
57 #ifdef DEBUG_BATS
58 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
59 #else
60 # define LOG_BATS(...) do { } while (0)
61 #endif
63 /*****************************************************************************/
64 /* PowerPC MMU emulation */
66 /* Context used internally during MMU translations */
67 typedef struct mmu_ctx_t mmu_ctx_t;
68 struct mmu_ctx_t {
69 hwaddr raddr; /* Real address */
70 hwaddr eaddr; /* Effective address */
71 int prot; /* Protection bits */
72 hwaddr hash[2]; /* Pagetable hash values */
73 target_ulong ptem; /* Virtual segment ID | API */
74 int key; /* Access key */
75 int nx; /* Non-execute area */
78 /* Common routines used by software and hardware TLBs emulation */
79 static inline int pte_is_valid(target_ulong pte0)
81 return pte0 & 0x80000000 ? 1 : 0;
84 static inline void pte_invalidate(target_ulong *pte0)
86 *pte0 &= ~0x80000000;
89 #define PTE_PTEM_MASK 0x7FFFFFBF
90 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
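/*
 * Protection check helper for hashed PTEs: "key" is the segment protection
 * key selected from Ks/Kp according to MSR[PR], "pp" holds the two PP bits
 * from the low PTE word and "nx" the segment's no-execute flag.  With
 * key == 0 the page is always readable and PP != 3 also grants write
 * access; with key == 1 PP selects no access, read-only, read-write or
 * read-only respectively.  Execute permission is added whenever nx == 0.
 */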
92 static int pp_check(int key, int pp, int nx)
94 int access;
96 /* Compute access rights */
97 access = 0;
98 if (key == 0) {
99 switch (pp) {
100 case 0x0:
101 case 0x1:
102 case 0x2:
103 access |= PAGE_WRITE;
104 /* fall through */
105 case 0x3:
106 access |= PAGE_READ;
107 break;
109 } else {
110 switch (pp) {
111 case 0x0:
112 access = 0;
113 break;
114 case 0x1:
115 case 0x3:
116 access = PAGE_READ;
117 break;
118 case 0x2:
119 access = PAGE_READ | PAGE_WRITE;
120 break;
123 if (nx == 0) {
124 access |= PAGE_EXEC;
127 return access;
130 static int check_prot(int prot, MMUAccessType access_type)
132 return prot & prot_for_access_type(access_type) ? 0 : -2;
135 static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
136 target_ulong pte1, int h,
137 MMUAccessType access_type)
139 target_ulong ptem, mmask;
140 int access, ret, pteh, ptev, pp;
142 ret = -1;
143 /* Check validity and table match */
144 ptev = pte_is_valid(pte0);
145 pteh = (pte0 >> 6) & 1;
146 if (ptev && h == pteh) {
147 /* Check vsid & api */
148 ptem = pte0 & PTE_PTEM_MASK;
149 mmask = PTE_CHECK_MASK;
150 pp = pte1 & 0x00000003;
151 if (ptem == ctx->ptem) {
152 if (ctx->raddr != (hwaddr)-1ULL) {
153 /* all matches should have equal RPN, WIMG & PP */
154 if ((ctx->raddr & mmask) != (pte1 & mmask)) {
155 qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
156 return -3;
159 /* Compute access rights */
160 access = pp_check(ctx->key, pp, ctx->nx);
161 /* Keep the matching PTE information */
162 ctx->raddr = pte1;
163 ctx->prot = access;
164 ret = check_prot(ctx->prot, access_type);
165 if (ret == 0) {
166 /* Access granted */
167 qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");
168 } else {
169 /* Access right violation */
170 qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
175 return ret;
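/*
 * Write the access back into the PTE reference/change bits: 0x00000100 in
 * the low PTE word is the Referenced (R) bit and 0x00000080 is the Changed
 * (C) bit.  The return value tells the caller whether the in-memory PTE
 * copy must be updated.  When the page is still clean and the access is
 * not a successful store, PAGE_WRITE is dropped so that the first real
 * store faults and C gets set on the retry.
 */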
178 static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
179 int ret, MMUAccessType access_type)
181 int store = 0;
183 /* Update page flags */
184 if (!(*pte1p & 0x00000100)) {
185 /* Update accessed flag */
186 *pte1p |= 0x00000100;
187 store = 1;
189 if (!(*pte1p & 0x00000080)) {
190 if (access_type == MMU_DATA_STORE && ret == 0) {
191 /* Update changed flag */
192 *pte1p |= 0x00000080;
193 store = 1;
194 } else {
195 /* Force page fault for first write access */
196 ctx->prot &= ~PAGE_WRITE;
200 return store;
203 /* Software driven TLB helpers */
204 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
205 int way, int is_code)
207 int nr;
209 /* Select TLB num in a way from address */
210 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
211 /* Select TLB way */
212 nr += env->tlb_per_way * way;
213 /* 6xx have separate TLBs for instructions and data */
214 if (is_code && env->id_tlbs == 1) {
215 nr += env->nb_tlb;
218 return nr;
221 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
223 ppc6xx_tlb_t *tlb;
224 int nr, max;
226 /* LOG_SWTLB("Invalidate all TLBs\n"); */
227 /* Invalidate all defined software TLB */
228 max = env->nb_tlb;
229 if (env->id_tlbs == 1) {
230 max *= 2;
232 for (nr = 0; nr < max; nr++) {
233 tlb = &env->tlb.tlb6[nr];
234 pte_invalidate(&tlb->pte0);
236 tlb_flush(env_cpu(env));
239 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
240 target_ulong eaddr,
241 int is_code, int match_epn)
243 #if !defined(FLUSH_ALL_TLBS)
244 CPUState *cs = env_cpu(env);
245 ppc6xx_tlb_t *tlb;
246 int way, nr;
248 /* Invalidate ITLB + DTLB, all ways */
249 for (way = 0; way < env->nb_ways; way++) {
250 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
251 tlb = &env->tlb.tlb6[nr];
252 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
253 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
254 env->nb_tlb, eaddr);
255 pte_invalidate(&tlb->pte0);
256 tlb_flush_page(cs, tlb->EPN);
259 #else
260 /* XXX: the PowerPC specification says this is valid as well */
261 ppc6xx_tlb_invalidate_all(env);
262 #endif
265 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
266 target_ulong eaddr, int is_code)
268 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
271 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
272 int is_code, target_ulong pte0, target_ulong pte1)
274 ppc6xx_tlb_t *tlb;
275 int nr;
277 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
278 tlb = &env->tlb.tlb6[nr];
279 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
280 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
281 /* Invalidate any pending reference in QEMU for this virtual address */
282 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
283 tlb->pte0 = pte0;
284 tlb->pte1 = pte1;
285 tlb->EPN = EPN;
286 /* Store last way for LRU mechanism */
287 env->last_way = way;
290 static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
291 target_ulong eaddr, MMUAccessType access_type)
293 ppc6xx_tlb_t *tlb;
294 int nr, best, way;
295 int ret;
297 best = -1;
298 ret = -1; /* No TLB found */
299 for (way = 0; way < env->nb_ways; way++) {
300 nr = ppc6xx_tlb_getnum(env, eaddr, way, access_type == MMU_INST_FETCH);
301 tlb = &env->tlb.tlb6[nr];
302 /* This test "emulates" the PTE index match for hardware TLBs */
303 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
304 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
305 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
306 pte_is_valid(tlb->pte0) ? "valid" : "inval",
307 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
308 continue;
310 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
311 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
312 pte_is_valid(tlb->pte0) ? "valid" : "inval",
313 tlb->EPN, eaddr, tlb->pte1,
314 access_type == MMU_DATA_STORE ? 'S' : 'L',
315 access_type == MMU_INST_FETCH ? 'I' : 'D');
316 switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
317 0, access_type)) {
318 case -3:
319 /* TLB inconsistency */
320 return -1;
321 case -2:
322 /* Access violation */
323 ret = -2;
324 best = nr;
325 break;
326 case -1:
327 default:
328 /* No match */
329 break;
330 case 0:
331 /* access granted */
333 * XXX: we should keep looping to check all TLBs for
334 * consistency, but we can speed the whole thing up since
335 * the result would be undefined if the TLBs are not
336 * consistent.
338 ret = 0;
339 best = nr;
340 goto done;
343 if (best != -1) {
344 done:
345 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
346 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
347 /* Update page flags */
348 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
351 return ret;
354 /* Perform BAT hit & translation */
355 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
356 int *validp, int *protp, target_ulong *BATu,
357 target_ulong *BATl)
359 target_ulong bl;
360 int pp, valid, prot;
362 bl = (*BATu & 0x00001FFC) << 15;
363 valid = 0;
364 prot = 0;
365 if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
366 ((msr_pr != 0) && (*BATu & 0x00000001))) {
367 valid = 1;
368 pp = *BATl & 0x00000003;
369 if (pp != 0) {
370 prot = PAGE_READ | PAGE_EXEC;
371 if (pp == 0x2) {
372 prot |= PAGE_WRITE;
376 *blp = bl;
377 *validp = valid;
378 *protp = prot;
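/*
 * BAT lookup: each BAT pair is matched on BEPI (the upper bits of BATu)
 * under the block-length mask computed above from BATu[BL], and is only
 * taken into account when the Vs/Vp valid bit matching the current
 * MSR[PR] state is set.  On a hit the real address is assembled from the
 * BRPN field of BATl plus the untranslated low-order bits of the
 * effective address.
 */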
381 static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
382 target_ulong virtual, MMUAccessType access_type)
384 target_ulong *BATlt, *BATut, *BATu, *BATl;
385 target_ulong BEPIl, BEPIu, bl;
386 int i, valid, prot;
387 int ret = -1;
388 bool ifetch = access_type == MMU_INST_FETCH;
390 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
391 ifetch ? 'I' : 'D', virtual);
392 if (ifetch) {
393 BATlt = env->IBAT[1];
394 BATut = env->IBAT[0];
395 } else {
396 BATlt = env->DBAT[1];
397 BATut = env->DBAT[0];
399 for (i = 0; i < env->nb_BATs; i++) {
400 BATu = &BATut[i];
401 BATl = &BATlt[i];
402 BEPIu = *BATu & 0xF0000000;
403 BEPIl = *BATu & 0x0FFE0000;
404 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
405 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
406 " BATl " TARGET_FMT_lx "\n", __func__,
407 ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
408 if ((virtual & 0xF0000000) == BEPIu &&
409 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
410 /* BAT matches */
411 if (valid != 0) {
412 /* Get physical address */
413 ctx->raddr = (*BATl & 0xF0000000) |
414 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
415 (virtual & 0x0001F000);
416 /* Compute access rights */
417 ctx->prot = prot;
418 ret = check_prot(ctx->prot, access_type);
419 if (ret == 0) {
420 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
421 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
422 ctx->prot & PAGE_WRITE ? 'W' : '-');
424 break;
428 if (ret < 0) {
429 #if defined(DEBUG_BATS)
430 if (qemu_log_enabled()) {
431 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
432 for (i = 0; i < 4; i++) {
433 BATu = &BATut[i];
434 BATl = &BATlt[i];
435 BEPIu = *BATu & 0xF0000000;
436 BEPIl = *BATu & 0x0FFE0000;
437 bl = (*BATu & 0x00001FFC) << 15;
438 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
439 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
440 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
441 __func__, ifetch ? 'I' : 'D', i, virtual,
442 *BATu, *BATl, BEPIu, BEPIl, bl);
445 #endif
447 /* No hit */
448 return ret;
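/*
 * When no BAT matches (or BATs are disabled), translation falls back to
 * the segment/page-table path below: the segment register provides the
 * VSID, protection key and no-execute bit, the primary hash is
 * VSID ^ page-index (the secondary hash being its complement), and the
 * software TLB is probed with the PTEM compare value (VSID << 7 | API)
 * built there.
 */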
451 /* Perform segment based translation */
452 static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
453 target_ulong eaddr, MMUAccessType access_type,
454 int type)
456 PowerPCCPU *cpu = env_archcpu(env);
457 hwaddr hash;
458 target_ulong vsid;
459 int ds, pr, target_page_bits;
460 int ret;
461 target_ulong sr, pgidx;
463 pr = msr_pr;
464 ctx->eaddr = eaddr;
466 sr = env->sr[eaddr >> 28];
467 ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
468 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
469 ds = sr & 0x80000000 ? 1 : 0;
470 ctx->nx = sr & 0x10000000 ? 1 : 0;
471 vsid = sr & 0x00FFFFFF;
472 target_page_bits = TARGET_PAGE_BITS;
473 qemu_log_mask(CPU_LOG_MMU,
474 "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
475 " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
476 " ir=%d dr=%d pr=%d %d t=%d\n",
477 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
478 (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type);
479 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
480 hash = vsid ^ pgidx;
481 ctx->ptem = (vsid << 7) | (pgidx >> 10);
483 qemu_log_mask(CPU_LOG_MMU,
484 "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
485 ctx->key, ds, ctx->nx, vsid);
486 ret = -1;
487 if (!ds) {
488 /* Check if instruction fetch is allowed, if needed */
489 if (type != ACCESS_CODE || ctx->nx == 0) {
490 /* Page address translation */
491 qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
492 " htab_mask " TARGET_FMT_plx
493 " hash " TARGET_FMT_plx "\n",
494 ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);
495 ctx->hash[0] = hash;
496 ctx->hash[1] = ~hash;
498 /* Initialize real address with an invalid value */
499 ctx->raddr = (hwaddr)-1ULL;
500 /* Software TLB search */
501 ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type);
502 #if defined(DUMP_PAGE_TABLES)
503 if (qemu_loglevel_mask(CPU_LOG_MMU)) {
504 CPUState *cs = env_cpu(env);
505 hwaddr curaddr;
506 uint32_t a0, a1, a2, a3;
508 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
509 "\n", ppc_hash32_hpt_base(cpu),
510 ppc_hash32_hpt_mask(cpu) + 0x80);
511 for (curaddr = ppc_hash32_hpt_base(cpu);
512 curaddr < (ppc_hash32_hpt_base(cpu)
513 + ppc_hash32_hpt_mask(cpu) + 0x80);
514 curaddr += 16) {
515 a0 = ldl_phys(cs->as, curaddr);
516 a1 = ldl_phys(cs->as, curaddr + 4);
517 a2 = ldl_phys(cs->as, curaddr + 8);
518 a3 = ldl_phys(cs->as, curaddr + 12);
519 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
520 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
521 curaddr, a0, a1, a2, a3);
525 #endif
526 } else {
527 qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
528 ret = -3;
530 } else {
531 target_ulong sr;
533 qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
534 /* Direct-store segment: absolutely *BUGGY* for now */
537 * Direct-store implies a 32-bit MMU.
538 * Check the Segment Register's bus unit ID (BUID).
540 sr = env->sr[eaddr >> 28];
541 if ((sr & 0x1FF00000) >> 20 == 0x07f) {
543 * Memory-forced I/O controller interface access
545 * If T=1 and BUID=x'07F', the 601 performs a memory
546 * access to SR[28-31] LA[4-31], bypassing all protection
547 * mechanisms.
549 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
550 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
551 return 0;
554 switch (type) {
555 case ACCESS_INT:
556 /* Integer load/store : only access allowed */
557 break;
558 case ACCESS_CODE:
559 /* No code fetch is allowed in direct-store areas */
560 return -4;
561 case ACCESS_FLOAT:
562 /* Floating point load/store */
563 return -4;
564 case ACCESS_RES:
565 /* lwarx, ldarx or stwcx. */
566 return -4;
567 case ACCESS_CACHE:
569 * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
571 * Should make the instruction a no-op. As it already does
572 * nothing, it's quite easy :-)
574 ctx->raddr = eaddr;
575 return 0;
576 case ACCESS_EXT:
577 /* eciwx or ecowx */
578 return -4;
579 default:
580 qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
581 "address translation\n");
582 return -4;
584 if ((access_type == MMU_DATA_STORE || ctx->key != 1) &&
585 (access_type == MMU_DATA_LOAD || ctx->key != 0)) {
586 ctx->raddr = eaddr;
587 ret = 2;
588 } else {
589 ret = -2;
593 return ret;
596 /* Generic TLB check function for embedded PowerPC implementations */
597 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
598 hwaddr *raddrp,
599 target_ulong address, uint32_t pid, int ext,
600 int i)
602 target_ulong mask;
604 /* Check valid flag */
605 if (!(tlb->prot & PAGE_VALID)) {
606 return -1;
608 mask = ~(tlb->size - 1);
609 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
610 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
611 mask, (uint32_t)tlb->PID, tlb->prot);
612 /* Check PID */
613 if (tlb->PID != 0 && tlb->PID != pid) {
614 return -1;
616 /* Check effective address */
617 if ((address & mask) != tlb->EPN) {
618 return -1;
620 *raddrp = (tlb->RPN & mask) | (address & ~mask);
621 if (ext) {
622 /* Extend the physical address to 36 bits */
623 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
626 return 0;
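/*
 * Note on the check above: embedded TLB entries have a per-entry page
 * size, so the comparison mask is derived from tlb->size; a PID of 0 in
 * the entry acts as a wildcard that matches every process, and the "ext"
 * flag widens the real address to 36 bits using the low nibble of RPN.
 */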
629 /* Generic TLB search function for PowerPC embedded implementations */
630 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
631 uint32_t pid)
633 ppcemb_tlb_t *tlb;
634 hwaddr raddr;
635 int i, ret;
637 /* Default return value is no match */
638 ret = -1;
639 for (i = 0; i < env->nb_tlb; i++) {
640 tlb = &env->tlb.tlbe[i];
641 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
642 ret = i;
643 break;
647 return ret;
650 /* Helpers specific to PowerPC 40x implementations */
651 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
653 ppcemb_tlb_t *tlb;
654 int i;
656 for (i = 0; i < env->nb_tlb; i++) {
657 tlb = &env->tlb.tlbe[i];
658 tlb->prot &= ~PAGE_VALID;
660 tlb_flush(env_cpu(env));
663 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
664 target_ulong address,
665 MMUAccessType access_type,
666 int type)
668 ppcemb_tlb_t *tlb;
669 hwaddr raddr;
670 int i, ret, zsel, zpr, pr;
672 ret = -1;
673 raddr = (hwaddr)-1ULL;
674 pr = msr_pr;
675 for (i = 0; i < env->nb_tlb; i++) {
676 tlb = &env->tlb.tlbe[i];
677 if (ppcemb_tlb_check(env, tlb, &raddr, address,
678 env->spr[SPR_40x_PID], 0, i) < 0) {
679 continue;
681 zsel = (tlb->attr >> 4) & 0xF;
682 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
683 LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
684 __func__, i, zsel, zpr, access_type, tlb->attr);
685 /* Check execute enable bit */
686 switch (zpr) {
687 case 0x2:
688 if (pr != 0) {
689 goto check_perms;
691 /* fall through */
692 case 0x3:
693 /* All accesses granted */
694 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
695 ret = 0;
696 break;
697 case 0x0:
698 if (pr != 0) {
699 /* Raise Zone protection fault. */
700 env->spr[SPR_40x_ESR] = 1 << 22;
701 ctx->prot = 0;
702 ret = -2;
703 break;
705 /* fall through */
706 case 0x1:
707 check_perms:
708 /* Check from TLB entry */
709 ctx->prot = tlb->prot;
710 ret = check_prot(ctx->prot, access_type);
711 if (ret == -2) {
712 env->spr[SPR_40x_ESR] = 0;
714 break;
716 if (ret >= 0) {
717 ctx->raddr = raddr;
718 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
719 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
720 ret);
721 return 0;
724 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
725 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
727 return ret;
730 void store_40x_sler(CPUPPCState *env, uint32_t val)
732 /* XXX: TO BE FIXED */
733 if (val != 0x00000000) {
734 cpu_abort(env_cpu(env),
735 "Little-endian regions are not supported by now\n");
737 env->spr[SPR_405_SLER] = val;
740 static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
741 hwaddr *raddr, int *prot, target_ulong address,
742 MMUAccessType access_type, int type, int i)
744 int ret, prot2;
746 if (ppcemb_tlb_check(env, tlb, raddr, address,
747 env->spr[SPR_BOOKE_PID],
748 !env->nb_pids, i) >= 0) {
749 goto found_tlb;
752 if (env->spr[SPR_BOOKE_PID1] &&
753 ppcemb_tlb_check(env, tlb, raddr, address,
754 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
755 goto found_tlb;
758 if (env->spr[SPR_BOOKE_PID2] &&
759 ppcemb_tlb_check(env, tlb, raddr, address,
760 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
761 goto found_tlb;
764 LOG_SWTLB("%s: TLB entry not found\n", __func__);
765 return -1;
767 found_tlb:
769 if (msr_pr != 0) {
770 prot2 = tlb->prot & 0xF;
771 } else {
772 prot2 = (tlb->prot >> 4) & 0xF;
775 /* Check the address space */
776 if (type == ACCESS_CODE) {
777 if (msr_ir != (tlb->attr & 1)) {
778 LOG_SWTLB("%s: AS doesn't match\n", __func__);
779 return -1;
782 *prot = prot2;
783 if (prot2 & PAGE_EXEC) {
784 LOG_SWTLB("%s: good TLB!\n", __func__);
785 return 0;
788 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
789 ret = -3;
790 } else {
791 if (msr_dr != (tlb->attr & 1)) {
792 LOG_SWTLB("%s: AS doesn't match\n", __func__);
793 return -1;
796 *prot = prot2;
797 if (prot2 & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE)) {
798 LOG_SWTLB("%s: found TLB!\n", __func__);
799 return 0;
802 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
803 ret = -2;
806 return ret;
809 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
810 target_ulong address,
811 MMUAccessType access_type,
812 int type)
814 ppcemb_tlb_t *tlb;
815 hwaddr raddr;
816 int i, ret;
818 ret = -1;
819 raddr = (hwaddr)-1ULL;
820 for (i = 0; i < env->nb_tlb; i++) {
821 tlb = &env->tlb.tlbe[i];
822 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address,
823 access_type, type, i);
824 if (ret != -1) {
825 break;
829 if (ret >= 0) {
830 ctx->raddr = raddr;
831 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
832 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
833 ret);
834 } else {
835 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
836 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
839 return ret;
842 static void booke206_flush_tlb(CPUPPCState *env, int flags,
843 const int check_iprot)
845 int tlb_size;
846 int i, j;
847 ppcmas_tlb_t *tlb = env->tlb.tlbm;
849 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
850 if (flags & (1 << i)) {
851 tlb_size = booke206_tlb_size(env, i);
852 for (j = 0; j < tlb_size; j++) {
853 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
854 tlb[j].mas1 &= ~MAS1_VALID;
858 tlb += booke206_tlb_size(env, i);
861 tlb_flush(env_cpu(env));
864 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
865 ppcmas_tlb_t *tlb)
867 int tlbm_size;
869 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
871 return 1024ULL << tlbm_size;
874 /* TLB check function for MAS based SoftTLBs */
875 static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
876 hwaddr *raddrp, target_ulong address,
877 uint32_t pid)
879 hwaddr mask;
880 uint32_t tlb_pid;
882 if (!msr_cm) {
883 /* In 32bit mode we can only address 32bit EAs */
884 address = (uint32_t)address;
887 /* Check valid flag */
888 if (!(tlb->mas1 & MAS1_VALID)) {
889 return -1;
892 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
893 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
894 PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
895 PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
896 tlb->mas7_3, tlb->mas8);
898 /* Check PID */
899 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
900 if (tlb_pid != 0 && tlb_pid != pid) {
901 return -1;
904 /* Check effective address */
905 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
906 return -1;
909 if (raddrp) {
910 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
913 return 0;
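/*
 * As with the ppcemb variant above, a MAS entry is matched using a
 * per-entry page size (MAS1[TSIZE] expanded by booke206_tlb_to_page_size),
 * a TID of 0 matches any PID, and the effective address is compared
 * against MAS2[EPN]; the real page number comes from MAS7_3.
 */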
916 static bool is_epid_mmu(int mmu_idx)
918 return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD;
921 static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type)
923 uint32_t esr = 0;
924 if (access_type == MMU_DATA_STORE) {
925 esr |= ESR_ST;
927 if (is_epid_mmu(mmu_idx)) {
928 esr |= ESR_EPID;
930 return esr;
934 * Get the EPID register for the given mmu_idx. If this is a regular
935 * load, construct the EPID access bits from the current processor state.
937 * Get the effective AS and PR bits and the PID. The PID is returned
938 * only if an EPID load is requested; otherwise the caller must detect
939 * the correct EPID. Return true if a valid EPID is returned.
941 static bool mmubooke206_get_as(CPUPPCState *env,
942 int mmu_idx, uint32_t *epid_out,
943 bool *as_out, bool *pr_out)
945 if (is_epid_mmu(mmu_idx)) {
946 uint32_t epidr;
947 if (mmu_idx == PPC_TLB_EPID_STORE) {
948 epidr = env->spr[SPR_BOOKE_EPSC];
949 } else {
950 epidr = env->spr[SPR_BOOKE_EPLC];
952 *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
953 *as_out = !!(epidr & EPID_EAS);
954 *pr_out = !!(epidr & EPID_EPR);
955 return true;
956 } else {
957 *as_out = msr_ds;
958 *pr_out = msr_pr;
959 return false;
963 /* Check if the tlb found by hashing really matches */
964 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
965 hwaddr *raddr, int *prot,
966 target_ulong address,
967 MMUAccessType access_type,
968 int type, int mmu_idx)
970 int ret;
971 int prot2 = 0;
972 uint32_t epid;
973 bool as, pr;
974 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
976 if (!use_epid) {
977 if (ppcmas_tlb_check(env, tlb, raddr, address,
978 env->spr[SPR_BOOKE_PID]) >= 0) {
979 goto found_tlb;
982 if (env->spr[SPR_BOOKE_PID1] &&
983 ppcmas_tlb_check(env, tlb, raddr, address,
984 env->spr[SPR_BOOKE_PID1]) >= 0) {
985 goto found_tlb;
988 if (env->spr[SPR_BOOKE_PID2] &&
989 ppcmas_tlb_check(env, tlb, raddr, address,
990 env->spr[SPR_BOOKE_PID2]) >= 0) {
991 goto found_tlb;
993 } else {
994 if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
995 goto found_tlb;
999 LOG_SWTLB("%s: TLB entry not found\n", __func__);
1000 return -1;
1002 found_tlb:
1004 if (pr) {
1005 if (tlb->mas7_3 & MAS3_UR) {
1006 prot2 |= PAGE_READ;
1008 if (tlb->mas7_3 & MAS3_UW) {
1009 prot2 |= PAGE_WRITE;
1011 if (tlb->mas7_3 & MAS3_UX) {
1012 prot2 |= PAGE_EXEC;
1014 } else {
1015 if (tlb->mas7_3 & MAS3_SR) {
1016 prot2 |= PAGE_READ;
1018 if (tlb->mas7_3 & MAS3_SW) {
1019 prot2 |= PAGE_WRITE;
1021 if (tlb->mas7_3 & MAS3_SX) {
1022 prot2 |= PAGE_EXEC;
1026 /* Check the address space and permissions */
1027 if (type == ACCESS_CODE) {
1028 /* There is no way to fetch code using epid load */
1029 assert(!use_epid);
1030 if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1031 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1032 return -1;
1035 *prot = prot2;
1036 if (prot2 & PAGE_EXEC) {
1037 LOG_SWTLB("%s: good TLB!\n", __func__);
1038 return 0;
1041 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
1042 ret = -3;
1043 } else {
1044 if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1045 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1046 return -1;
1049 *prot = prot2;
1050 if (prot2 & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE)) {
1051 LOG_SWTLB("%s: found TLB!\n", __func__);
1052 return 0;
1055 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
1056 ret = -2;
1059 return ret;
1062 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1063 target_ulong address,
1064 MMUAccessType access_type,
1065 int type, int mmu_idx)
1067 ppcmas_tlb_t *tlb;
1068 hwaddr raddr;
1069 int i, j, ret;
1071 ret = -1;
1072 raddr = (hwaddr)-1ULL;
1074 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1075 int ways = booke206_tlb_ways(env, i);
1077 for (j = 0; j < ways; j++) {
1078 tlb = booke206_get_tlbm(env, i, address, j);
1079 if (!tlb) {
1080 continue;
1082 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
1083 access_type, type, mmu_idx);
1084 if (ret != -1) {
1085 goto found_tlb;
1090 found_tlb:
1092 if (ret >= 0) {
1093 ctx->raddr = raddr;
1094 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1095 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1096 ret);
1097 } else {
1098 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1099 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1102 return ret;
1105 static const char *book3e_tsize_to_str[32] = {
1106 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1107 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1108 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1109 "1T", "2T"
1112 static void mmubooke_dump_mmu(CPUPPCState *env)
1114 ppcemb_tlb_t *entry;
1115 int i;
1117 if (kvm_enabled() && !env->kvm_sw_tlb) {
1118 qemu_printf("Cannot access KVM TLB\n");
1119 return;
1122 qemu_printf("\nTLB:\n");
1123 qemu_printf("Effective Physical Size PID Prot "
1124 "Attr\n");
1126 entry = &env->tlb.tlbe[0];
1127 for (i = 0; i < env->nb_tlb; i++, entry++) {
1128 hwaddr ea, pa;
1129 target_ulong mask;
1130 uint64_t size = (uint64_t)entry->size;
1131 char size_buf[20];
1133 /* Check valid flag */
1134 if (!(entry->prot & PAGE_VALID)) {
1135 continue;
1138 mask = ~(entry->size - 1);
1139 ea = entry->EPN & mask;
1140 pa = entry->RPN & mask;
1141 /* Extend the physical address to 36 bits */
1142 pa |= (hwaddr)(entry->RPN & 0xF) << 32;
1143 if (size >= 1 * MiB) {
1144 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / MiB);
1145 } else {
1146 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size / KiB);
1148 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
1149 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
1150 entry->prot, entry->attr);
1155 static void mmubooke206_dump_one_tlb(CPUPPCState *env, int tlbn, int offset,
1156 int tlbsize)
1158 ppcmas_tlb_t *entry;
1159 int i;
1161 qemu_printf("\nTLB%d:\n", tlbn);
1162 qemu_printf("Effective Physical Size TID TS SRWX"
1163 " URWX WIMGE U0123\n");
1165 entry = &env->tlb.tlbm[offset];
1166 for (i = 0; i < tlbsize; i++, entry++) {
1167 hwaddr ea, pa, size;
1168 int tsize;
1170 if (!(entry->mas1 & MAS1_VALID)) {
1171 continue;
1174 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
1175 size = 1024ULL << tsize;
1176 ea = entry->mas2 & ~(size - 1);
1177 pa = entry->mas7_3 & ~(size - 1);
1179 qemu_printf("0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
1180 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1181 (uint64_t)ea, (uint64_t)pa,
1182 book3e_tsize_to_str[tsize],
1183 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
1184 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
1185 entry->mas7_3 & MAS3_SR ? 'R' : '-',
1186 entry->mas7_3 & MAS3_SW ? 'W' : '-',
1187 entry->mas7_3 & MAS3_SX ? 'X' : '-',
1188 entry->mas7_3 & MAS3_UR ? 'R' : '-',
1189 entry->mas7_3 & MAS3_UW ? 'W' : '-',
1190 entry->mas7_3 & MAS3_UX ? 'X' : '-',
1191 entry->mas2 & MAS2_W ? 'W' : '-',
1192 entry->mas2 & MAS2_I ? 'I' : '-',
1193 entry->mas2 & MAS2_M ? 'M' : '-',
1194 entry->mas2 & MAS2_G ? 'G' : '-',
1195 entry->mas2 & MAS2_E ? 'E' : '-',
1196 entry->mas7_3 & MAS3_U0 ? '0' : '-',
1197 entry->mas7_3 & MAS3_U1 ? '1' : '-',
1198 entry->mas7_3 & MAS3_U2 ? '2' : '-',
1199 entry->mas7_3 & MAS3_U3 ? '3' : '-');
1203 static void mmubooke206_dump_mmu(CPUPPCState *env)
1205 int offset = 0;
1206 int i;
1208 if (kvm_enabled() && !env->kvm_sw_tlb) {
1209 qemu_printf("Cannot access KVM TLB\n");
1210 return;
1213 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1214 int size = booke206_tlb_size(env, i);
1216 if (size == 0) {
1217 continue;
1220 mmubooke206_dump_one_tlb(env, i, offset, size);
1221 offset += size;
1225 static void mmu6xx_dump_BATs(CPUPPCState *env, int type)
1227 target_ulong *BATlt, *BATut, *BATu, *BATl;
1228 target_ulong BEPIl, BEPIu, bl;
1229 int i;
1231 switch (type) {
1232 case ACCESS_CODE:
1233 BATlt = env->IBAT[1];
1234 BATut = env->IBAT[0];
1235 break;
1236 default:
1237 BATlt = env->DBAT[1];
1238 BATut = env->DBAT[0];
1239 break;
1242 for (i = 0; i < env->nb_BATs; i++) {
1243 BATu = &BATut[i];
1244 BATl = &BATlt[i];
1245 BEPIu = *BATu & 0xF0000000;
1246 BEPIl = *BATu & 0x0FFE0000;
1247 bl = (*BATu & 0x00001FFC) << 15;
1248 qemu_printf("%s BAT%d BATu " TARGET_FMT_lx
1249 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
1250 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
1251 type == ACCESS_CODE ? "code" : "data", i,
1252 *BATu, *BATl, BEPIu, BEPIl, bl);
1256 static void mmu6xx_dump_mmu(CPUPPCState *env)
1258 PowerPCCPU *cpu = env_archcpu(env);
1259 ppc6xx_tlb_t *tlb;
1260 target_ulong sr;
1261 int type, way, entry, i;
1263 qemu_printf("HTAB base = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_base(cpu));
1264 qemu_printf("HTAB mask = 0x%"HWADDR_PRIx"\n", ppc_hash32_hpt_mask(cpu));
1266 qemu_printf("\nSegment registers:\n");
1267 for (i = 0; i < 32; i++) {
1268 sr = env->sr[i];
1269 if (sr & 0x80000000) {
1270 qemu_printf("%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
1271 "CNTLR_SPEC=0x%05x\n", i,
1272 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1273 sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
1274 (uint32_t)(sr & 0xFFFFF));
1275 } else {
1276 qemu_printf("%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
1277 sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
1278 sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
1279 (uint32_t)(sr & 0x00FFFFFF));
1283 qemu_printf("\nBATs:\n");
1284 mmu6xx_dump_BATs(env, ACCESS_INT);
1285 mmu6xx_dump_BATs(env, ACCESS_CODE);
1287 if (env->id_tlbs != 1) {
1288 qemu_printf("ERROR: 6xx MMU should have separated TLB"
1289 " for code and data\n");
1292 qemu_printf("\nTLBs [EPN EPN + SIZE]\n");
1294 for (type = 0; type < 2; type++) {
1295 for (way = 0; way < env->nb_ways; way++) {
1296 for (entry = env->nb_tlb * type + env->tlb_per_way * way;
1297 entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
1298 entry++) {
1300 tlb = &env->tlb.tlb6[entry];
1301 qemu_printf("%s TLB %02d/%02d way:%d %s ["
1302 TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
1303 type ? "code" : "data", entry % env->nb_tlb,
1304 env->nb_tlb, way,
1305 pte_is_valid(tlb->pte0) ? "valid" : "inval",
1306 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
1312 void dump_mmu(CPUPPCState *env)
1314 switch (env->mmu_model) {
1315 case POWERPC_MMU_BOOKE:
1316 mmubooke_dump_mmu(env);
1317 break;
1318 case POWERPC_MMU_BOOKE206:
1319 mmubooke206_dump_mmu(env);
1320 break;
1321 case POWERPC_MMU_SOFT_6xx:
1322 case POWERPC_MMU_SOFT_74xx:
1323 mmu6xx_dump_mmu(env);
1324 break;
1325 #if defined(TARGET_PPC64)
1326 case POWERPC_MMU_64B:
1327 case POWERPC_MMU_2_03:
1328 case POWERPC_MMU_2_06:
1329 case POWERPC_MMU_2_07:
1330 dump_slb(env_archcpu(env));
1331 break;
1332 case POWERPC_MMU_3_00:
1333 if (ppc64_v3_radix(env_archcpu(env))) {
1334 qemu_log_mask(LOG_UNIMP, "%s: the PPC64 MMU is unsupported\n",
1335 __func__);
1336 } else {
1337 dump_slb(env_archcpu(env));
1339 break;
1340 #endif
1341 default:
1342 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
1346 static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
1347 MMUAccessType access_type)
1349 int in_plb, ret;
1351 ctx->raddr = eaddr;
1352 ctx->prot = PAGE_READ | PAGE_EXEC;
1353 ret = 0;
1354 switch (env->mmu_model) {
1355 case POWERPC_MMU_SOFT_6xx:
1356 case POWERPC_MMU_SOFT_74xx:
1357 case POWERPC_MMU_SOFT_4xx:
1358 case POWERPC_MMU_REAL:
1359 case POWERPC_MMU_BOOKE:
1360 ctx->prot |= PAGE_WRITE;
1361 break;
1363 case POWERPC_MMU_SOFT_4xx_Z:
1364 if (unlikely(msr_pe != 0)) {
1366 * The 403 family adds specific protections, using the
1367 * PBL/PBU registers, for accesses with no translation.
1369 in_plb =
1370 /* Check PLB validity */
1371 (env->pb[0] < env->pb[1] &&
1372 /* and address in plb area */
1373 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
1374 (env->pb[2] < env->pb[3] &&
1375 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
1376 if (in_plb ^ msr_px) {
1377 /* Access in protected area */
1378 if (access_type == MMU_DATA_STORE) {
1379 /* Access is not allowed */
1380 ret = -2;
1382 } else {
1383 /* Read-write access is allowed */
1384 ctx->prot |= PAGE_WRITE;
1387 break;
1389 default:
1390 /* Caller's checks mean we should never get here for other models */
1391 abort();
1392 return -1;
1395 return ret;
1398 static int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx,
1399 target_ulong eaddr,
1400 MMUAccessType access_type, int type,
1401 int mmu_idx)
1403 int ret = -1;
1404 bool real_mode = (type == ACCESS_CODE && msr_ir == 0)
1405 || (type != ACCESS_CODE && msr_dr == 0);
1407 switch (env->mmu_model) {
1408 case POWERPC_MMU_SOFT_6xx:
1409 case POWERPC_MMU_SOFT_74xx:
1410 if (real_mode) {
1411 ret = check_physical(env, ctx, eaddr, access_type);
1412 } else {
1413 /* Try to find a BAT */
1414 if (env->nb_BATs != 0) {
1415 ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type);
1417 if (ret < 0) {
1418 /* We didn't match any BAT entry or don't have BATs */
1419 ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type);
1422 break;
1424 case POWERPC_MMU_SOFT_4xx:
1425 case POWERPC_MMU_SOFT_4xx_Z:
1426 if (real_mode) {
1427 ret = check_physical(env, ctx, eaddr, access_type);
1428 } else {
1429 ret = mmu40x_get_physical_address(env, ctx, eaddr,
1430 access_type, type);
1432 break;
1433 case POWERPC_MMU_BOOKE:
1434 ret = mmubooke_get_physical_address(env, ctx, eaddr,
1435 access_type, type);
1436 break;
1437 case POWERPC_MMU_BOOKE206:
1438 ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type,
1439 type, mmu_idx);
1440 break;
1441 case POWERPC_MMU_MPC8xx:
1442 /* XXX: TODO */
1443 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
1444 break;
1445 case POWERPC_MMU_REAL:
1446 if (real_mode) {
1447 ret = check_physical(env, ctx, eaddr, access_type);
1448 } else {
1449 cpu_abort(env_cpu(env),
1450 "PowerPC in real mode do not do any translation\n");
1452 return -1;
1453 default:
1454 cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n");
1455 return -1;
1458 return ret;
1461 static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1462 target_ulong eaddr, MMUAccessType access_type,
1463 int type)
1465 return get_physical_address_wtlb(env, ctx, eaddr, access_type, type, 0);
1468 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
1470 PowerPCCPU *cpu = POWERPC_CPU(cs);
1471 CPUPPCState *env = &cpu->env;
1472 mmu_ctx_t ctx;
1474 switch (env->mmu_model) {
1475 #if defined(TARGET_PPC64)
1476 case POWERPC_MMU_64B:
1477 case POWERPC_MMU_2_03:
1478 case POWERPC_MMU_2_06:
1479 case POWERPC_MMU_2_07:
1480 return ppc_hash64_get_phys_page_debug(cpu, addr);
1481 case POWERPC_MMU_3_00:
1482 return ppc64_v3_get_phys_page_debug(cpu, addr);
1483 #endif
1485 case POWERPC_MMU_32B:
1486 case POWERPC_MMU_601:
1487 return ppc_hash32_get_phys_page_debug(cpu, addr);
1489 default:
1493 if (unlikely(get_physical_address(env, &ctx, addr, MMU_DATA_LOAD,
1494 ACCESS_INT) != 0)) {
1497 * Some MMUs have separate TLBs for code and data. If we only
1498 * try an ACCESS_INT, we may not be able to read instructions
1499 * mapped by code TLBs, so we also try an ACCESS_CODE.
1501 if (unlikely(get_physical_address(env, &ctx, addr, MMU_INST_FETCH,
1502 ACCESS_CODE) != 0)) {
1503 return -1;
1507 return ctx.raddr & TARGET_PAGE_MASK;
1510 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
1511 MMUAccessType access_type, int mmu_idx)
1513 uint32_t epid;
1514 bool as, pr;
1515 uint32_t missed_tid = 0;
1516 bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
1518 if (access_type == MMU_INST_FETCH) {
1519 as = msr_ir;
1521 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
1522 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
1523 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
1524 env->spr[SPR_BOOKE_MAS3] = 0;
1525 env->spr[SPR_BOOKE_MAS6] = 0;
1526 env->spr[SPR_BOOKE_MAS7] = 0;
1528 /* AS */
1529 if (as) {
1530 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
1531 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
1534 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
1535 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;
1537 if (!use_epid) {
1538 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
1539 case MAS4_TIDSELD_PID0:
1540 missed_tid = env->spr[SPR_BOOKE_PID];
1541 break;
1542 case MAS4_TIDSELD_PID1:
1543 missed_tid = env->spr[SPR_BOOKE_PID1];
1544 break;
1545 case MAS4_TIDSELD_PID2:
1546 missed_tid = env->spr[SPR_BOOKE_PID2];
1547 break;
1549 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
1550 } else {
1551 missed_tid = epid;
1552 env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16;
1554 env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT);
1557 /* next victim logic */
1558 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
1559 env->last_way++;
1560 env->last_way &= booke206_tlb_ways(env, 0) - 1;
1561 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
1564 /* Perform address translation */
1565 static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
1566 MMUAccessType access_type, int mmu_idx)
1568 CPUState *cs = env_cpu(env);
1569 PowerPCCPU *cpu = POWERPC_CPU(cs);
1570 mmu_ctx_t ctx;
1571 int type;
1572 int ret = 0;
1574 if (access_type == MMU_INST_FETCH) {
1575 /* code access */
1576 type = ACCESS_CODE;
1577 } else {
1578 /* data access */
1579 type = env->access_type;
1581 ret = get_physical_address_wtlb(env, &ctx, address, access_type,
1582 type, mmu_idx);
1583 if (ret == 0) {
1584 tlb_set_page(cs, address & TARGET_PAGE_MASK,
1585 ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
1586 mmu_idx, TARGET_PAGE_SIZE);
1587 ret = 0;
1588 } else if (ret < 0) {
1589 LOG_MMU_STATE(cs);
1590 if (type == ACCESS_CODE) {
1591 switch (ret) {
1592 case -1:
1593 /* No matches in page tables or TLB */
1594 switch (env->mmu_model) {
1595 case POWERPC_MMU_SOFT_6xx:
1596 cs->exception_index = POWERPC_EXCP_IFTLB;
1597 env->error_code = 1 << 18;
1598 env->spr[SPR_IMISS] = address;
1599 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
1600 goto tlb_miss;
1601 case POWERPC_MMU_SOFT_74xx:
1602 cs->exception_index = POWERPC_EXCP_IFTLB;
1603 goto tlb_miss_74xx;
1604 case POWERPC_MMU_SOFT_4xx:
1605 case POWERPC_MMU_SOFT_4xx_Z:
1606 cs->exception_index = POWERPC_EXCP_ITLB;
1607 env->error_code = 0;
1608 env->spr[SPR_40x_DEAR] = address;
1609 env->spr[SPR_40x_ESR] = 0x00000000;
1610 break;
1611 case POWERPC_MMU_BOOKE206:
1612 booke206_update_mas_tlb_miss(env, address, 2, mmu_idx);
1613 /* fall through */
1614 case POWERPC_MMU_BOOKE:
1615 cs->exception_index = POWERPC_EXCP_ITLB;
1616 env->error_code = 0;
1617 env->spr[SPR_BOOKE_DEAR] = address;
1618 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD);
1619 return -1;
1620 case POWERPC_MMU_MPC8xx:
1621 /* XXX: TODO */
1622 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1623 break;
1624 case POWERPC_MMU_REAL:
1625 cpu_abort(cs, "PowerPC in real mode should never raise "
1626 "any MMU exceptions\n");
1627 return -1;
1628 default:
1629 cpu_abort(cs, "Unknown or invalid MMU model\n");
1630 return -1;
1632 break;
1633 case -2:
1634 /* Access rights violation */
1635 cs->exception_index = POWERPC_EXCP_ISI;
1636 env->error_code = 0x08000000;
1637 break;
1638 case -3:
1639 /* No execute protection violation */
1640 if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1641 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1642 env->spr[SPR_BOOKE_ESR] = 0x00000000;
1644 cs->exception_index = POWERPC_EXCP_ISI;
1645 env->error_code = 0x10000000;
1646 break;
1647 case -4:
1648 /* Direct store exception */
1649 /* No code fetch is allowed in direct-store areas */
1650 cs->exception_index = POWERPC_EXCP_ISI;
1651 env->error_code = 0x10000000;
1652 break;
1654 } else {
1655 switch (ret) {
1656 case -1:
1657 /* No matches in page tables or TLB */
1658 switch (env->mmu_model) {
1659 case POWERPC_MMU_SOFT_6xx:
1660 if (access_type == MMU_DATA_STORE) {
1661 cs->exception_index = POWERPC_EXCP_DSTLB;
1662 env->error_code = 1 << 16;
1663 } else {
1664 cs->exception_index = POWERPC_EXCP_DLTLB;
1665 env->error_code = 0;
1667 env->spr[SPR_DMISS] = address;
1668 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
1669 tlb_miss:
1670 env->error_code |= ctx.key << 19;
1671 env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) +
1672 get_pteg_offset32(cpu, ctx.hash[0]);
1673 env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) +
1674 get_pteg_offset32(cpu, ctx.hash[1]);
1675 break;
1676 case POWERPC_MMU_SOFT_74xx:
1677 if (access_type == MMU_DATA_STORE) {
1678 cs->exception_index = POWERPC_EXCP_DSTLB;
1679 } else {
1680 cs->exception_index = POWERPC_EXCP_DLTLB;
1682 tlb_miss_74xx:
1683 /* Implement LRU algorithm */
1684 env->error_code = ctx.key << 19;
1685 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
1686 ((env->last_way + 1) & (env->nb_ways - 1));
1687 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
1688 break;
1689 case POWERPC_MMU_SOFT_4xx:
1690 case POWERPC_MMU_SOFT_4xx_Z:
1691 cs->exception_index = POWERPC_EXCP_DTLB;
1692 env->error_code = 0;
1693 env->spr[SPR_40x_DEAR] = address;
1694 if (access_type == MMU_DATA_STORE) {
1695 env->spr[SPR_40x_ESR] = 0x00800000;
1696 } else {
1697 env->spr[SPR_40x_ESR] = 0x00000000;
1699 break;
1700 case POWERPC_MMU_MPC8xx:
1701 /* XXX: TODO */
1702 cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
1703 break;
1704 case POWERPC_MMU_BOOKE206:
1705 booke206_update_mas_tlb_miss(env, address, access_type, mmu_idx);
1706 /* fall through */
1707 case POWERPC_MMU_BOOKE:
1708 cs->exception_index = POWERPC_EXCP_DTLB;
1709 env->error_code = 0;
1710 env->spr[SPR_BOOKE_DEAR] = address;
1711 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
1712 return -1;
1713 case POWERPC_MMU_REAL:
1714 cpu_abort(cs, "PowerPC in real mode should never raise "
1715 "any MMU exceptions\n");
1716 return -1;
1717 default:
1718 cpu_abort(cs, "Unknown or invalid MMU model\n");
1719 return -1;
1721 break;
1722 case -2:
1723 /* Access rights violation */
1724 cs->exception_index = POWERPC_EXCP_DSI;
1725 env->error_code = 0;
1726 if (env->mmu_model == POWERPC_MMU_SOFT_4xx
1727 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
1728 env->spr[SPR_40x_DEAR] = address;
1729 if (access_type == MMU_DATA_STORE) {
1730 env->spr[SPR_40x_ESR] |= 0x00800000;
1732 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1733 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1734 env->spr[SPR_BOOKE_DEAR] = address;
1735 env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type);
1736 } else {
1737 env->spr[SPR_DAR] = address;
1738 if (access_type == MMU_DATA_STORE) {
1739 env->spr[SPR_DSISR] = 0x0A000000;
1740 } else {
1741 env->spr[SPR_DSISR] = 0x08000000;
1744 break;
1745 case -4:
1746 /* Direct store exception */
1747 switch (type) {
1748 case ACCESS_FLOAT:
1749 /* Floating point load/store */
1750 cs->exception_index = POWERPC_EXCP_ALIGN;
1751 env->error_code = POWERPC_EXCP_ALIGN_FP;
1752 env->spr[SPR_DAR] = address;
1753 break;
1754 case ACCESS_RES:
1755 /* lwarx, ldarx or stwcx. */
1756 cs->exception_index = POWERPC_EXCP_DSI;
1757 env->error_code = 0;
1758 env->spr[SPR_DAR] = address;
1759 if (access_type == MMU_DATA_STORE) {
1760 env->spr[SPR_DSISR] = 0x06000000;
1761 } else {
1762 env->spr[SPR_DSISR] = 0x04000000;
1764 break;
1765 case ACCESS_EXT:
1766 /* eciwx or ecowx */
1767 cs->exception_index = POWERPC_EXCP_DSI;
1768 env->error_code = 0;
1769 env->spr[SPR_DAR] = address;
1770 if (access_type == MMU_DATA_STORE) {
1771 env->spr[SPR_DSISR] = 0x06100000;
1772 } else {
1773 env->spr[SPR_DSISR] = 0x04100000;
1775 break;
1776 default:
1777 printf("DSI: invalid exception (%d)\n", ret);
1778 cs->exception_index = POWERPC_EXCP_PROGRAM;
1779 env->error_code =
1780 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
1781 env->spr[SPR_DAR] = address;
1782 break;
1784 break;
1787 ret = 1;
1790 return ret;
1793 /*****************************************************************************/
1794 /* BATs management */
1795 #if !defined(FLUSH_ALL_TLBS)
1796 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
1797 target_ulong mask)
1799 CPUState *cs = env_cpu(env);
1800 target_ulong base, end, page;
1802 base = BATu & ~0x0001FFFF;
1803 end = base + mask + 0x00020000;
1804 if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
1805 /* Flushing 1024 4K pages is slower than a complete flush */
1806 LOG_BATS("Flush all BATs\n");
1807 tlb_flush(cs);
1808 LOG_BATS("Flush done\n");
1809 return;
1811 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
1812 TARGET_FMT_lx ")\n", base, end, mask);
1813 for (page = base; page != end; page += TARGET_PAGE_SIZE) {
1814 tlb_flush_page(cs, page);
1816 LOG_BATS("Flush done\n");
1818 #endif
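/*
 * do_invalidate_BAT() above flushes the effective-address range previously
 * covered by a BAT: the base comes from BEPI (BATu with the low 17 bits
 * cleared) and the length is the block-length mask plus one 128KB block.
 * Ranges larger than 1024 pages simply fall back to a full TLB flush.
 */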
1820 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
1821 target_ulong value)
1823 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
1824 nr, ul == 0 ? 'u' : 'l', value, env->nip);
1827 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1829 target_ulong mask;
1830 #if defined(FLUSH_ALL_TLBS)
1831 PowerPCCPU *cpu = env_archcpu(env);
1832 #endif
1834 dump_store_bat(env, 'I', 0, nr, value);
1835 if (env->IBAT[0][nr] != value) {
1836 mask = (value << 15) & 0x0FFE0000UL;
1837 #if !defined(FLUSH_ALL_TLBS)
1838 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1839 #endif
1841 * When storing valid upper BAT, mask BEPI and BRPN and
1842 * invalidate all TLBs covered by this BAT
1844 mask = (value << 15) & 0x0FFE0000UL;
1845 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1846 (value & ~0x0001FFFFUL & ~mask);
1847 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
1848 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
1849 #if !defined(FLUSH_ALL_TLBS)
1850 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1851 #else
1852 tlb_flush(env_cpu(env));
1853 #endif
1857 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1859 dump_store_bat(env, 'I', 1, nr, value);
1860 env->IBAT[1][nr] = value;
1863 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
1865 target_ulong mask;
1866 #if defined(FLUSH_ALL_TLBS)
1867 PowerPCCPU *cpu = env_archcpu(env);
1868 #endif
1870 dump_store_bat(env, 'D', 0, nr, value);
1871 if (env->DBAT[0][nr] != value) {
1873 * When storing valid upper BAT, mask BEPI and BRPN and
1874 * invalidate all TLBs covered by this BAT
1876 mask = (value << 15) & 0x0FFE0000UL;
1877 #if !defined(FLUSH_ALL_TLBS)
1878 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1879 #endif
1880 mask = (value << 15) & 0x0FFE0000UL;
1881 env->DBAT[0][nr] = (value & 0x00001FFFUL) |
1882 (value & ~0x0001FFFFUL & ~mask);
1883 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
1884 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
1885 #if !defined(FLUSH_ALL_TLBS)
1886 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
1887 #else
1888 tlb_flush(env_cpu(env));
1889 #endif
1893 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
1895 dump_store_bat(env, 'D', 1, nr, value);
1896 env->DBAT[1][nr] = value;
1899 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
1901 target_ulong mask;
1902 #if defined(FLUSH_ALL_TLBS)
1903 PowerPCCPU *cpu = env_archcpu(env);
1904 int do_inval;
1905 #endif
1907 dump_store_bat(env, 'I', 0, nr, value);
1908 if (env->IBAT[0][nr] != value) {
1909 #if defined(FLUSH_ALL_TLBS)
1910 do_inval = 0;
1911 #endif
1912 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1913 if (env->IBAT[1][nr] & 0x40) {
1914 /* Invalidate BAT only if it is valid */
1915 #if !defined(FLUSH_ALL_TLBS)
1916 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1917 #else
1918 do_inval = 1;
1919 #endif
1922 * When storing valid upper BAT, mask BEPI and BRPN and
1923 * invalidate all TLBs covered by this BAT
1925 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
1926 (value & ~0x0001FFFFUL & ~mask);
1927 env->DBAT[0][nr] = env->IBAT[0][nr];
1928 if (env->IBAT[1][nr] & 0x40) {
1929 #if !defined(FLUSH_ALL_TLBS)
1930 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1931 #else
1932 do_inval = 1;
1933 #endif
1935 #if defined(FLUSH_ALL_TLBS)
1936 if (do_inval) {
1937 tlb_flush(env_cpu(env));
1939 #endif
1943 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
1945 #if !defined(FLUSH_ALL_TLBS)
1946 target_ulong mask;
1947 #else
1948 PowerPCCPU *cpu = env_archcpu(env);
1949 int do_inval;
1950 #endif
1952 dump_store_bat(env, 'I', 1, nr, value);
1953 if (env->IBAT[1][nr] != value) {
1954 #if defined(FLUSH_ALL_TLBS)
1955 do_inval = 0;
1956 #endif
1957 if (env->IBAT[1][nr] & 0x40) {
1958 #if !defined(FLUSH_ALL_TLBS)
1959 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
1960 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1961 #else
1962 do_inval = 1;
1963 #endif
1965 if (value & 0x40) {
1966 #if !defined(FLUSH_ALL_TLBS)
1967 mask = (value << 17) & 0x0FFE0000UL;
1968 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
1969 #else
1970 do_inval = 1;
1971 #endif
1973 env->IBAT[1][nr] = value;
1974 env->DBAT[1][nr] = value;
1975 #if defined(FLUSH_ALL_TLBS)
1976 if (do_inval) {
1977 tlb_flush(env_cpu(env));
1979 #endif
1983 /*****************************************************************************/
1984 /* TLB management */
1985 void ppc_tlb_invalidate_all(CPUPPCState *env)
1987 #if defined(TARGET_PPC64)
1988 if (mmu_is_64bit(env->mmu_model)) {
1989 env->tlb_need_flush = 0;
1990 tlb_flush(env_cpu(env));
1991 } else
1992 #endif /* defined(TARGET_PPC64) */
1993 switch (env->mmu_model) {
1994 case POWERPC_MMU_SOFT_6xx:
1995 case POWERPC_MMU_SOFT_74xx:
1996 ppc6xx_tlb_invalidate_all(env);
1997 break;
1998 case POWERPC_MMU_SOFT_4xx:
1999 case POWERPC_MMU_SOFT_4xx_Z:
2000 ppc4xx_tlb_invalidate_all(env);
2001 break;
2002 case POWERPC_MMU_REAL:
2003 cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
2004 break;
2005 case POWERPC_MMU_MPC8xx:
2006 /* XXX: TODO */
2007 cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
2008 break;
2009 case POWERPC_MMU_BOOKE:
2010 tlb_flush(env_cpu(env));
2011 break;
2012 case POWERPC_MMU_BOOKE206:
2013 booke206_flush_tlb(env, -1, 0);
2014 break;
2015 case POWERPC_MMU_32B:
2016 case POWERPC_MMU_601:
2017 env->tlb_need_flush = 0;
2018 tlb_flush(env_cpu(env));
2019 break;
2020 default:
2021 /* XXX: TODO */
2022 cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
2023 break;
2027 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
2029 #if !defined(FLUSH_ALL_TLBS)
2030 addr &= TARGET_PAGE_MASK;
2031 #if defined(TARGET_PPC64)
2032 if (mmu_is_64bit(env->mmu_model)) {
2033 /* tlbie invalidates TLBs for all segments */
2035 * XXX: given the fact that there are too many segments to invalidate,
2036 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2037 * we just invalidate all TLBs
2039 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2040 } else
2041 #endif /* defined(TARGET_PPC64) */
2042 switch (env->mmu_model) {
2043 case POWERPC_MMU_SOFT_6xx:
2044 case POWERPC_MMU_SOFT_74xx:
2045 ppc6xx_tlb_invalidate_virt(env, addr, 0);
2046 if (env->id_tlbs == 1) {
2047 ppc6xx_tlb_invalidate_virt(env, addr, 1);
2049 break;
2050 case POWERPC_MMU_32B:
2051 case POWERPC_MMU_601:
2053 * Actual CPUs invalidate entire congruence classes based on
2054 * the geometry of their TLBs and some OSes take that into
2055 * account; we just mark the TLB to be flushed later (context
2056 * synchronizing event or sync instruction on 32-bit).
2058 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2059 break;
2060 default:
2061 /* Should never reach here with other MMU models */
2062 assert(0);
2064 #else
2065 ppc_tlb_invalidate_all(env);
2066 #endif
2069 /*****************************************************************************/
2070 /* Special registers manipulation */
2071 #if defined(TARGET_PPC64)
2072 void ppc_store_ptcr(CPUPPCState *env, target_ulong value)
2074 PowerPCCPU *cpu = env_archcpu(env);
2075 target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS;
2076 target_ulong patbsize = value & PTCR_PATS;
2078 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
2080 assert(!cpu->vhyp);
2081 assert(env->mmu_model & POWERPC_MMU_3_00);
2083 if (value & ~ptcr_mask) {
2084 error_report("Invalid bits 0x"TARGET_FMT_lx" set in PTCR",
2085 value & ~ptcr_mask);
2086 value &= ptcr_mask;
2089 if (patbsize > 24) {
2090 error_report("Invalid Partition Table size 0x" TARGET_FMT_lx
2091 " stored in PTCR", patbsize);
2092 return;
2095 env->spr[SPR_PTCR] = value;
2098 #endif /* defined(TARGET_PPC64) */
2100 /* Segment registers load and store */
2101 target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
2103 #if defined(TARGET_PPC64)
2104 if (mmu_is_64bit(env->mmu_model)) {
2105 /* XXX */
2106 return 0;
2108 #endif
2109 return env->sr[sr_num];
2112 void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
2114 qemu_log_mask(CPU_LOG_MMU,
2115 "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
2116 (int)srnum, value, env->sr[srnum]);
2117 #if defined(TARGET_PPC64)
2118 if (mmu_is_64bit(env->mmu_model)) {
2119 PowerPCCPU *cpu = env_archcpu(env);
2120 uint64_t esid, vsid;
2122 /* ESID = srnum */
2123 esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
2125 /* VSID = VSID */
2126 vsid = (value & 0xfffffff) << 12;
2127 /* flags = flags */
2128 vsid |= ((value >> 27) & 0xf) << 8;
2130 ppc_store_slb(cpu, srnum, esid, vsid);
2131 } else
2132 #endif
2133 if (env->sr[srnum] != value) {
2134 env->sr[srnum] = value;
2135 /*
2136 * Invalidating 256 MB of virtual memory in 4 kB pages takes far
2137 * longer than flushing the whole TLB.
2138 */
2139 #if !defined(FLUSH_ALL_TLBS) && 0
2141 target_ulong page, end;
2142 /* Invalidate 256 MB of virtual memory */
2143 page = (16 << 20) * srnum;
2144 end = page + (16 << 20);
2145 for (; page != end; page += TARGET_PAGE_SIZE) {
2146 tlb_flush_page(env_cpu(env), page);
2147 }
2148 }
2149 #else
2150 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
2151 #endif
2152 }
2153 }
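/*
 * Illustrative sketch (not built): the segment-register packing that
 * helper_store_sr() performs for 64-bit MMUs above.  This hypothetical,
 * self-contained helper restates the bit shuffling with plain integers;
 * the real code goes through ppc_store_slb() and the SLB_* constants.
 */
#if 0 /* example only */
#include <stdint.h>

static void example_sr_to_slb(uint32_t srnum, uint32_t sr_value,
                              uint64_t *esid, uint64_t *vsid)
{
    /* ESID: segment number in EA bits 31:28, plus the SLB valid bit */
    *esid = ((uint64_t)(srnum & 0xf) << 28) | (1ULL << 27); /* SLB_ESID_V */
    /* VSID: the low 28 bits of the segment register value */
    *vsid = (uint64_t)(sr_value & 0xfffffff) << 12;
    /* Protection/no-execute flags from the top of the SR go to bits 11:8 */
    *vsid |= (uint64_t)((sr_value >> 27) & 0xf) << 8;
}
#endif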
2155 /* TLB management */
2156 void helper_tlbia(CPUPPCState *env)
2158 ppc_tlb_invalidate_all(env);
2161 void helper_tlbie(CPUPPCState *env, target_ulong addr)
2163 ppc_tlb_invalidate_one(env, addr);
2166 void helper_tlbiva(CPUPPCState *env, target_ulong addr)
2168 /* tlbiva instruction only exists on BookE */
2169 assert(env->mmu_model == POWERPC_MMU_BOOKE);
2170 /* XXX: TODO */
2171 cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
2174 /* Software driven TLBs management */
2175 /* PowerPC 602/603 software TLB load instructions helpers */
2176 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2178 target_ulong RPN, CMP, EPN;
2179 int way;
2181 RPN = env->spr[SPR_RPA];
2182 if (is_code) {
2183 CMP = env->spr[SPR_ICMP];
2184 EPN = env->spr[SPR_IMISS];
2185 } else {
2186 CMP = env->spr[SPR_DCMP];
2187 EPN = env->spr[SPR_DMISS];
2189 way = (env->spr[SPR_SRR1] >> 17) & 1;
2190 (void)EPN; /* avoid a compiler warning */
2191 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2192 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2193 RPN, way);
2194 /* Store this TLB */
2195 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2196 way, is_code, CMP, RPN);
2199 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
2201 do_6xx_tlb(env, EPN, 0);
2204 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
2206 do_6xx_tlb(env, EPN, 1);
2209 /* PowerPC 74xx software TLB load instructions helpers */
2210 static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2212 target_ulong RPN, CMP, EPN;
2213 int way;
2215 RPN = env->spr[SPR_PTELO];
2216 CMP = env->spr[SPR_PTEHI];
2217 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2218 way = env->spr[SPR_TLBMISS] & 0x3;
2219 (void)EPN; /* avoid a compiler warning */
2220 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2221 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2222 RPN, way);
2223 /* Store this TLB */
2224 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2225 way, is_code, CMP, RPN);
2228 void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
2230 do_74xx_tlb(env, EPN, 0);
2233 void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
2235 do_74xx_tlb(env, EPN, 1);
2236 }
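/*
 * Illustrative sketch (not built): the two software-reload flavours above
 * differ only in where their inputs live.  The 602/603 keeps the PTE
 * halves in RPA and ICMP/DCMP and encodes the victim way in SRR1 bit 17;
 * the 74xx uses PTEHI/PTELO and packs the way into the low two bits of
 * TLBMISS.  These hypothetical helpers restate the way extraction only.
 */
#if 0 /* example only */
#include <stdint.h>

static inline int example_6xx_way(uint32_t srr1)
{
    return (srr1 >> 17) & 1;           /* one bit selects the way */
}

static inline int example_74xx_way(uint32_t tlbmiss)
{
    return tlbmiss & 0x3;              /* low two bits of TLBMISS */
}
#endif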
2238 /*****************************************************************************/
2239 /* PowerPC 601 specific instructions (POWER bridge) */
2241 target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
2243 mmu_ctx_t ctx;
2244 int nb_BATs;
2245 target_ulong ret = 0;
2246
2247 /*
2248 * We don't have to generate many instances of this instruction,
2249 * as rac is supervisor only.
2250 *
2251 * XXX: FIX THIS: Pretend we have no BAT
2252 */
2253 nb_BATs = env->nb_BATs;
2254 env->nb_BATs = 0;
2255 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
2256 ret = ctx.raddr;
2258 env->nb_BATs = nb_BATs;
2259 return ret;
2262 static inline target_ulong booke_tlb_to_page_size(int size)
2264 return 1024 << (2 * size);
2267 static inline int booke_page_size_to_tlb(target_ulong page_size)
2269 int size;
2271 switch (page_size) {
2272 case 0x00000400UL:
2273 size = 0x0;
2274 break;
2275 case 0x00001000UL:
2276 size = 0x1;
2277 break;
2278 case 0x00004000UL:
2279 size = 0x2;
2280 break;
2281 case 0x00010000UL:
2282 size = 0x3;
2283 break;
2284 case 0x00040000UL:
2285 size = 0x4;
2286 break;
2287 case 0x00100000UL:
2288 size = 0x5;
2289 break;
2290 case 0x00400000UL:
2291 size = 0x6;
2292 break;
2293 case 0x01000000UL:
2294 size = 0x7;
2295 break;
2296 case 0x04000000UL:
2297 size = 0x8;
2298 break;
2299 case 0x10000000UL:
2300 size = 0x9;
2301 break;
2302 case 0x40000000UL:
2303 size = 0xA;
2304 break;
2305 #if defined(TARGET_PPC64)
2306 case 0x000100000000ULL:
2307 size = 0xB;
2308 break;
2309 case 0x000400000000ULL:
2310 size = 0xC;
2311 break;
2312 case 0x001000000000ULL:
2313 size = 0xD;
2314 break;
2315 case 0x004000000000ULL:
2316 size = 0xE;
2317 break;
2318 case 0x010000000000ULL:
2319 size = 0xF;
2320 break;
2321 #endif
2322 default:
2323 size = -1;
2324 break;
2327 return size;
2328 }
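/*
 * Illustrative sketch (not built): the switch above is simply the inverse
 * of booke_tlb_to_page_size(), i.e. a TSIZE value of n encodes a page of
 * 1 KiB * 4^n.  The self-contained versions below, with hypothetical
 * names, show the mapping and a round-trip check.
 */
#if 0 /* example only */
#include <assert.h>
#include <stdint.h>

static uint64_t example_tlb_to_page_size(int size)
{
    return 1024ULL << (2 * size);      /* 0 -> 1 KiB, 1 -> 4 KiB, ... */
}

static int example_page_size_to_tlb(uint64_t page_size)
{
    for (int size = 0; size <= 0xF; size++) {
        if (example_tlb_to_page_size(size) == page_size) {
            return size;
        }
    }
    return -1;                         /* not a supported page size */
}

static void example_round_trip(void)
{
    for (int size = 0; size <= 0xF; size++) {
        assert(example_page_size_to_tlb(example_tlb_to_page_size(size))
               == size);
    }
}
#endif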
2330 /* Helpers for 4xx TLB management */
2331 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2333 #define PPC4XX_TLBHI_V 0x00000040
2334 #define PPC4XX_TLBHI_E 0x00000020
2335 #define PPC4XX_TLBHI_SIZE_MIN 0
2336 #define PPC4XX_TLBHI_SIZE_MAX 7
2337 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2338 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2339 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2341 #define PPC4XX_TLBLO_EX 0x00000200
2342 #define PPC4XX_TLBLO_WR 0x00000100
2343 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2344 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
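/*
 * Illustrative sketch (not built): how the PPC4XX_TLBHI_* fields above
 * carve up a raw TLBHI word, mirroring helper_4xx_tlbre_hi() and
 * helper_4xx_tlbwe_hi() below.  The struct and function names are
 * hypothetical.
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdint.h>

struct example_4xx_tlbhi {
    uint32_t epn;           /* effective page number, size-aligned */
    bool valid;             /* PPC4XX_TLBHI_V */
    bool little_endian;     /* PPC4XX_TLBHI_E */
    int size;               /* SIZE field: page is 1 KiB << (2 * size) */
};

static struct example_4xx_tlbhi example_decode_4xx_tlbhi(uint32_t val)
{
    struct example_4xx_tlbhi hi;

    hi.valid = (val & 0x00000040) != 0;                 /* PPC4XX_TLBHI_V */
    hi.little_endian = (val & 0x00000020) != 0;         /* PPC4XX_TLBHI_E */
    hi.size = (val >> 7) & 0x7;                         /* SIZE field */
    hi.epn = val & ~((1024u << (2 * hi.size)) - 1);     /* align to size */
    return hi;
}
#endif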
2346 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
2348 ppcemb_tlb_t *tlb;
2349 target_ulong ret;
2350 int size;
2352 entry &= PPC4XX_TLB_ENTRY_MASK;
2353 tlb = &env->tlb.tlbe[entry];
2354 ret = tlb->EPN;
2355 if (tlb->prot & PAGE_VALID) {
2356 ret |= PPC4XX_TLBHI_V;
2358 size = booke_page_size_to_tlb(tlb->size);
2359 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
2360 size = PPC4XX_TLBHI_SIZE_DEFAULT;
2362 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
2363 env->spr[SPR_40x_PID] = tlb->PID;
2364 return ret;
2367 target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
2369 ppcemb_tlb_t *tlb;
2370 target_ulong ret;
2372 entry &= PPC4XX_TLB_ENTRY_MASK;
2373 tlb = &env->tlb.tlbe[entry];
2374 ret = tlb->RPN;
2375 if (tlb->prot & PAGE_EXEC) {
2376 ret |= PPC4XX_TLBLO_EX;
2378 if (tlb->prot & PAGE_WRITE) {
2379 ret |= PPC4XX_TLBLO_WR;
2381 return ret;
2384 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
2385 target_ulong val)
2387 CPUState *cs = env_cpu(env);
2388 ppcemb_tlb_t *tlb;
2389 target_ulong page, end;
2391 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
2392 val);
2393 entry &= PPC4XX_TLB_ENTRY_MASK;
2394 tlb = &env->tlb.tlbe[entry];
2395 /* Invalidate previous TLB (if it's valid) */
2396 if (tlb->prot & PAGE_VALID) {
2397 end = tlb->EPN + tlb->size;
2398 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
2399 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2400 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2401 tlb_flush_page(cs, page);
2404 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
2405 & PPC4XX_TLBHI_SIZE_MASK);
2406 /*
2407 * We cannot handle TLB size < TARGET_PAGE_SIZE.
2408 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
2409 */
2410 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
2411 cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
2412 "are not supported (%d)\n"
2413 "Please implement TARGET_PAGE_BITS_VARY\n",
2414 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
2416 tlb->EPN = val & ~(tlb->size - 1);
2417 if (val & PPC4XX_TLBHI_V) {
2418 tlb->prot |= PAGE_VALID;
2419 if (val & PPC4XX_TLBHI_E) {
2420 /* XXX: TO BE FIXED */
2421 cpu_abort(cs,
2422 "Little-endian TLB entries are not supported by now\n");
2424 } else {
2425 tlb->prot &= ~PAGE_VALID;
2427 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2428 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2429 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2430 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2431 tlb->prot & PAGE_READ ? 'r' : '-',
2432 tlb->prot & PAGE_WRITE ? 'w' : '-',
2433 tlb->prot & PAGE_EXEC ? 'x' : '-',
2434 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2435 /* Invalidate new TLB (if valid) */
2436 if (tlb->prot & PAGE_VALID) {
2437 end = tlb->EPN + tlb->size;
2438 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
2439 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2440 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2441 tlb_flush_page(cs, page);
2446 void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
2447 target_ulong val)
2449 ppcemb_tlb_t *tlb;
2451 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
2452 val);
2453 entry &= PPC4XX_TLB_ENTRY_MASK;
2454 tlb = &env->tlb.tlbe[entry];
2455 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
2456 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
2457 tlb->prot = PAGE_READ;
2458 if (val & PPC4XX_TLBLO_EX) {
2459 tlb->prot |= PAGE_EXEC;
2461 if (val & PPC4XX_TLBLO_WR) {
2462 tlb->prot |= PAGE_WRITE;
2464 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2465 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2466 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2467 tlb->prot & PAGE_READ ? 'r' : '-',
2468 tlb->prot & PAGE_WRITE ? 'w' : '-',
2469 tlb->prot & PAGE_EXEC ? 'x' : '-',
2470 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2473 target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
2475 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
2478 /* PowerPC 440 TLB management */
2479 void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
2480 target_ulong value)
2482 ppcemb_tlb_t *tlb;
2483 target_ulong EPN, RPN, size;
2484 int do_flush_tlbs;
2486 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
2487 __func__, word, (int)entry, value);
2488 do_flush_tlbs = 0;
2489 entry &= 0x3F;
2490 tlb = &env->tlb.tlbe[entry];
2491 switch (word) {
2492 default:
2493 /* Just here to please gcc */
2494 case 0:
2495 EPN = value & 0xFFFFFC00;
2496 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
2497 do_flush_tlbs = 1;
2499 tlb->EPN = EPN;
2500 size = booke_tlb_to_page_size((value >> 4) & 0xF);
2501 if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
2502 do_flush_tlbs = 1;
2504 tlb->size = size;
2505 tlb->attr &= ~0x1;
2506 tlb->attr |= (value >> 8) & 1;
2507 if (value & 0x200) {
2508 tlb->prot |= PAGE_VALID;
2509 } else {
2510 if (tlb->prot & PAGE_VALID) {
2511 tlb->prot &= ~PAGE_VALID;
2512 do_flush_tlbs = 1;
2515 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2516 if (do_flush_tlbs) {
2517 tlb_flush(env_cpu(env));
2519 break;
2520 case 1:
2521 RPN = value & 0xFFFFFC0F;
2522 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
2523 tlb_flush(env_cpu(env));
2525 tlb->RPN = RPN;
2526 break;
2527 case 2:
2528 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
2529 tlb->prot = tlb->prot & PAGE_VALID;
2530 if (value & 0x1) {
2531 tlb->prot |= PAGE_READ << 4;
2533 if (value & 0x2) {
2534 tlb->prot |= PAGE_WRITE << 4;
2536 if (value & 0x4) {
2537 tlb->prot |= PAGE_EXEC << 4;
2539 if (value & 0x8) {
2540 tlb->prot |= PAGE_READ;
2542 if (value & 0x10) {
2543 tlb->prot |= PAGE_WRITE;
2545 if (value & 0x20) {
2546 tlb->prot |= PAGE_EXEC;
2548 break;
2549 }
2550 }
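/*
 * Illustrative sketch (not built): the 440 soft TLB is programmed one
 * 32-bit word at a time, as the switch above shows: word 0 carries the
 * EPN, page-size code and valid bit, word 1 the RPN, word 2 the storage
 * attributes and six R/W/X permission bits.  The hypothetical decoder
 * below restates the word-0 layout only.
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdint.h>

struct example_440_word0 {
    uint32_t epn;           /* effective page number, bits 31:10 */
    uint64_t size;          /* page size in bytes, from the 4-bit code */
    bool valid;             /* bit 9 */
};

static struct example_440_word0 example_decode_440_word0(uint32_t value)
{
    struct example_440_word0 w0;

    w0.epn = value & 0xFFFFFC00;
    w0.size = 1024ULL << (2 * ((value >> 4) & 0xF));
    w0.valid = (value & 0x200) != 0;
    return w0;
}
#endif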
2552 target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
2553 target_ulong entry)
2555 ppcemb_tlb_t *tlb;
2556 target_ulong ret;
2557 int size;
2559 entry &= 0x3F;
2560 tlb = &env->tlb.tlbe[entry];
2561 switch (word) {
2562 default:
2563 /* Just here to please gcc */
2564 case 0:
2565 ret = tlb->EPN;
2566 size = booke_page_size_to_tlb(tlb->size);
2567 if (size < 0 || size > 0xF) {
2568 size = 1;
2570 ret |= size << 4;
2571 if (tlb->attr & 0x1) {
2572 ret |= 0x100;
2574 if (tlb->prot & PAGE_VALID) {
2575 ret |= 0x200;
2577 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2578 env->spr[SPR_440_MMUCR] |= tlb->PID;
2579 break;
2580 case 1:
2581 ret = tlb->RPN;
2582 break;
2583 case 2:
2584 ret = tlb->attr & ~0x1;
2585 if (tlb->prot & (PAGE_READ << 4)) {
2586 ret |= 0x1;
2588 if (tlb->prot & (PAGE_WRITE << 4)) {
2589 ret |= 0x2;
2591 if (tlb->prot & (PAGE_EXEC << 4)) {
2592 ret |= 0x4;
2594 if (tlb->prot & PAGE_READ) {
2595 ret |= 0x8;
2597 if (tlb->prot & PAGE_WRITE) {
2598 ret |= 0x10;
2600 if (tlb->prot & PAGE_EXEC) {
2601 ret |= 0x20;
2603 break;
2605 return ret;
2608 target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
2610 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
2613 /* PowerPC BookE 2.06 TLB management */
2615 static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
2617 uint32_t tlbncfg = 0;
2618 int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
2619 int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
2620 int tlb;
2622 tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
2623 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
2625 if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
2626 cpu_abort(env_cpu(env), "we don't support HES yet\n");
2629 return booke206_get_tlbm(env, tlb, ea, esel);
2630 }
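/*
 * Illustrative sketch (not built): the MAS register protocol that the
 * BookE 2.06 helpers below implement.  Software describes an entry in
 * MAS0 (TLB array and entry select), MAS1 (valid/TID/TSIZE), MAS2
 * (EPN and WIMGE) and MAS3/MAS7 (RPN and permissions), then executes
 * tlbwe; tlbre and tlbsx run the flow in the other direction.  The
 * hypothetical sequence below sketches the write direction only, using
 * the SPR and helper names from this file.
 */
#if 0 /* example only */
static void example_booke206_write_entry(CPUPPCState *env, int tlbsel,
                                         int esel, target_ulong epn,
                                         uint64_t rpn_and_perms)
{
    env->spr[SPR_BOOKE_MAS0] = (tlbsel << MAS0_TLBSEL_SHIFT)
                             | (esel << MAS0_ESEL_SHIFT);
    env->spr[SPR_BOOKE_MAS1] = MAS1_VALID;  /* plus TID/TSIZE as needed */
    env->spr[SPR_BOOKE_MAS2] = epn & MAS2_EPN_MASK;
    env->spr[SPR_BOOKE_MAS3] = (uint32_t)rpn_and_perms;
    env->spr[SPR_BOOKE_MAS7] = rpn_and_perms >> 32;

    helper_booke206_tlbwe(env);             /* commit to the soft TLB */
}
#endif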
2632 void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
2634 env->spr[pidn] = pid;
2635 /* changing PIDs means we're in a different address space now */
2636 tlb_flush(env_cpu(env));
2639 void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
2641 env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
2642 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
2644 void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
2646 env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
2647 tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
2650 static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
2652 if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
2653 tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
2654 } else {
2655 tlb_flush(env_cpu(env));
2659 void helper_booke206_tlbwe(CPUPPCState *env)
2661 uint32_t tlbncfg, tlbn;
2662 ppcmas_tlb_t *tlb;
2663 uint32_t size_tlb, size_ps;
2664 target_ulong mask;
2667 switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
2668 case MAS0_WQ_ALWAYS:
2669 /* good to go, write that entry */
2670 break;
2671 case MAS0_WQ_COND:
2672 /* XXX check if reserved */
2673 if (0) {
2674 return;
2676 break;
2677 case MAS0_WQ_CLR_RSRV:
2678 /* XXX clear entry */
2679 return;
2680 default:
2681 /* no idea what to do */
2682 return;
2685 if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
2686 !msr_gs) {
2687 /* XXX we don't support direct LRAT setting yet */
2688 fprintf(stderr, "cpu: don't support LRAT setting yet\n");
2689 return;
2692 tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
2693 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
2695 tlb = booke206_cur_tlb(env);
2697 if (!tlb) {
2698 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2699 POWERPC_EXCP_INVAL |
2700 POWERPC_EXCP_INVAL_INVAL, GETPC());
2703 /* check that we support the targeted size */
2704 size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2705 size_ps = booke206_tlbnps(env, tlbn);
2706 if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
2707 !(size_ps & (1 << size_tlb))) {
2708 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
2709 POWERPC_EXCP_INVAL |
2710 POWERPC_EXCP_INVAL_INVAL, GETPC());
2713 if (msr_gs) {
2714 cpu_abort(env_cpu(env), "missing HV implementation\n");
2717 if (tlb->mas1 & MAS1_VALID) {
2718 /*
2719 * Invalidate the page in QEMU TLB if it was a valid entry.
2720 *
2721 * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
2722 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
2723 * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
2724 *
2725 * "Note that when an L2 TLB entry is written, it may be displacing an
2726 * already valid entry in the same L2 TLB location (a victim). If a
2727 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
2728 * TLB entry is automatically invalidated."
2729 */
2730 flush_page(env, tlb);
2733 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
2734 env->spr[SPR_BOOKE_MAS3];
2735 tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
2737 if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
2738 /* For a TLB that has a fixed size, TSIZE is ignored with MAV2 */
2739 booke206_fixed_size_tlbn(env, tlbn, tlb);
2740 } else {
2741 if (!(tlbncfg & TLBnCFG_AVAIL)) {
2742 /* force !AVAIL TLB entries to correct page size */
2743 tlb->mas1 &= ~MAS1_TSIZE_MASK;
2744 /* XXX can be configured in MMUCSR0 */
2745 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
2749 /* Make a mask from TLB size to discard invalid bits in EPN field */
2750 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
2751 /* Add a mask for page attributes */
2752 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
2754 if (!msr_cm) {
2755 /*
2756 * Executing a tlbwe instruction in 32-bit mode will set bits
2757 * 0:31 of the TLB EPN field to zero.
2758 */
2759 mask &= 0xffffffff;
2762 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;
2764 if (!(tlbncfg & TLBnCFG_IPROT)) {
2765 /* no IPROT supported by TLB */
2766 tlb->mas1 &= ~MAS1_IPROT;
2769 flush_page(env, tlb);
2770 }
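/*
 * Illustrative sketch (not built): the EPN mask built near the end of
 * helper_booke206_tlbwe() above.  ~(page_size - 1) discards offset bits
 * that cannot be part of the EPN, the MAS2 attribute bits are kept, and
 * in 32-bit mode the upper word is cleared.  The helper name and the
 * 64 KiB value in the comments are hypothetical.
 */
#if 0 /* example only */
#include <stdint.h>

static uint64_t example_epn_mask(uint64_t page_size, int is_64bit_mode,
                                 uint64_t attr_bits)
{
    uint64_t mask = ~(page_size - 1);   /* 64 KiB -> 0xffff...ffff0000 */

    mask |= attr_bits;                  /* keep MAS2 ACM/VLE/WIMGE bits */
    if (!is_64bit_mode) {
        mask &= 0xffffffffull;          /* EPN bits above 31 forced to 0 */
    }
    return mask;
}
#endif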
2772 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
2774 int tlbn = booke206_tlbm_to_tlbn(env, tlb);
2775 int way = booke206_tlbm_to_way(env, tlb);
2777 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
2778 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
2779 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
2781 env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
2782 env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
2783 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
2784 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
2787 void helper_booke206_tlbre(CPUPPCState *env)
2789 ppcmas_tlb_t *tlb = NULL;
2791 tlb = booke206_cur_tlb(env);
2792 if (!tlb) {
2793 env->spr[SPR_BOOKE_MAS1] = 0;
2794 } else {
2795 booke206_tlb_to_mas(env, tlb);
2799 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
2801 ppcmas_tlb_t *tlb = NULL;
2802 int i, j;
2803 hwaddr raddr;
2804 uint32_t spid, sas;
2806 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
2807 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
2809 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2810 int ways = booke206_tlb_ways(env, i);
2812 for (j = 0; j < ways; j++) {
2813 tlb = booke206_get_tlbm(env, i, address, j);
2815 if (!tlb) {
2816 continue;
2819 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
2820 continue;
2823 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
2824 continue;
2827 booke206_tlb_to_mas(env, tlb);
2828 return;
2832 /* no entry found, fill with defaults */
2833 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
2834 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
2835 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
2836 env->spr[SPR_BOOKE_MAS3] = 0;
2837 env->spr[SPR_BOOKE_MAS7] = 0;
2839 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
2840 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
2843 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
2844 << MAS1_TID_SHIFT;
2846 /* next victim logic */
2847 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
2848 env->last_way++;
2849 env->last_way &= booke206_tlb_ways(env, 0) - 1;
2850 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
2851 }
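/*
 * Illustrative sketch (not built): the "next victim" bookkeeping above is
 * a round-robin walk over the ways of TLB0, assuming the way count is a
 * power of two; the current way goes into ESEL and the incremented one
 * into the NV field of MAS0.
 */
#if 0 /* example only */
static int example_next_victim_way(CPUPPCState *env)
{
    int ways = booke206_tlb_ways(env, 0);

    env->last_way = (env->last_way + 1) & (ways - 1);
    return env->last_way;
}
#endif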
2853 static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
2854 uint32_t ea)
2856 int i;
2857 int ways = booke206_tlb_ways(env, tlbn);
2858 target_ulong mask;
2860 for (i = 0; i < ways; i++) {
2861 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
2862 if (!tlb) {
2863 continue;
2865 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
2866 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
2867 !(tlb->mas1 & MAS1_IPROT)) {
2868 tlb->mas1 &= ~MAS1_VALID;
2873 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
2875 CPUState *cs;
2877 if (address & 0x4) {
2878 /* flush all entries */
2879 if (address & 0x8) {
2880 /* flush all of TLB1 */
2881 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
2882 } else {
2883 /* flush all of TLB0 */
2884 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
2886 return;
2889 if (address & 0x8) {
2890 /* flush TLB1 entries */
2891 booke206_invalidate_ea_tlb(env, 1, address);
2892 CPU_FOREACH(cs) {
2893 tlb_flush(cs);
2895 } else {
2896 /* flush TLB0 entries */
2897 booke206_invalidate_ea_tlb(env, 0, address);
2898 CPU_FOREACH(cs) {
2899 tlb_flush_page(cs, address & MAS2_EPN_MASK);
2900 }
2901 }
2902 }
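/*
 * Illustrative sketch (not built): tlbivax encodes its operation in the
 * low bits of the effective address, as decoded above: bit 3 picks TLB1
 * over TLB0 and bit 2 asks for the whole array to be flushed rather than
 * a single EA.  The helper name below is hypothetical.
 */
#if 0 /* example only */
#include <stdbool.h>
#include <stdint.h>

static void example_decode_tlbivax(uint64_t ea, bool *whole_array,
                                   bool *use_tlb1)
{
    *whole_array = (ea & 0x4) != 0;    /* INV_ALL: flush the whole array */
    *use_tlb1 = (ea & 0x8) != 0;       /* 0 -> TLB0, 1 -> TLB1 */
}
#endif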
2904 void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
2906 /* XXX missing LPID handling */
2907 booke206_flush_tlb(env, -1, 1);
2910 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
2912 int i, j;
2913 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
2914 ppcmas_tlb_t *tlb = env->tlb.tlbm;
2915 int tlb_size;
2917 /* XXX missing LPID handling */
2918 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2919 tlb_size = booke206_tlb_size(env, i);
2920 for (j = 0; j < tlb_size; j++) {
2921 if (!(tlb[j].mas1 & MAS1_IPROT) &&
2922 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
2923 tlb[j].mas1 &= ~MAS1_VALID;
2926 tlb += booke206_tlb_size(env, i);
2928 tlb_flush(env_cpu(env));
2931 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
2933 int i, j;
2934 ppcmas_tlb_t *tlb;
2935 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
2936 int pid = tid >> MAS6_SPID_SHIFT;
2937 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
2938 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
2939 /* XXX check for unsupported isize and raise an invalid opcode then */
2940 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
2941 /* XXX implement MAV2 handling */
2942 bool mav2 = false;
2944 /* XXX missing LPID handling */
2945 /* flush by pid and ea */
2946 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
2947 int ways = booke206_tlb_ways(env, i);
2949 for (j = 0; j < ways; j++) {
2950 tlb = booke206_get_tlbm(env, i, address, j);
2951 if (!tlb) {
2952 continue;
2954 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
2955 (tlb->mas1 & MAS1_IPROT) ||
2956 ((tlb->mas1 & MAS1_IND) != ind) ||
2957 ((tlb->mas8 & MAS8_TGS) != sgs)) {
2958 continue;
2960 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
2961 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
2962 continue;
2964 /* XXX e500mc doesn't match SAS, but other cores might */
2965 tlb->mas1 &= ~MAS1_VALID;
2968 tlb_flush(env_cpu(env));
2971 void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
2973 int flags = 0;
2975 if (type & 2) {
2976 flags |= BOOKE206_FLUSH_TLB1;
2979 if (type & 4) {
2980 flags |= BOOKE206_FLUSH_TLB0;
2983 booke206_flush_tlb(env, flags, 1);
2987 void helper_check_tlb_flush_local(CPUPPCState *env)
2989 check_tlb_flush(env, false);
2992 void helper_check_tlb_flush_global(CPUPPCState *env)
2994 check_tlb_flush(env, true);
2997 /*****************************************************************************/
2999 bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
3000 MMUAccessType access_type, int mmu_idx,
3001 bool probe, uintptr_t retaddr)
3003 PowerPCCPU *cpu = POWERPC_CPU(cs);
3004 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
3005 CPUPPCState *env = &cpu->env;
3006 int ret;
3008 if (pcc->handle_mmu_fault) {
3009 ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx);
3010 } else {
3011 ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
3013 if (unlikely(ret != 0)) {
3014 if (probe) {
3015 return false;
3017 raise_exception_err_ra(env, cs->exception_index, env->error_code,
3018 retaddr);
3020 return true;
3021 }
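/*
 * Illustrative sketch (not built): the contract of the TCG tlb_fill hook
 * implemented above.  On success the MMU-model handler has already
 * installed a QEMU TLB entry and the hook returns true; on failure it
 * returns false for a non-faulting probe, otherwise it raises the guest
 * exception recorded by the handler.  The caller-side helper below is
 * hypothetical.
 */
#if 0 /* example only */
static bool example_probe_translation(CPUState *cs, vaddr addr, int mmu_idx)
{
    /* probe=true: report failure instead of raising a guest exception */
    return ppc_cpu_tlb_fill(cs, addr, 1, MMU_DATA_LOAD, mmu_idx, true, 0);
}
#endif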