[qemu-kvm.git] / target-ppc / mmu_helper.c
1 /*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "cpu.h"
20 #include "helper.h"
21 #include "kvm.h"
22 #include "kvm_ppc.h"
24 //#define DEBUG_MMU
25 //#define DEBUG_BATS
26 //#define DEBUG_SLB
27 //#define DEBUG_SOFTWARE_TLB
28 //#define DUMP_PAGE_TABLES
30 //#define FLUSH_ALL_TLBS
32 #ifdef DEBUG_MMU
33 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
34 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
35 #else
36 # define LOG_MMU(...) do { } while (0)
37 # define LOG_MMU_STATE(...) do { } while (0)
38 #endif
40 #ifdef DEBUG_SOFTWARE_TLB
41 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
42 #else
43 # define LOG_SWTLB(...) do { } while (0)
44 #endif
46 #ifdef DEBUG_BATS
47 # define LOG_BATS(...) qemu_log(__VA_ARGS__)
48 #else
49 # define LOG_BATS(...) do { } while (0)
50 #endif
52 #ifdef DEBUG_SLB
53 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
54 #else
55 # define LOG_SLB(...) do { } while (0)
56 #endif
58 /*****************************************************************************/
59 /* PowerPC MMU emulation */
60 #if defined(CONFIG_USER_ONLY)
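/* In user-only emulation there is no MMU to model: every fault simply
 * raises an ISI (instruction fetch, rw == 2) or DSI (data) exception.
 * 0x40000000 is the "translation not found" status bit and 0x02000000
 * marks a store, as reported through SRR1/DSISR.
 */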
61 int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
62 int mmu_idx)
64 int exception, error_code;
66 if (rw == 2) {
67 exception = POWERPC_EXCP_ISI;
68 error_code = 0x40000000;
69 } else {
70 exception = POWERPC_EXCP_DSI;
71 error_code = 0x40000000;
72 if (rw) {
73 error_code |= 0x02000000;
75 env->spr[SPR_DAR] = address;
76 env->spr[SPR_DSISR] = error_code;
78 env->exception_index = exception;
79 env->error_code = error_code;
81 return 1;
84 #else
85 /* Common routines used by software and hardware TLBs emulation */
86 static inline int pte_is_valid(target_ulong pte0)
88 return pte0 & 0x80000000 ? 1 : 0;
91 static inline void pte_invalidate(target_ulong *pte0)
93 *pte0 &= ~0x80000000;
96 #if defined(TARGET_PPC64)
97 static inline int pte64_is_valid(target_ulong pte0)
99 return pte0 & 0x0000000000000001ULL ? 1 : 0;
102 static inline void pte64_invalidate(target_ulong *pte0)
104 *pte0 &= ~0x0000000000000001ULL;
106 #endif
108 #define PTE_PTEM_MASK 0x7FFFFFBF
109 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
110 #if defined(TARGET_PPC64)
111 #define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
112 #define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
113 #endif
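/* Layout of a hashed-page-table entry as used by the masks above:
 * pte0 holds the valid bit, the VSID/AVPN, the H (secondary hash) bit and
 * the API (abbreviated page index); pte1 holds the RPN plus the R (0x100)
 * and C (0x80) reference/change bits, the WIMG storage attributes and the
 * PP protection bits. PTE_PTEM_MASK/PTE64_PTEM_MASK extract the fields
 * that must match the search key, while PTE_CHECK_MASK/PTE64_CHECK_MASK
 * cover the fields (RPN, WIMG, PP) that must be identical between
 * duplicate matches.
 */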
115 static inline int pp_check(int key, int pp, int nx)
117 int access;
119 /* Compute access rights */
120 /* When pp is 3/7, the result is undefined. Set it to noaccess */
121 access = 0;
122 if (key == 0) {
123 switch (pp) {
124 case 0x0:
125 case 0x1:
126 case 0x2:
127 access |= PAGE_WRITE;
128 /* No break here */
129 case 0x3:
130 case 0x6:
131 access |= PAGE_READ;
132 break;
134 } else {
135 switch (pp) {
136 case 0x0:
137 case 0x6:
138 access = 0;
139 break;
140 case 0x1:
141 case 0x3:
142 access = PAGE_READ;
143 break;
144 case 0x2:
145 access = PAGE_READ | PAGE_WRITE;
146 break;
149 if (nx == 0) {
150 access |= PAGE_EXEC;
153 return access;
156 static inline int check_prot(int prot, int rw, int access_type)
158 int ret;
160 if (access_type == ACCESS_CODE) {
161 if (prot & PAGE_EXEC) {
162 ret = 0;
163 } else {
164 ret = -2;
166 } else if (rw) {
167 if (prot & PAGE_WRITE) {
168 ret = 0;
169 } else {
170 ret = -2;
172 } else {
173 if (prot & PAGE_READ) {
174 ret = 0;
175 } else {
176 ret = -2;
180 return ret;
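/* Compare a candidate PTE against the current translation context: the
 * entry must be valid, its H bit must match the hash (primary/secondary)
 * being walked, and its VSID/API must equal ctx->ptem. Only then are the
 * PP bits (plus the no-execute/guarded bits on 64-bit) turned into page
 * protection flags and checked against the requested access.
 */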
183 static inline int pte_check(mmu_ctx_t *ctx, int is_64b, target_ulong pte0,
184 target_ulong pte1, int h, int rw, int type)
186 target_ulong ptem, mmask;
187 int access, ret, pteh, ptev, pp;
189 ret = -1;
190 /* Check validity and table match */
191 #if defined(TARGET_PPC64)
192 if (is_64b) {
193 ptev = pte64_is_valid(pte0);
194 pteh = (pte0 >> 1) & 1;
195 } else
196 #endif
198 ptev = pte_is_valid(pte0);
199 pteh = (pte0 >> 6) & 1;
201 if (ptev && h == pteh) {
202 /* Check vsid & api */
203 #if defined(TARGET_PPC64)
204 if (is_64b) {
205 ptem = pte0 & PTE64_PTEM_MASK;
206 mmask = PTE64_CHECK_MASK;
207 pp = (pte1 & 0x00000003) | ((pte1 >> 61) & 0x00000004);
208 ctx->nx = (pte1 >> 2) & 1; /* No execute bit */
209 ctx->nx |= (pte1 >> 3) & 1; /* Guarded bit */
210 } else
211 #endif
213 ptem = pte0 & PTE_PTEM_MASK;
214 mmask = PTE_CHECK_MASK;
215 pp = pte1 & 0x00000003;
217 if (ptem == ctx->ptem) {
218 if (ctx->raddr != (target_phys_addr_t)-1ULL) {
219 /* all matches should have equal RPN, WIMG & PP */
220 if ((ctx->raddr & mmask) != (pte1 & mmask)) {
221 qemu_log("Bad RPN/WIMG/PP\n");
222 return -3;
225 /* Compute access rights */
226 access = pp_check(ctx->key, pp, ctx->nx);
227 /* Keep the matching PTE information */
228 ctx->raddr = pte1;
229 ctx->prot = access;
230 ret = check_prot(ctx->prot, rw, type);
231 if (ret == 0) {
232 /* Access granted */
233 LOG_MMU("PTE access granted !\n");
234 } else {
235 /* Access right violation */
236 LOG_MMU("PTE access rejected\n");
241 return ret;
244 static inline int pte32_check(mmu_ctx_t *ctx, target_ulong pte0,
245 target_ulong pte1, int h, int rw, int type)
247 return pte_check(ctx, 0, pte0, pte1, h, rw, type);
250 #if defined(TARGET_PPC64)
251 static inline int pte64_check(mmu_ctx_t *ctx, target_ulong pte0,
252 target_ulong pte1, int h, int rw, int type)
254 return pte_check(ctx, 1, pte0, pte1, h, rw, type);
256 #endif
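/* Maintain the R (referenced, 0x100) and C (changed, 0x80) bits of pte1.
 * The C bit is only set on a successful write; to guarantee the first
 * write to a clean page is seen, PAGE_WRITE is removed from the mapping
 * so the store faults and comes back through here.
 */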
258 static inline int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
259 int ret, int rw)
261 int store = 0;
263 /* Update page flags */
264 if (!(*pte1p & 0x00000100)) {
265 /* Update accessed flag */
266 *pte1p |= 0x00000100;
267 store = 1;
269 if (!(*pte1p & 0x00000080)) {
270 if (rw == 1 && ret == 0) {
271 /* Update changed flag */
272 *pte1p |= 0x00000080;
273 store = 1;
274 } else {
275 /* Force page fault for first write access */
276 ctx->prot &= ~PAGE_WRITE;
280 return store;
283 /* Software driven TLB helpers */
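/* The 6xx/74xx software TLBs are direct-indexed: the low bits of the
 * effective page number select an entry within each way, and the ways are
 * laid out consecutively in env->tlb.tlb6. When the core has split
 * instruction/data TLBs (env->id_tlbs == 1), the ITLB entries follow the
 * nb_tlb DTLB entries.
 */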
284 static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
285 int way, int is_code)
287 int nr;
289 /* Select TLB num in a way from address */
290 nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
291 /* Select TLB way */
292 nr += env->tlb_per_way * way;
293 /* 6xx have separate TLBs for instructions and data */
294 if (is_code && env->id_tlbs == 1) {
295 nr += env->nb_tlb;
298 return nr;
301 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
303 ppc6xx_tlb_t *tlb;
304 int nr, max;
306 /* LOG_SWTLB("Invalidate all TLBs\n"); */
307 /* Invalidate all defined software TLBs */
308 max = env->nb_tlb;
309 if (env->id_tlbs == 1) {
310 max *= 2;
312 for (nr = 0; nr < max; nr++) {
313 tlb = &env->tlb.tlb6[nr];
314 pte_invalidate(&tlb->pte0);
316 tlb_flush(env, 1);
319 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
320 target_ulong eaddr,
321 int is_code, int match_epn)
323 #if !defined(FLUSH_ALL_TLBS)
324 ppc6xx_tlb_t *tlb;
325 int way, nr;
327 /* Invalidate ITLB + DTLB, all ways */
328 for (way = 0; way < env->nb_ways; way++) {
329 nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
330 tlb = &env->tlb.tlb6[nr];
331 if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
332 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
333 env->nb_tlb, eaddr);
334 pte_invalidate(&tlb->pte0);
335 tlb_flush_page(env, tlb->EPN);
338 #else
339 /* XXX: the PowerPC specification says this is valid as well */
340 ppc6xx_tlb_invalidate_all(env);
341 #endif
344 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
345 target_ulong eaddr, int is_code)
347 ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
350 static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
351 int is_code, target_ulong pte0, target_ulong pte1)
353 ppc6xx_tlb_t *tlb;
354 int nr;
356 nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
357 tlb = &env->tlb.tlb6[nr];
358 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
359 " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
360 /* Invalidate any pending reference in QEMU for this virtual address */
361 ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
362 tlb->pte0 = pte0;
363 tlb->pte1 = pte1;
364 tlb->EPN = EPN;
365 /* Store last way for LRU mechanism */
366 env->last_way = way;
369 static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
370 target_ulong eaddr, int rw, int access_type)
372 ppc6xx_tlb_t *tlb;
373 int nr, best, way;
374 int ret;
376 best = -1;
377 ret = -1; /* No TLB found */
378 for (way = 0; way < env->nb_ways; way++) {
379 nr = ppc6xx_tlb_getnum(env, eaddr, way,
380 access_type == ACCESS_CODE ? 1 : 0);
381 tlb = &env->tlb.tlb6[nr];
382 /* This test "emulates" the PTE index match for hardware TLBs */
383 if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
384 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
385 "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
386 pte_is_valid(tlb->pte0) ? "valid" : "inval",
387 tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
388 continue;
390 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
391 TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
392 pte_is_valid(tlb->pte0) ? "valid" : "inval",
393 tlb->EPN, eaddr, tlb->pte1,
394 rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
395 switch (pte32_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
396 case -3:
397 /* TLB inconsistency */
398 return -1;
399 case -2:
400 /* Access violation */
401 ret = -2;
402 best = nr;
403 break;
404 case -1:
405 default:
406 /* No match */
407 break;
408 case 0:
409 /* access granted */
410 /* XXX: we should keep looping to check that all TLBs are consistent,
411 * but we can speed up the whole thing since the
412 * result would be undefined if the TLBs are not consistent.
413 */
414 ret = 0;
415 best = nr;
416 goto done;
419 if (best != -1) {
420 done:
421 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
422 ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
423 /* Update page flags */
424 pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
427 return ret;
430 /* Perform BAT hit & translation */
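/* A BAT (Block Address Translation) register pair maps one contiguous
 * block of effective addresses without going through the page table.
 * The upper register holds the block effective page index (BEPI), the
 * block length mask (BL) and the supervisor/user valid bits; the lower
 * register holds the physical block number (BRPN) and the PP protection
 * bits. The 601 variant below uses a slightly different encoding.
 */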
431 static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
432 int *validp, int *protp, target_ulong *BATu,
433 target_ulong *BATl)
435 target_ulong bl;
436 int pp, valid, prot;
438 bl = (*BATu & 0x00001FFC) << 15;
439 valid = 0;
440 prot = 0;
441 if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
442 ((msr_pr != 0) && (*BATu & 0x00000001))) {
443 valid = 1;
444 pp = *BATl & 0x00000003;
445 if (pp != 0) {
446 prot = PAGE_READ | PAGE_EXEC;
447 if (pp == 0x2) {
448 prot |= PAGE_WRITE;
452 *blp = bl;
453 *validp = valid;
454 *protp = prot;
457 static inline void bat_601_size_prot(CPUPPCState *env, target_ulong *blp,
458 int *validp, int *protp,
459 target_ulong *BATu, target_ulong *BATl)
461 target_ulong bl;
462 int key, pp, valid, prot;
464 bl = (*BATl & 0x0000003F) << 17;
465 LOG_BATS("b %02x ==> bl " TARGET_FMT_lx " msk " TARGET_FMT_lx "\n",
466 (uint8_t)(*BATl & 0x0000003F), bl, ~bl);
467 prot = 0;
468 valid = (*BATl >> 6) & 1;
469 if (valid) {
470 pp = *BATu & 0x00000003;
471 if (msr_pr == 0) {
472 key = (*BATu >> 3) & 1;
473 } else {
474 key = (*BATu >> 2) & 1;
476 prot = pp_check(key, pp, 0);
478 *blp = bl;
479 *validp = valid;
480 *protp = prot;
483 static inline int get_bat(CPUPPCState *env, mmu_ctx_t *ctx,
484 target_ulong virtual, int rw, int type)
486 target_ulong *BATlt, *BATut, *BATu, *BATl;
487 target_ulong BEPIl, BEPIu, bl;
488 int i, valid, prot;
489 int ret = -1;
491 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
492 type == ACCESS_CODE ? 'I' : 'D', virtual);
493 switch (type) {
494 case ACCESS_CODE:
495 BATlt = env->IBAT[1];
496 BATut = env->IBAT[0];
497 break;
498 default:
499 BATlt = env->DBAT[1];
500 BATut = env->DBAT[0];
501 break;
503 for (i = 0; i < env->nb_BATs; i++) {
504 BATu = &BATut[i];
505 BATl = &BATlt[i];
506 BEPIu = *BATu & 0xF0000000;
507 BEPIl = *BATu & 0x0FFE0000;
508 if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
509 bat_601_size_prot(env, &bl, &valid, &prot, BATu, BATl);
510 } else {
511 bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
513 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
514 " BATl " TARGET_FMT_lx "\n", __func__,
515 type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
516 if ((virtual & 0xF0000000) == BEPIu &&
517 ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
518 /* BAT matches */
519 if (valid != 0) {
520 /* Get physical address */
521 ctx->raddr = (*BATl & 0xF0000000) |
522 ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
523 (virtual & 0x0001F000);
524 /* Compute access rights */
525 ctx->prot = prot;
526 ret = check_prot(ctx->prot, rw, type);
527 if (ret == 0) {
528 LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
529 i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
530 ctx->prot & PAGE_WRITE ? 'W' : '-');
532 break;
536 if (ret < 0) {
537 #if defined(DEBUG_BATS)
538 if (qemu_log_enabled()) {
539 LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
540 for (i = 0; i < 4; i++) {
541 BATu = &BATut[i];
542 BATl = &BATlt[i];
543 BEPIu = *BATu & 0xF0000000;
544 BEPIl = *BATu & 0x0FFE0000;
545 bl = (*BATu & 0x00001FFC) << 15;
546 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
547 " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
548 TARGET_FMT_lx " " TARGET_FMT_lx "\n",
549 __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
550 *BATu, *BATl, BEPIu, BEPIl, bl);
553 #endif
555 /* No hit */
556 return ret;
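/* Each hash value selects a PTE group (PTEG) of 8 entries, so the byte
 * offset into the hash table is hash * 8 * pte_size (8 bytes per PTE on
 * 32-bit, 16 bytes on 64-bit), wrapped to the table size by htab_mask.
 */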
559 static inline target_phys_addr_t get_pteg_offset(CPUPPCState *env,
560 target_phys_addr_t hash,
561 int pte_size)
563 return (hash * pte_size * 8) & env->htab_mask;
566 /* PTE table lookup */
567 static inline int find_pte2(CPUPPCState *env, mmu_ctx_t *ctx, int is_64b, int h,
568 int rw, int type, int target_page_bits)
570 target_phys_addr_t pteg_off;
571 target_ulong pte0, pte1;
572 int i, good = -1;
573 int ret, r;
575 ret = -1; /* No entry found */
576 pteg_off = get_pteg_offset(env, ctx->hash[h],
577 is_64b ? HASH_PTE_SIZE_64 : HASH_PTE_SIZE_32);
578 for (i = 0; i < 8; i++) {
579 #if defined(TARGET_PPC64)
580 if (is_64b) {
581 if (env->external_htab) {
582 pte0 = ldq_p(env->external_htab + pteg_off + (i * 16));
583 pte1 = ldq_p(env->external_htab + pteg_off + (i * 16) + 8);
584 } else {
585 pte0 = ldq_phys(env->htab_base + pteg_off + (i * 16));
586 pte1 = ldq_phys(env->htab_base + pteg_off + (i * 16) + 8);
589 r = pte64_check(ctx, pte0, pte1, h, rw, type);
590 LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
591 TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
592 pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
593 (int)((pte0 >> 1) & 1), ctx->ptem);
594 } else
595 #endif
597 if (env->external_htab) {
598 pte0 = ldl_p(env->external_htab + pteg_off + (i * 8));
599 pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4);
600 } else {
601 pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
602 pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);
604 r = pte32_check(ctx, pte0, pte1, h, rw, type);
605 LOG_MMU("Load pte from " TARGET_FMT_lx " => " TARGET_FMT_lx " "
606 TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
607 pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
608 (int)((pte0 >> 6) & 1), ctx->ptem);
610 switch (r) {
611 case -3:
612 /* PTE inconsistency */
613 return -1;
614 case -2:
615 /* Access violation */
616 ret = -2;
617 good = i;
618 break;
619 case -1:
620 default:
621 /* No PTE match */
622 break;
623 case 0:
624 /* access granted */
625 /* XXX: we should keep looping to check that all PTEs are consistent,
626 * but we can speed up the whole thing since the
627 * result would be undefined if the PTEs are not consistent.
628 */
629 ret = 0;
630 good = i;
631 goto done;
634 if (good != -1) {
635 done:
636 LOG_MMU("found PTE at addr " TARGET_FMT_lx " prot=%01x ret=%d\n",
637 ctx->raddr, ctx->prot, ret);
638 /* Update page flags */
639 pte1 = ctx->raddr;
640 if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
641 #if defined(TARGET_PPC64)
642 if (is_64b) {
643 if (env->external_htab) {
644 stq_p(env->external_htab + pteg_off + (good * 16) + 8,
645 pte1);
646 } else {
647 stq_phys_notdirty(env->htab_base + pteg_off +
648 (good * 16) + 8, pte1);
650 } else
651 #endif
653 if (env->external_htab) {
654 stl_p(env->external_htab + pteg_off + (good * 8) + 4,
655 pte1);
656 } else {
657 stl_phys_notdirty(env->htab_base + pteg_off +
658 (good * 8) + 4, pte1);
664 /* We have a TLB that stores 4K pages, so let's
665 * split a huge page into 4K chunks */
666 if (target_page_bits != TARGET_PAGE_BITS) {
667 ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
668 & TARGET_PAGE_MASK;
670 return ret;
673 static inline int find_pte(CPUPPCState *env, mmu_ctx_t *ctx, int h, int rw,
674 int type, int target_page_bits)
676 #if defined(TARGET_PPC64)
677 if (env->mmu_model & POWERPC_MMU_64) {
678 return find_pte2(env, ctx, 1, h, rw, type, target_page_bits);
680 #endif
682 return find_pte2(env, ctx, 0, h, rw, type, target_page_bits);
685 #if defined(TARGET_PPC64)
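/* The SLB (Segment Lookaside Buffer) replaces the segment registers on
 * 64-bit implementations: each valid entry maps a 256MB or 1T effective
 * segment (ESID) to a virtual segment id (VSID) plus the protection and
 * page size attributes used by the hash table lookup.
 */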
686 static inline ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
688 uint64_t esid_256M, esid_1T;
689 int n;
691 LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
693 esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
694 esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
696 for (n = 0; n < env->slb_nr; n++) {
697 ppc_slb_t *slb = &env->slb[n];
699 LOG_SLB("%s: slot %d %016" PRIx64 " %016"
700 PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
701 /* We check for 1T matches on all MMUs here - if the MMU
702 * doesn't have 1T segment support, we will have prevented 1T
703 * entries from being inserted in the slbmte code. */
704 if (((slb->esid == esid_256M) &&
705 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
706 || ((slb->esid == esid_1T) &&
707 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
708 return slb;
712 return NULL;
715 /*****************************************************************************/
716 /* SPR accesses */
718 void helper_slbia(CPUPPCState *env)
720 int n, do_invalidate;
722 do_invalidate = 0;
723 /* XXX: Warning: slbia never invalidates the first segment */
724 for (n = 1; n < env->slb_nr; n++) {
725 ppc_slb_t *slb = &env->slb[n];
727 if (slb->esid & SLB_ESID_V) {
728 slb->esid &= ~SLB_ESID_V;
729 /* XXX: given the fact that segment size is 256 MB or 1 TB,
730 * and we still don't have a tlb_flush_mask(env, n, mask)
731 * in QEMU, we just invalidate all TLBs
732 */
733 do_invalidate = 1;
736 if (do_invalidate) {
737 tlb_flush(env, 1);
741 void helper_slbie(CPUPPCState *env, target_ulong addr)
743 ppc_slb_t *slb;
745 slb = slb_lookup(env, addr);
746 if (!slb) {
747 return;
750 if (slb->esid & SLB_ESID_V) {
751 slb->esid &= ~SLB_ESID_V;
753 /* XXX: given the fact that segment size is 256 MB or 1 TB,
754 * and we still don't have a tlb_flush_mask(env, n, mask)
755 * in QEMU, we just invalidate all TLBs
756 */
757 tlb_flush(env, 1);
761 int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
763 int slot = rb & 0xfff;
764 ppc_slb_t *slb = &env->slb[slot];
766 if (rb & (0x1000 - env->slb_nr)) {
767 return -1; /* Reserved bits set or slot too high */
769 if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
770 return -1; /* Bad segment size */
772 if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
773 return -1; /* 1T segment on MMU that doesn't support it */
776 /* Mask out the slot number as we store the entry */
777 slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
778 slb->vsid = rs;
780 LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
781 " %016" PRIx64 "\n", __func__, slot, rb, rs,
782 slb->esid, slb->vsid);
784 return 0;
787 static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
788 target_ulong *rt)
790 int slot = rb & 0xfff;
791 ppc_slb_t *slb = &env->slb[slot];
793 if (slot >= env->slb_nr) {
794 return -1;
797 *rt = slb->esid;
798 return 0;
801 static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
802 target_ulong *rt)
804 int slot = rb & 0xfff;
805 ppc_slb_t *slb = &env->slb[slot];
807 if (slot >= env->slb_nr) {
808 return -1;
811 *rt = slb->vsid;
812 return 0;
814 #endif /* defined(TARGET_PPC64) */
816 /* Perform segment based translation */
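/* On 32-bit hash MMUs the segment register selected by EA[0:3] supplies
 * the VSID; on 64-bit MMUs it comes from the SLB. The primary hash is
 * vsid ^ page_index and the secondary hash is its ones' complement, each
 * pointing at a PTEG that is searched by find_pte() below.
 */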
817 static inline int get_segment(CPUPPCState *env, mmu_ctx_t *ctx,
818 target_ulong eaddr, int rw, int type)
820 target_phys_addr_t hash;
821 target_ulong vsid;
822 int ds, pr, target_page_bits;
823 int ret, ret2;
825 pr = msr_pr;
826 ctx->eaddr = eaddr;
827 #if defined(TARGET_PPC64)
828 if (env->mmu_model & POWERPC_MMU_64) {
829 ppc_slb_t *slb;
830 target_ulong pageaddr;
831 int segment_bits;
833 LOG_MMU("Check SLBs\n");
834 slb = slb_lookup(env, eaddr);
835 if (!slb) {
836 return -5;
839 if (slb->vsid & SLB_VSID_B) {
840 vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
841 segment_bits = 40;
842 } else {
843 vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
844 segment_bits = 28;
847 target_page_bits = (slb->vsid & SLB_VSID_L)
848 ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
849 ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP)
850 : (slb->vsid & SLB_VSID_KS));
851 ds = 0;
852 ctx->nx = !!(slb->vsid & SLB_VSID_N);
854 pageaddr = eaddr & ((1ULL << segment_bits)
855 - (1ULL << target_page_bits));
856 if (slb->vsid & SLB_VSID_B) {
857 hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits);
858 } else {
859 hash = vsid ^ (pageaddr >> target_page_bits);
861 /* Only 5 bits of the page index are used in the AVPN */
862 ctx->ptem = (slb->vsid & SLB_VSID_PTEM) |
863 ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80));
864 } else
865 #endif /* defined(TARGET_PPC64) */
867 target_ulong sr, pgidx;
869 sr = env->sr[eaddr >> 28];
870 ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
871 ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
872 ds = sr & 0x80000000 ? 1 : 0;
873 ctx->nx = sr & 0x10000000 ? 1 : 0;
874 vsid = sr & 0x00FFFFFF;
875 target_page_bits = TARGET_PAGE_BITS;
876 LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip="
877 TARGET_FMT_lx " lr=" TARGET_FMT_lx
878 " ir=%d dr=%d pr=%d %d t=%d\n",
879 eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
880 (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
881 pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
882 hash = vsid ^ pgidx;
883 ctx->ptem = (vsid << 7) | (pgidx >> 10);
885 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
886 ctx->key, ds, ctx->nx, vsid);
887 ret = -1;
888 if (!ds) {
889 /* Check if instruction fetch is allowed, if needed */
890 if (type != ACCESS_CODE || ctx->nx == 0) {
891 /* Page address translation */
892 LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
893 " hash " TARGET_FMT_plx "\n",
894 env->htab_base, env->htab_mask, hash);
895 ctx->hash[0] = hash;
896 ctx->hash[1] = ~hash;
898 /* Initialize real address with an invalid value */
899 ctx->raddr = (target_phys_addr_t)-1ULL;
900 if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx ||
901 env->mmu_model == POWERPC_MMU_SOFT_74xx)) {
902 /* Software TLB search */
903 ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
904 } else {
905 LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
906 " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
907 " hash=" TARGET_FMT_plx "\n",
908 env->htab_base, env->htab_mask, vsid, ctx->ptem,
909 ctx->hash[0]);
910 /* Primary table lookup */
911 ret = find_pte(env, ctx, 0, rw, type, target_page_bits);
912 if (ret < 0) {
913 /* Secondary table lookup */
914 if (eaddr != 0xEFFFFFFF) {
915 LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
916 " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
917 " hash=" TARGET_FMT_plx "\n", env->htab_base,
918 env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
920 ret2 = find_pte(env, ctx, 1, rw, type,
921 target_page_bits);
922 if (ret2 != -1) {
923 ret = ret2;
927 #if defined(DUMP_PAGE_TABLES)
928 if (qemu_log_enabled()) {
929 target_phys_addr_t curaddr;
930 uint32_t a0, a1, a2, a3;
932 qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
933 "\n", sdr, mask + 0x80);
934 for (curaddr = sdr; curaddr < (sdr + mask + 0x80);
935 curaddr += 16) {
936 a0 = ldl_phys(curaddr);
937 a1 = ldl_phys(curaddr + 4);
938 a2 = ldl_phys(curaddr + 8);
939 a3 = ldl_phys(curaddr + 12);
940 if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
941 qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
942 curaddr, a0, a1, a2, a3);
946 #endif
947 } else {
948 LOG_MMU("No access allowed\n");
949 ret = -3;
951 } else {
952 target_ulong sr;
954 LOG_MMU("direct store...\n");
955 /* Direct-store segment : absolutely *BUGGY* for now */
957 /* Direct-store implies a 32-bit MMU.
958 * Check the Segment Register's bus unit ID (BUID).
959 */
960 sr = env->sr[eaddr >> 28];
961 if ((sr & 0x1FF00000) >> 20 == 0x07f) {
962 /* Memory-forced I/O controller interface access */
963 /* If T=1 and BUID=x'07F', the 601 performs a memory access
964 * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
965 */
966 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
967 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
968 return 0;
971 switch (type) {
972 case ACCESS_INT:
973 /* Integer load/store : only access allowed */
974 break;
975 case ACCESS_CODE:
976 /* No code fetch is allowed in direct-store areas */
977 return -4;
978 case ACCESS_FLOAT:
979 /* Floating point load/store */
980 return -4;
981 case ACCESS_RES:
982 /* lwarx, ldarx or stwcx. */
983 return -4;
984 case ACCESS_CACHE:
985 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
986 /* Should make the instruction a no-op.
987 * As it already is a no-op, it's quite easy :-)
988 */
989 ctx->raddr = eaddr;
990 return 0;
991 case ACCESS_EXT:
992 /* eciwx or ecowx */
993 return -4;
994 default:
995 qemu_log("ERROR: instruction should not need "
996 "address translation\n");
997 return -4;
999 if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
1000 ctx->raddr = eaddr;
1001 ret = 2;
1002 } else {
1003 ret = -2;
1007 return ret;
1010 /* Generic TLB check function for embedded PowerPC implementations */
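/* An embedded (4xx/BookE) TLB entry covers a power-of-two sized region
 * (tlb->size); a PID of 0 in the entry acts as a wildcard that matches
 * any process. When 'ext' is set the physical address is extended to
 * 36 bits using the low nibble of the RPN.
 */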
1011 static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
1012 target_phys_addr_t *raddrp,
1013 target_ulong address, uint32_t pid, int ext,
1014 int i)
1016 target_ulong mask;
1018 /* Check valid flag */
1019 if (!(tlb->prot & PAGE_VALID)) {
1020 return -1;
1022 mask = ~(tlb->size - 1);
1023 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
1024 " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
1025 mask, (uint32_t)tlb->PID, tlb->prot);
1026 /* Check PID */
1027 if (tlb->PID != 0 && tlb->PID != pid) {
1028 return -1;
1030 /* Check effective address */
1031 if ((address & mask) != tlb->EPN) {
1032 return -1;
1034 *raddrp = (tlb->RPN & mask) | (address & ~mask);
1035 if (ext) {
1036 /* Extend the physical address to 36 bits */
1037 *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
1040 return 0;
1043 /* Generic TLB search function for PowerPC embedded implementations */
1044 static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
1045 uint32_t pid)
1047 ppcemb_tlb_t *tlb;
1048 target_phys_addr_t raddr;
1049 int i, ret;
1051 /* Default return value is no match */
1052 ret = -1;
1053 for (i = 0; i < env->nb_tlb; i++) {
1054 tlb = &env->tlb.tlbe[i];
1055 if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
1056 ret = i;
1057 break;
1061 return ret;
1064 /* Helpers specific to PowerPC 40x implementations */
1065 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
1067 ppcemb_tlb_t *tlb;
1068 int i;
1070 for (i = 0; i < env->nb_tlb; i++) {
1071 tlb = &env->tlb.tlbe[i];
1072 tlb->prot &= ~PAGE_VALID;
1074 tlb_flush(env, 1);
1077 static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
1078 target_ulong eaddr, uint32_t pid)
1080 #if !defined(FLUSH_ALL_TLBS)
1081 ppcemb_tlb_t *tlb;
1082 target_phys_addr_t raddr;
1083 target_ulong page, end;
1084 int i;
1086 for (i = 0; i < env->nb_tlb; i++) {
1087 tlb = &env->tlb.tlbe[i];
1088 if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
1089 end = tlb->EPN + tlb->size;
1090 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
1091 tlb_flush_page(env, page);
1093 tlb->prot &= ~PAGE_VALID;
1094 break;
1097 #else
1098 ppc4xx_tlb_invalidate_all(env);
1099 #endif
1102 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1103 target_ulong address, int rw,
1104 int access_type)
1106 ppcemb_tlb_t *tlb;
1107 target_phys_addr_t raddr;
1108 int i, ret, zsel, zpr, pr;
1110 ret = -1;
1111 raddr = (target_phys_addr_t)-1ULL;
1112 pr = msr_pr;
1113 for (i = 0; i < env->nb_tlb; i++) {
1114 tlb = &env->tlb.tlbe[i];
1115 if (ppcemb_tlb_check(env, tlb, &raddr, address,
1116 env->spr[SPR_40x_PID], 0, i) < 0) {
1117 continue;
1119 zsel = (tlb->attr >> 4) & 0xF;
1120 zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
1121 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1122 __func__, i, zsel, zpr, rw, tlb->attr);
1123 /* Check execute enable bit */
1124 switch (zpr) {
1125 case 0x2:
1126 if (pr != 0) {
1127 goto check_perms;
1129 /* No break here */
1130 case 0x3:
1131 /* All accesses granted */
1132 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1133 ret = 0;
1134 break;
1135 case 0x0:
1136 if (pr != 0) {
1137 /* Raise Zone protection fault. */
1138 env->spr[SPR_40x_ESR] = 1 << 22;
1139 ctx->prot = 0;
1140 ret = -2;
1141 break;
1143 /* No break here */
1144 case 0x1:
1145 check_perms:
1146 /* Check from TLB entry */
1147 ctx->prot = tlb->prot;
1148 ret = check_prot(ctx->prot, rw, access_type);
1149 if (ret == -2) {
1150 env->spr[SPR_40x_ESR] = 0;
1152 break;
1154 if (ret >= 0) {
1155 ctx->raddr = raddr;
1156 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1157 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1158 ret);
1159 return 0;
1162 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1163 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1165 return ret;
1168 void store_40x_sler(CPUPPCState *env, uint32_t val)
1170 /* XXX: TO BE FIXED */
1171 if (val != 0x00000000) {
1172 cpu_abort(env, "Little-endian regions are not supported by now\n");
1174 env->spr[SPR_405_SLER] = val;
1177 static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
1178 target_phys_addr_t *raddr, int *prot,
1179 target_ulong address, int rw,
1180 int access_type, int i)
1182 int ret, prot2;
1184 if (ppcemb_tlb_check(env, tlb, raddr, address,
1185 env->spr[SPR_BOOKE_PID],
1186 !env->nb_pids, i) >= 0) {
1187 goto found_tlb;
1190 if (env->spr[SPR_BOOKE_PID1] &&
1191 ppcemb_tlb_check(env, tlb, raddr, address,
1192 env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
1193 goto found_tlb;
1196 if (env->spr[SPR_BOOKE_PID2] &&
1197 ppcemb_tlb_check(env, tlb, raddr, address,
1198 env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
1199 goto found_tlb;
1202 LOG_SWTLB("%s: TLB entry not found\n", __func__);
1203 return -1;
1205 found_tlb:
1207 if (msr_pr != 0) {
1208 prot2 = tlb->prot & 0xF;
1209 } else {
1210 prot2 = (tlb->prot >> 4) & 0xF;
1213 /* Check the address space */
1214 if (access_type == ACCESS_CODE) {
1215 if (msr_ir != (tlb->attr & 1)) {
1216 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1217 return -1;
1220 *prot = prot2;
1221 if (prot2 & PAGE_EXEC) {
1222 LOG_SWTLB("%s: good TLB!\n", __func__);
1223 return 0;
1226 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
1227 ret = -3;
1228 } else {
1229 if (msr_dr != (tlb->attr & 1)) {
1230 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1231 return -1;
1234 *prot = prot2;
1235 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
1236 LOG_SWTLB("%s: found TLB!\n", __func__);
1237 return 0;
1240 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
1241 ret = -2;
1244 return ret;
1247 static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1248 target_ulong address, int rw,
1249 int access_type)
1251 ppcemb_tlb_t *tlb;
1252 target_phys_addr_t raddr;
1253 int i, ret;
1255 ret = -1;
1256 raddr = (target_phys_addr_t)-1ULL;
1257 for (i = 0; i < env->nb_tlb; i++) {
1258 tlb = &env->tlb.tlbe[i];
1259 ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
1260 access_type, i);
1261 if (!ret) {
1262 break;
1266 if (ret >= 0) {
1267 ctx->raddr = raddr;
1268 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1269 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1270 ret);
1271 } else {
1272 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1273 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1276 return ret;
1279 void booke206_flush_tlb(CPUPPCState *env, int flags, const int check_iprot)
1281 int tlb_size;
1282 int i, j;
1283 ppcmas_tlb_t *tlb = env->tlb.tlbm;
1285 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1286 if (flags & (1 << i)) {
1287 tlb_size = booke206_tlb_size(env, i);
1288 for (j = 0; j < tlb_size; j++) {
1289 if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
1290 tlb[j].mas1 &= ~MAS1_VALID;
1294 tlb += booke206_tlb_size(env, i);
1297 tlb_flush(env, 1);
1300 target_phys_addr_t booke206_tlb_to_page_size(CPUPPCState *env,
1301 ppcmas_tlb_t *tlb)
1303 int tlbm_size;
1305 tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
1307 return 1024ULL << tlbm_size;
1310 /* TLB check function for MAS based SoftTLBs */
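/* BookE 2.06 (MAS-format) entries: MAS1 carries the valid bit, TID and
 * TSIZE (the page size is 1KB << TSIZE as computed above), MAS2 carries
 * the EPN and WIMGE attributes, and MAS7_3 carries the RPN together with
 * the separate supervisor/user RWX permission bits.
 */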
1311 int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
1312 target_phys_addr_t *raddrp,
1313 target_ulong address, uint32_t pid)
1315 target_ulong mask;
1316 uint32_t tlb_pid;
1318 /* Check valid flag */
1319 if (!(tlb->mas1 & MAS1_VALID)) {
1320 return -1;
1323 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
1324 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
1325 PRIx64 " mask=0x" TARGET_FMT_lx " MAS7_3=0x%" PRIx64 " MAS8=%x\n",
1326 __func__, address, pid, tlb->mas1, tlb->mas2, mask, tlb->mas7_3,
1327 tlb->mas8);
1329 /* Check PID */
1330 tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
1331 if (tlb_pid != 0 && tlb_pid != pid) {
1332 return -1;
1335 /* Check effective address */
1336 if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
1337 return -1;
1340 if (raddrp) {
1341 *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
1344 return 0;
1347 static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
1348 target_phys_addr_t *raddr, int *prot,
1349 target_ulong address, int rw,
1350 int access_type)
1352 int ret;
1353 int prot2 = 0;
1355 if (ppcmas_tlb_check(env, tlb, raddr, address,
1356 env->spr[SPR_BOOKE_PID]) >= 0) {
1357 goto found_tlb;
1360 if (env->spr[SPR_BOOKE_PID1] &&
1361 ppcmas_tlb_check(env, tlb, raddr, address,
1362 env->spr[SPR_BOOKE_PID1]) >= 0) {
1363 goto found_tlb;
1366 if (env->spr[SPR_BOOKE_PID2] &&
1367 ppcmas_tlb_check(env, tlb, raddr, address,
1368 env->spr[SPR_BOOKE_PID2]) >= 0) {
1369 goto found_tlb;
1372 LOG_SWTLB("%s: TLB entry not found\n", __func__);
1373 return -1;
1375 found_tlb:
1377 if (msr_pr != 0) {
1378 if (tlb->mas7_3 & MAS3_UR) {
1379 prot2 |= PAGE_READ;
1381 if (tlb->mas7_3 & MAS3_UW) {
1382 prot2 |= PAGE_WRITE;
1384 if (tlb->mas7_3 & MAS3_UX) {
1385 prot2 |= PAGE_EXEC;
1387 } else {
1388 if (tlb->mas7_3 & MAS3_SR) {
1389 prot2 |= PAGE_READ;
1391 if (tlb->mas7_3 & MAS3_SW) {
1392 prot2 |= PAGE_WRITE;
1394 if (tlb->mas7_3 & MAS3_SX) {
1395 prot2 |= PAGE_EXEC;
1399 /* Check the address space and permissions */
1400 if (access_type == ACCESS_CODE) {
1401 if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1402 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1403 return -1;
1406 *prot = prot2;
1407 if (prot2 & PAGE_EXEC) {
1408 LOG_SWTLB("%s: good TLB!\n", __func__);
1409 return 0;
1412 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
1413 ret = -3;
1414 } else {
1415 if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
1416 LOG_SWTLB("%s: AS doesn't match\n", __func__);
1417 return -1;
1420 *prot = prot2;
1421 if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
1422 LOG_SWTLB("%s: found TLB!\n", __func__);
1423 return 0;
1426 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
1427 ret = -2;
1430 return ret;
1433 static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
1434 target_ulong address, int rw,
1435 int access_type)
1437 ppcmas_tlb_t *tlb;
1438 target_phys_addr_t raddr;
1439 int i, j, ret;
1441 ret = -1;
1442 raddr = (target_phys_addr_t)-1ULL;
1444 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1445 int ways = booke206_tlb_ways(env, i);
1447 for (j = 0; j < ways; j++) {
1448 tlb = booke206_get_tlbm(env, i, address, j);
1449 if (!tlb) {
1450 continue;
1452 ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
1453 rw, access_type);
1454 if (ret != -1) {
1455 goto found_tlb;
1460 found_tlb:
1462 if (ret >= 0) {
1463 ctx->raddr = raddr;
1464 LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
1465 " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
1466 ret);
1467 } else {
1468 LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
1469 " %d %d\n", __func__, address, raddr, ctx->prot, ret);
1472 return ret;
1475 static const char *book3e_tsize_to_str[32] = {
1476 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1477 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1478 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1479 "1T", "2T"
1482 static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
1483 CPUPPCState *env)
1485 ppcemb_tlb_t *entry;
1486 int i;
1488 if (kvm_enabled() && !env->kvm_sw_tlb) {
1489 cpu_fprintf(f, "Cannot access KVM TLB\n");
1490 return;
1493 cpu_fprintf(f, "\nTLB:\n");
1494 cpu_fprintf(f, "Effective Physical Size PID Prot "
1495 "Attr\n");
1497 entry = &env->tlb.tlbe[0];
1498 for (i = 0; i < env->nb_tlb; i++, entry++) {
1499 target_phys_addr_t ea, pa;
1500 target_ulong mask;
1501 uint64_t size = (uint64_t)entry->size;
1502 char size_buf[20];
1504 /* Check valid flag */
1505 if (!(entry->prot & PAGE_VALID)) {
1506 continue;
1509 mask = ~(entry->size - 1);
1510 ea = entry->EPN & mask;
1511 pa = entry->RPN & mask;
1512 #if (TARGET_PHYS_ADDR_BITS >= 36)
1513 /* Extend the physical address to 36 bits */
1514 pa |= (target_phys_addr_t)(entry->RPN & 0xF) << 32;
1515 #endif
1516 size /= 1024;
1517 if (size >= 1024) {
1518 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024);
1519 } else {
1520 snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size);
1522 cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
1523 (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
1524 entry->prot, entry->attr);
1529 static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf,
1530 CPUPPCState *env, int tlbn, int offset,
1531 int tlbsize)
1533 ppcmas_tlb_t *entry;
1534 int i;
1536 cpu_fprintf(f, "\nTLB%d:\n", tlbn);
1537 cpu_fprintf(f, "Effective Physical Size TID TS SRWX"
1538 " URWX WIMGE U0123\n");
1540 entry = &env->tlb.tlbm[offset];
1541 for (i = 0; i < tlbsize; i++, entry++) {
1542 target_phys_addr_t ea, pa, size;
1543 int tsize;
1545 if (!(entry->mas1 & MAS1_VALID)) {
1546 continue;
1549 tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
1550 size = 1024ULL << tsize;
1551 ea = entry->mas2 & ~(size - 1);
1552 pa = entry->mas7_3 & ~(size - 1);
1554 cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
1555 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1556 (uint64_t)ea, (uint64_t)pa,
1557 book3e_tsize_to_str[tsize],
1558 (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
1559 (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
1560 entry->mas7_3 & MAS3_SR ? 'R' : '-',
1561 entry->mas7_3 & MAS3_SW ? 'W' : '-',
1562 entry->mas7_3 & MAS3_SX ? 'X' : '-',
1563 entry->mas7_3 & MAS3_UR ? 'R' : '-',
1564 entry->mas7_3 & MAS3_UW ? 'W' : '-',
1565 entry->mas7_3 & MAS3_UX ? 'X' : '-',
1566 entry->mas2 & MAS2_W ? 'W' : '-',
1567 entry->mas2 & MAS2_I ? 'I' : '-',
1568 entry->mas2 & MAS2_M ? 'M' : '-',
1569 entry->mas2 & MAS2_G ? 'G' : '-',
1570 entry->mas2 & MAS2_E ? 'E' : '-',
1571 entry->mas7_3 & MAS3_U0 ? '0' : '-',
1572 entry->mas7_3 & MAS3_U1 ? '1' : '-',
1573 entry->mas7_3 & MAS3_U2 ? '2' : '-',
1574 entry->mas7_3 & MAS3_U3 ? '3' : '-');
1578 static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
1579 CPUPPCState *env)
1581 int offset = 0;
1582 int i;
1584 if (kvm_enabled() && !env->kvm_sw_tlb) {
1585 cpu_fprintf(f, "Cannot access KVM TLB\n");
1586 return;
1589 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
1590 int size = booke206_tlb_size(env, i);
1592 if (size == 0) {
1593 continue;
1596 mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size);
1597 offset += size;
1601 #if defined(TARGET_PPC64)
1602 static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
1603 CPUPPCState *env)
1605 int i;
1606 uint64_t slbe, slbv;
1608 cpu_synchronize_state(env);
1610 cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
1611 for (i = 0; i < env->slb_nr; i++) {
1612 slbe = env->slb[i].esid;
1613 slbv = env->slb[i].vsid;
1614 if (slbe == 0 && slbv == 0) {
1615 continue;
1617 cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
1618 i, slbe, slbv);
1621 #endif
1623 void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
1625 switch (env->mmu_model) {
1626 case POWERPC_MMU_BOOKE:
1627 mmubooke_dump_mmu(f, cpu_fprintf, env);
1628 break;
1629 case POWERPC_MMU_BOOKE206:
1630 mmubooke206_dump_mmu(f, cpu_fprintf, env);
1631 break;
1632 #if defined(TARGET_PPC64)
1633 case POWERPC_MMU_64B:
1634 case POWERPC_MMU_2_06:
1635 case POWERPC_MMU_2_06d:
1636 mmubooks_dump_mmu(f, cpu_fprintf, env);
1637 break;
1638 #endif
1639 default:
1640 qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
1644 static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
1645 target_ulong eaddr, int rw)
1647 int in_plb, ret;
1649 ctx->raddr = eaddr;
1650 ctx->prot = PAGE_READ | PAGE_EXEC;
1651 ret = 0;
1652 switch (env->mmu_model) {
1653 case POWERPC_MMU_32B:
1654 case POWERPC_MMU_601:
1655 case POWERPC_MMU_SOFT_6xx:
1656 case POWERPC_MMU_SOFT_74xx:
1657 case POWERPC_MMU_SOFT_4xx:
1658 case POWERPC_MMU_REAL:
1659 case POWERPC_MMU_BOOKE:
1660 ctx->prot |= PAGE_WRITE;
1661 break;
1662 #if defined(TARGET_PPC64)
1663 case POWERPC_MMU_620:
1664 case POWERPC_MMU_64B:
1665 case POWERPC_MMU_2_06:
1666 case POWERPC_MMU_2_06d:
1667 /* Real addresses are 60 bits long */
1668 ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL;
1669 ctx->prot |= PAGE_WRITE;
1670 break;
1671 #endif
1672 case POWERPC_MMU_SOFT_4xx_Z:
1673 if (unlikely(msr_pe != 0)) {
1674 /* The 403 family adds some particular protections,
1675 * using the PBL/PBU registers for accesses with no translation.
1676 */
1677 in_plb =
1678 /* Check PLB validity */
1679 (env->pb[0] < env->pb[1] &&
1680 /* and address in plb area */
1681 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
1682 (env->pb[2] < env->pb[3] &&
1683 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
1684 if (in_plb ^ msr_px) {
1685 /* Access in protected area */
1686 if (rw == 1) {
1687 /* Access is not allowed */
1688 ret = -2;
1690 } else {
1691 /* Read-write access is allowed */
1692 ctx->prot |= PAGE_WRITE;
1695 break;
1696 case POWERPC_MMU_MPC8xx:
1697 /* XXX: TODO */
1698 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1699 break;
1700 case POWERPC_MMU_BOOKE206:
1701 cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n");
1702 break;
1703 default:
1704 cpu_abort(env, "Unknown or invalid MMU model\n");
1705 return -1;
1708 return ret;
1711 int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
1712 int rw, int access_type)
1714 int ret;
1716 #if 0
1717 qemu_log("%s\n", __func__);
1718 #endif
1719 if ((access_type == ACCESS_CODE && msr_ir == 0) ||
1720 (access_type != ACCESS_CODE && msr_dr == 0)) {
1721 if (env->mmu_model == POWERPC_MMU_BOOKE) {
1722 /* The BookE MMU always performs address translation. The
1723 IS and DS bits only affect the address space. */
1724 ret = mmubooke_get_physical_address(env, ctx, eaddr,
1725 rw, access_type);
1726 } else if (env->mmu_model == POWERPC_MMU_BOOKE206) {
1727 ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
1728 access_type);
1729 } else {
1730 /* No address translation. */
1731 ret = check_physical(env, ctx, eaddr, rw);
1733 } else {
1734 ret = -1;
1735 switch (env->mmu_model) {
1736 case POWERPC_MMU_32B:
1737 case POWERPC_MMU_601:
1738 case POWERPC_MMU_SOFT_6xx:
1739 case POWERPC_MMU_SOFT_74xx:
1740 /* Try to find a BAT */
1741 if (env->nb_BATs != 0) {
1742 ret = get_bat(env, ctx, eaddr, rw, access_type);
1744 #if defined(TARGET_PPC64)
1745 case POWERPC_MMU_620:
1746 case POWERPC_MMU_64B:
1747 case POWERPC_MMU_2_06:
1748 case POWERPC_MMU_2_06d:
1749 #endif
1750 if (ret < 0) {
1751 /* We didn't match any BAT entry or don't have BATs */
1752 ret = get_segment(env, ctx, eaddr, rw, access_type);
1754 break;
1755 case POWERPC_MMU_SOFT_4xx:
1756 case POWERPC_MMU_SOFT_4xx_Z:
1757 ret = mmu40x_get_physical_address(env, ctx, eaddr,
1758 rw, access_type);
1759 break;
1760 case POWERPC_MMU_BOOKE:
1761 ret = mmubooke_get_physical_address(env, ctx, eaddr,
1762 rw, access_type);
1763 break;
1764 case POWERPC_MMU_BOOKE206:
1765 ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
1766 access_type);
1767 break;
1768 case POWERPC_MMU_MPC8xx:
1769 /* XXX: TODO */
1770 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1771 break;
1772 case POWERPC_MMU_REAL:
1773 cpu_abort(env, "PowerPC in real mode do not do any translation\n");
1774 return -1;
1775 default:
1776 cpu_abort(env, "Unknown or invalid MMU model\n");
1777 return -1;
1780 #if 0
1781 qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
1782 __func__, eaddr, ret, ctx->raddr);
1783 #endif
1785 return ret;
1788 target_phys_addr_t cpu_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
1790 mmu_ctx_t ctx;
1792 if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
1793 return -1;
1796 return ctx.raddr & TARGET_PAGE_MASK;
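/* On a BookE 2.06 TLB miss the MAS registers are pre-seeded so a guest
 * miss handler typically only has to fill in the RPN and execute tlbwe:
 * MAS0-2 get the defaults from MAS4 (TLBSELD, TSIZED, WIMGED), the
 * faulting EPN, address space and PID are filled in, and a round-robin
 * victim way is suggested via MAS0[ESEL]/MAS0[NV] using env->last_way.
 */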
1799 static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
1800 int rw)
1802 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
1803 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
1804 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
1805 env->spr[SPR_BOOKE_MAS3] = 0;
1806 env->spr[SPR_BOOKE_MAS6] = 0;
1807 env->spr[SPR_BOOKE_MAS7] = 0;
1809 /* AS */
1810 if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) {
1811 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
1812 env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
1815 env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
1816 env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;
1818 switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
1819 case MAS4_TIDSELD_PID0:
1820 env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT;
1821 break;
1822 case MAS4_TIDSELD_PID1:
1823 env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT;
1824 break;
1825 case MAS4_TIDSELD_PID2:
1826 env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT;
1827 break;
1830 env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
1832 /* next victim logic */
1833 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
1834 env->last_way++;
1835 env->last_way &= booke206_tlb_ways(env, 0) - 1;
1836 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
1839 /* Perform address translation */
1840 int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rw,
1841 int mmu_idx)
1843 mmu_ctx_t ctx;
1844 int access_type;
1845 int ret = 0;
1847 if (rw == 2) {
1848 /* code access */
1849 rw = 0;
1850 access_type = ACCESS_CODE;
1851 } else {
1852 /* data access */
1853 access_type = env->access_type;
1855 ret = get_physical_address(env, &ctx, address, rw, access_type);
1856 if (ret == 0) {
1857 tlb_set_page(env, address & TARGET_PAGE_MASK,
1858 ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
1859 mmu_idx, TARGET_PAGE_SIZE);
1860 ret = 0;
1861 } else if (ret < 0) {
1862 LOG_MMU_STATE(env);
1863 if (access_type == ACCESS_CODE) {
1864 switch (ret) {
1865 case -1:
1866 /* No matches in page tables or TLB */
1867 switch (env->mmu_model) {
1868 case POWERPC_MMU_SOFT_6xx:
1869 env->exception_index = POWERPC_EXCP_IFTLB;
1870 env->error_code = 1 << 18;
1871 env->spr[SPR_IMISS] = address;
1872 env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
1873 goto tlb_miss;
1874 case POWERPC_MMU_SOFT_74xx:
1875 env->exception_index = POWERPC_EXCP_IFTLB;
1876 goto tlb_miss_74xx;
1877 case POWERPC_MMU_SOFT_4xx:
1878 case POWERPC_MMU_SOFT_4xx_Z:
1879 env->exception_index = POWERPC_EXCP_ITLB;
1880 env->error_code = 0;
1881 env->spr[SPR_40x_DEAR] = address;
1882 env->spr[SPR_40x_ESR] = 0x00000000;
1883 break;
1884 case POWERPC_MMU_32B:
1885 case POWERPC_MMU_601:
1886 #if defined(TARGET_PPC64)
1887 case POWERPC_MMU_620:
1888 case POWERPC_MMU_64B:
1889 case POWERPC_MMU_2_06:
1890 case POWERPC_MMU_2_06d:
1891 #endif
1892 env->exception_index = POWERPC_EXCP_ISI;
1893 env->error_code = 0x40000000;
1894 break;
1895 case POWERPC_MMU_BOOKE206:
1896 booke206_update_mas_tlb_miss(env, address, rw);
1897 /* fall through */
1898 case POWERPC_MMU_BOOKE:
1899 env->exception_index = POWERPC_EXCP_ITLB;
1900 env->error_code = 0;
1901 env->spr[SPR_BOOKE_DEAR] = address;
1902 return -1;
1903 case POWERPC_MMU_MPC8xx:
1904 /* XXX: TODO */
1905 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
1906 break;
1907 case POWERPC_MMU_REAL:
1908 cpu_abort(env, "PowerPC in real mode should never raise "
1909 "any MMU exceptions\n");
1910 return -1;
1911 default:
1912 cpu_abort(env, "Unknown or invalid MMU model\n");
1913 return -1;
1915 break;
1916 case -2:
1917 /* Access rights violation */
1918 env->exception_index = POWERPC_EXCP_ISI;
1919 env->error_code = 0x08000000;
1920 break;
1921 case -3:
1922 /* No execute protection violation */
1923 if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
1924 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
1925 env->spr[SPR_BOOKE_ESR] = 0x00000000;
1927 env->exception_index = POWERPC_EXCP_ISI;
1928 env->error_code = 0x10000000;
1929 break;
1930 case -4:
1931 /* Direct store exception */
1932 /* No code fetch is allowed in direct-store areas */
1933 env->exception_index = POWERPC_EXCP_ISI;
1934 env->error_code = 0x10000000;
1935 break;
1936 #if defined(TARGET_PPC64)
1937 case -5:
1938 /* No match in segment table */
1939 if (env->mmu_model == POWERPC_MMU_620) {
1940 env->exception_index = POWERPC_EXCP_ISI;
1941 /* XXX: this might be incorrect */
1942 env->error_code = 0x40000000;
1943 } else {
1944 env->exception_index = POWERPC_EXCP_ISEG;
1945 env->error_code = 0;
1947 break;
1948 #endif
1950 } else {
1951 switch (ret) {
1952 case -1:
1953 /* No matches in page tables or TLB */
1954 switch (env->mmu_model) {
1955 case POWERPC_MMU_SOFT_6xx:
1956 if (rw == 1) {
1957 env->exception_index = POWERPC_EXCP_DSTLB;
1958 env->error_code = 1 << 16;
1959 } else {
1960 env->exception_index = POWERPC_EXCP_DLTLB;
1961 env->error_code = 0;
1963 env->spr[SPR_DMISS] = address;
1964 env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
1965 tlb_miss:
1966 env->error_code |= ctx.key << 19;
1967 env->spr[SPR_HASH1] = env->htab_base +
1968 get_pteg_offset(env, ctx.hash[0], HASH_PTE_SIZE_32);
1969 env->spr[SPR_HASH2] = env->htab_base +
1970 get_pteg_offset(env, ctx.hash[1], HASH_PTE_SIZE_32);
1971 break;
1972 case POWERPC_MMU_SOFT_74xx:
1973 if (rw == 1) {
1974 env->exception_index = POWERPC_EXCP_DSTLB;
1975 } else {
1976 env->exception_index = POWERPC_EXCP_DLTLB;
1978 tlb_miss_74xx:
1979 /* Implement LRU algorithm */
1980 env->error_code = ctx.key << 19;
1981 env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
1982 ((env->last_way + 1) & (env->nb_ways - 1));
1983 env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
1984 break;
1985 case POWERPC_MMU_SOFT_4xx:
1986 case POWERPC_MMU_SOFT_4xx_Z:
1987 env->exception_index = POWERPC_EXCP_DTLB;
1988 env->error_code = 0;
1989 env->spr[SPR_40x_DEAR] = address;
1990 if (rw) {
1991 env->spr[SPR_40x_ESR] = 0x00800000;
1992 } else {
1993 env->spr[SPR_40x_ESR] = 0x00000000;
1995 break;
1996 case POWERPC_MMU_32B:
1997 case POWERPC_MMU_601:
1998 #if defined(TARGET_PPC64)
1999 case POWERPC_MMU_620:
2000 case POWERPC_MMU_64B:
2001 case POWERPC_MMU_2_06:
2002 case POWERPC_MMU_2_06d:
2003 #endif
2004 env->exception_index = POWERPC_EXCP_DSI;
2005 env->error_code = 0;
2006 env->spr[SPR_DAR] = address;
2007 if (rw == 1) {
2008 env->spr[SPR_DSISR] = 0x42000000;
2009 } else {
2010 env->spr[SPR_DSISR] = 0x40000000;
2012 break;
2013 case POWERPC_MMU_MPC8xx:
2014 /* XXX: TODO */
2015 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
2016 break;
2017 case POWERPC_MMU_BOOKE206:
2018 booke206_update_mas_tlb_miss(env, address, rw);
2019 /* fall through */
2020 case POWERPC_MMU_BOOKE:
2021 env->exception_index = POWERPC_EXCP_DTLB;
2022 env->error_code = 0;
2023 env->spr[SPR_BOOKE_DEAR] = address;
2024 env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
2025 return -1;
2026 case POWERPC_MMU_REAL:
2027 cpu_abort(env, "PowerPC in real mode should never raise "
2028 "any MMU exceptions\n");
2029 return -1;
2030 default:
2031 cpu_abort(env, "Unknown or invalid MMU model\n");
2032 return -1;
2034 break;
2035 case -2:
2036 /* Access rights violation */
2037 env->exception_index = POWERPC_EXCP_DSI;
2038 env->error_code = 0;
2039 if (env->mmu_model == POWERPC_MMU_SOFT_4xx
2040 || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
2041 env->spr[SPR_40x_DEAR] = address;
2042 if (rw) {
2043 env->spr[SPR_40x_ESR] |= 0x00800000;
2045 } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
2046 (env->mmu_model == POWERPC_MMU_BOOKE206)) {
2047 env->spr[SPR_BOOKE_DEAR] = address;
2048 env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
2049 } else {
2050 env->spr[SPR_DAR] = address;
2051 if (rw == 1) {
2052 env->spr[SPR_DSISR] = 0x0A000000;
2053 } else {
2054 env->spr[SPR_DSISR] = 0x08000000;
2057 break;
2058 case -4:
2059 /* Direct store exception */
2060 switch (access_type) {
2061 case ACCESS_FLOAT:
2062 /* Floating point load/store */
2063 env->exception_index = POWERPC_EXCP_ALIGN;
2064 env->error_code = POWERPC_EXCP_ALIGN_FP;
2065 env->spr[SPR_DAR] = address;
2066 break;
2067 case ACCESS_RES:
2068 /* lwarx, ldarx or stwcx. */
2069 env->exception_index = POWERPC_EXCP_DSI;
2070 env->error_code = 0;
2071 env->spr[SPR_DAR] = address;
2072 if (rw == 1) {
2073 env->spr[SPR_DSISR] = 0x06000000;
2074 } else {
2075 env->spr[SPR_DSISR] = 0x04000000;
2077 break;
2078 case ACCESS_EXT:
2079 /* eciwx or ecowx */
2080 env->exception_index = POWERPC_EXCP_DSI;
2081 env->error_code = 0;
2082 env->spr[SPR_DAR] = address;
2083 if (rw == 1) {
2084 env->spr[SPR_DSISR] = 0x06100000;
2085 } else {
2086 env->spr[SPR_DSISR] = 0x04100000;
2088 break;
2089 default:
2090 printf("DSI: invalid exception (%d)\n", ret);
2091 env->exception_index = POWERPC_EXCP_PROGRAM;
2092 env->error_code =
2093 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
2094 env->spr[SPR_DAR] = address;
2095 break;
2097 break;
2098 #if defined(TARGET_PPC64)
2099 case -5:
2100 /* No match in segment table */
2101 if (env->mmu_model == POWERPC_MMU_620) {
2102 env->exception_index = POWERPC_EXCP_DSI;
2103 env->error_code = 0;
2104 env->spr[SPR_DAR] = address;
2105 /* XXX: this might be incorrect */
2106 if (rw == 1) {
2107 env->spr[SPR_DSISR] = 0x42000000;
2108 } else {
2109 env->spr[SPR_DSISR] = 0x40000000;
2111 } else {
2112 env->exception_index = POWERPC_EXCP_DSEG;
2113 env->error_code = 0;
2114 env->spr[SPR_DAR] = address;
2116 break;
2117 #endif
2120 #if 0
2121 printf("%s: set exception to %d %02x\n", __func__,
2122 env->exception, env->error_code);
2123 #endif
2124 ret = 1;
2127 return ret;
2130 /*****************************************************************************/
2131 /* BATs management */
2132 #if !defined(FLUSH_ALL_TLBS)
2133 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
2134 target_ulong mask)
2136 target_ulong base, end, page;
2138 base = BATu & ~0x0001FFFF;
2139 end = base + mask + 0x00020000;
2140 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
2141 TARGET_FMT_lx ")\n", base, end, mask);
2142 for (page = base; page != end; page += TARGET_PAGE_SIZE) {
2143 tlb_flush_page(env, page);
2145 LOG_BATS("Flush done\n");
2147 #endif
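/* Worked example (editorial sketch): a classic 6xx/7xx upper BAT packs BEPI
 * in its high bits, the BL block-length field in bits 0x1FFC and the Vs/Vp
 * valid bits in bits 0x3.  The store helpers below derive the block mask as
 * (BATu << 15) & 0x0FFE0000, so BL = 0 means a 128 KB block (mask 0) and
 * BL = 0x7FF means a 256 MB block (mask 0x0FFE0000); do_invalidate_BAT()
 * then flushes base .. base + mask + 128 KB page by page.  The 601 variants
 * differ in that the block size and valid bits live in the lower BAT.
 */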
2149 static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
2150 target_ulong value)
2152 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
2153 nr, ul == 0 ? 'u' : 'l', value, env->nip);
2156 void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
2158 target_ulong mask;
2160 dump_store_bat(env, 'I', 0, nr, value);
2161 if (env->IBAT[0][nr] != value) {
2162 mask = (value << 15) & 0x0FFE0000UL;
2163 #if !defined(FLUSH_ALL_TLBS)
2164 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
2165 #endif
2166 /* When storing valid upper BAT, mask BEPI and BRPN
2167 * and invalidate all TLBs covered by this BAT
2168 */
2169 mask = (value << 15) & 0x0FFE0000UL;
2170 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
2171 (value & ~0x0001FFFFUL & ~mask);
2172 env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
2173 (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
2174 #if !defined(FLUSH_ALL_TLBS)
2175 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
2176 #else
2177 tlb_flush(env, 1);
2178 #endif
2182 void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
2184 dump_store_bat(env, 'I', 1, nr, value);
2185 env->IBAT[1][nr] = value;
2188 void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
2190 target_ulong mask;
2192 dump_store_bat(env, 'D', 0, nr, value);
2193 if (env->DBAT[0][nr] != value) {
2194 /* When storing valid upper BAT, mask BEPI and BRPN
2195 * and invalidate all TLBs covered by this BAT
2196 */
2197 mask = (value << 15) & 0x0FFE0000UL;
2198 #if !defined(FLUSH_ALL_TLBS)
2199 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
2200 #endif
2201 mask = (value << 15) & 0x0FFE0000UL;
2202 env->DBAT[0][nr] = (value & 0x00001FFFUL) |
2203 (value & ~0x0001FFFFUL & ~mask);
2204 env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
2205 (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
2206 #if !defined(FLUSH_ALL_TLBS)
2207 do_invalidate_BAT(env, env->DBAT[0][nr], mask);
2208 #else
2209 tlb_flush(env, 1);
2210 #endif
2214 void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
2216 dump_store_bat(env, 'D', 1, nr, value);
2217 env->DBAT[1][nr] = value;
2220 void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
2222 target_ulong mask;
2223 #if defined(FLUSH_ALL_TLBS)
2224 int do_inval;
2225 #endif
2227 dump_store_bat(env, 'I', 0, nr, value);
2228 if (env->IBAT[0][nr] != value) {
2229 #if defined(FLUSH_ALL_TLBS)
2230 do_inval = 0;
2231 #endif
2232 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
2233 if (env->IBAT[1][nr] & 0x40) {
2234 /* Invalidate BAT only if it is valid */
2235 #if !defined(FLUSH_ALL_TLBS)
2236 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
2237 #else
2238 do_inval = 1;
2239 #endif
2241 /* When storing valid upper BAT, mask BEPI and BRPN
2242 * and invalidate all TLBs covered by this BAT
2243 */
2244 env->IBAT[0][nr] = (value & 0x00001FFFUL) |
2245 (value & ~0x0001FFFFUL & ~mask);
2246 env->DBAT[0][nr] = env->IBAT[0][nr];
2247 if (env->IBAT[1][nr] & 0x40) {
2248 #if !defined(FLUSH_ALL_TLBS)
2249 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
2250 #else
2251 do_inval = 1;
2252 #endif
2254 #if defined(FLUSH_ALL_TLBS)
2255 if (do_inval) {
2256 tlb_flush(env, 1);
2258 #endif
2262 void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
2264 target_ulong mask;
2265 #if defined(FLUSH_ALL_TLBS)
2266 int do_inval;
2267 #endif
2269 dump_store_bat(env, 'I', 1, nr, value);
2270 if (env->IBAT[1][nr] != value) {
2271 #if defined(FLUSH_ALL_TLBS)
2272 do_inval = 0;
2273 #endif
2274 if (env->IBAT[1][nr] & 0x40) {
2275 #if !defined(FLUSH_ALL_TLBS)
2276 mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
2277 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
2278 #else
2279 do_inval = 1;
2280 #endif
2282 if (value & 0x40) {
2283 #if !defined(FLUSH_ALL_TLBS)
2284 mask = (value << 17) & 0x0FFE0000UL;
2285 do_invalidate_BAT(env, env->IBAT[0][nr], mask);
2286 #else
2287 do_inval = 1;
2288 #endif
2290 env->IBAT[1][nr] = value;
2291 env->DBAT[1][nr] = value;
2292 #if defined(FLUSH_ALL_TLBS)
2293 if (do_inval) {
2294 tlb_flush(env, 1);
2296 #endif
2300 /*****************************************************************************/
2301 /* TLB management */
2302 void ppc_tlb_invalidate_all(CPUPPCState *env)
2304 switch (env->mmu_model) {
2305 case POWERPC_MMU_SOFT_6xx:
2306 case POWERPC_MMU_SOFT_74xx:
2307 ppc6xx_tlb_invalidate_all(env);
2308 break;
2309 case POWERPC_MMU_SOFT_4xx:
2310 case POWERPC_MMU_SOFT_4xx_Z:
2311 ppc4xx_tlb_invalidate_all(env);
2312 break;
2313 case POWERPC_MMU_REAL:
2314 cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
2315 break;
2316 case POWERPC_MMU_MPC8xx:
2317 /* XXX: TODO */
2318 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
2319 break;
2320 case POWERPC_MMU_BOOKE:
2321 tlb_flush(env, 1);
2322 break;
2323 case POWERPC_MMU_BOOKE206:
2324 booke206_flush_tlb(env, -1, 0);
2325 break;
2326 case POWERPC_MMU_32B:
2327 case POWERPC_MMU_601:
2328 #if defined(TARGET_PPC64)
2329 case POWERPC_MMU_620:
2330 case POWERPC_MMU_64B:
2331 case POWERPC_MMU_2_06:
2332 case POWERPC_MMU_2_06d:
2333 #endif /* defined(TARGET_PPC64) */
2334 tlb_flush(env, 1);
2335 break;
2336 default:
2337 /* XXX: TODO */
2338 cpu_abort(env, "Unknown MMU model\n");
2339 break;
2343 void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
2345 #if !defined(FLUSH_ALL_TLBS)
2346 addr &= TARGET_PAGE_MASK;
2347 switch (env->mmu_model) {
2348 case POWERPC_MMU_SOFT_6xx:
2349 case POWERPC_MMU_SOFT_74xx:
2350 ppc6xx_tlb_invalidate_virt(env, addr, 0);
2351 if (env->id_tlbs == 1) {
2352 ppc6xx_tlb_invalidate_virt(env, addr, 1);
2354 break;
2355 case POWERPC_MMU_SOFT_4xx:
2356 case POWERPC_MMU_SOFT_4xx_Z:
2357 ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
2358 break;
2359 case POWERPC_MMU_REAL:
2360 cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
2361 break;
2362 case POWERPC_MMU_MPC8xx:
2363 /* XXX: TODO */
2364 cpu_abort(env, "MPC8xx MMU model is not implemented\n");
2365 break;
2366 case POWERPC_MMU_BOOKE:
2367 /* XXX: TODO */
2368 cpu_abort(env, "BookE MMU model is not implemented\n");
2369 break;
2370 case POWERPC_MMU_BOOKE206:
2371 /* XXX: TODO */
2372 cpu_abort(env, "BookE 2.06 MMU model is not implemented\n");
2373 break;
2374 case POWERPC_MMU_32B:
2375 case POWERPC_MMU_601:
2376 /* tlbie invalidates TLB entries for all segments */
2377 addr &= ~((target_ulong)-1ULL << 28);
2378 /* XXX: this case should be optimized by passing
2379 * a mask to tlb_flush_page
2380 */
2381 tlb_flush_page(env, addr | (0x0 << 28));
2382 tlb_flush_page(env, addr | (0x1 << 28));
2383 tlb_flush_page(env, addr | (0x2 << 28));
2384 tlb_flush_page(env, addr | (0x3 << 28));
2385 tlb_flush_page(env, addr | (0x4 << 28));
2386 tlb_flush_page(env, addr | (0x5 << 28));
2387 tlb_flush_page(env, addr | (0x6 << 28));
2388 tlb_flush_page(env, addr | (0x7 << 28));
2389 tlb_flush_page(env, addr | (0x8 << 28));
2390 tlb_flush_page(env, addr | (0x9 << 28));
2391 tlb_flush_page(env, addr | (0xA << 28));
2392 tlb_flush_page(env, addr | (0xB << 28));
2393 tlb_flush_page(env, addr | (0xC << 28));
2394 tlb_flush_page(env, addr | (0xD << 28));
2395 tlb_flush_page(env, addr | (0xE << 28));
2396 tlb_flush_page(env, addr | (0xF << 28));
2397 break;
2398 #if defined(TARGET_PPC64)
2399 case POWERPC_MMU_620:
2400 case POWERPC_MMU_64B:
2401 case POWERPC_MMU_2_06:
2402 case POWERPC_MMU_2_06d:
2403 /* tlbie invalidates TLB entries for all segments */
2404 /* XXX: there are too many segments to invalidate one by one,
2405 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2406 * so we just invalidate all TLBs
2407 */
2408 tlb_flush(env, 1);
2409 break;
2410 #endif /* defined(TARGET_PPC64) */
2411 default:
2412 /* XXX: TODO */
2413 cpu_abort(env, "Unknown MMU model\n");
2414 break;
2416 #else
2417 ppc_tlb_invalidate_all(env);
2418 #endif
2421 /*****************************************************************************/
2422 /* Special registers manipulation */
2423 #if defined(TARGET_PPC64)
2424 void ppc_store_asr(CPUPPCState *env, target_ulong value)
2426 if (env->asr != value) {
2427 env->asr = value;
2428 tlb_flush(env, 1);
2431 #endif
2433 void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
2435 LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
2436 if (env->spr[SPR_SDR1] != value) {
2437 env->spr[SPR_SDR1] = value;
2438 #if defined(TARGET_PPC64)
2439 if (env->mmu_model & POWERPC_MMU_64) {
2440 target_ulong htabsize = value & SDR_64_HTABSIZE;
2442 if (htabsize > 28) {
2443 fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx
2444 " stored in SDR1\n", htabsize);
2445 htabsize = 28;
2447 env->htab_mask = (1ULL << (htabsize + 18)) - 1;
2448 env->htab_base = value & SDR_64_HTABORG;
2449 } else
2450 #endif /* defined(TARGET_PPC64) */
2452 /* FIXME: Should check for valid HTABMASK values */
2453 env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
2454 env->htab_base = value & SDR_32_HTABORG;
2456 tlb_flush(env, 1);
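    /* Worked example (illustrative): with the 32-bit layout, HTABMASK = 0x1FF
     * yields htab_mask = 0x01FFFFFF, i.e. a 32 MB hash table; with the 64-bit
     * layout, HTABSIZE = 0 gives the minimum 256 KB table (mask 0x3FFFF) and
     * the clamped maximum of 28 gives a 2^46-byte table.
     */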
2460 /* Segment registers load and store */
2461 target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
2463 #if defined(TARGET_PPC64)
2464 if (env->mmu_model & POWERPC_MMU_64) {
2465 /* XXX */
2466 return 0;
2468 #endif
2469 return env->sr[sr_num];
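/* Note (informal): on 64-bit hash MMUs the 32-bit segment registers do not
 * really exist; helper_store_sr() below instead synthesizes an SLB entry,
 * roughly ESID = srnum with the valid bit set and the VSID/flags taken from
 * the stored value, so a legacy mtsr still takes effect.
 */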
2472 void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
2474 LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
2475 (int)srnum, value, env->sr[srnum]);
2476 #if defined(TARGET_PPC64)
2477 if (env->mmu_model & POWERPC_MMU_64) {
2478 uint64_t rb = 0, rs = 0;
2480 /* ESID = srnum */
2481 rb |= ((uint32_t)srnum & 0xf) << 28;
2482 /* Set the valid bit */
2483 rb |= 1 << 27;
2484 /* Index = ESID */
2485 rb |= (uint32_t)srnum;
2487 /* VSID = VSID */
2488 rs |= (value & 0xfffffff) << 12;
2489 /* flags = flags */
2490 rs |= ((value >> 27) & 0xf) << 8;
2492 ppc_store_slb(env, rb, rs);
2493 } else
2494 #endif
2495 if (env->sr[srnum] != value) {
2496 env->sr[srnum] = value;
2497 /* Invalidating 256 MB of virtual memory in 4 kB pages takes far longer
2498 than flushing the whole TLB. */
2499 #if !defined(FLUSH_ALL_TLBS) && 0
2501 target_ulong page, end;
2502 /* Invalidate 256 MB of virtual memory */
2503 page = (16 << 20) * srnum;
2504 end = page + (16 << 20);
2505 for (; page != end; page += TARGET_PAGE_SIZE) {
2506 tlb_flush_page(env, page);
2509 #else
2510 tlb_flush(env, 1);
2511 #endif
2514 #endif /* !defined(CONFIG_USER_ONLY) */
2516 #if !defined(CONFIG_USER_ONLY)
2517 /* SLB management */
2518 #if defined(TARGET_PPC64)
2519 void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
2521 if (ppc_store_slb(env, rb, rs) < 0) {
2522 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
2523 POWERPC_EXCP_INVAL);
2527 target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
2529 target_ulong rt = 0;
2531 if (ppc_load_slb_esid(env, rb, &rt) < 0) {
2532 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
2533 POWERPC_EXCP_INVAL);
2535 return rt;
2538 target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
2540 target_ulong rt = 0;
2542 if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
2543 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
2544 POWERPC_EXCP_INVAL);
2546 return rt;
2548 #endif /* defined(TARGET_PPC64) */
2550 /* TLB management */
2551 void helper_tlbia(CPUPPCState *env)
2553 ppc_tlb_invalidate_all(env);
2556 void helper_tlbie(CPUPPCState *env, target_ulong addr)
2558 ppc_tlb_invalidate_one(env, addr);
2561 /* Software driven TLBs management */
2562 /* PowerPC 602/603 software TLB load instructions helpers */
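/* Background (approximate): on 602/603-class CPUs a TLB miss raises a
 * software miss exception; the handler locates the PTE itself, loads it
 * into SPR_RPA and issues tlbli/tlbld.  do_6xx_tlb() below models that last
 * step: the compare word and miss address come from ICMP/IMISS (code) or
 * DCMP/DMISS (data), and a bit of SRR1 selects the victim way.
 */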
2563 static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2565 target_ulong RPN, CMP, EPN;
2566 int way;
2568 RPN = env->spr[SPR_RPA];
2569 if (is_code) {
2570 CMP = env->spr[SPR_ICMP];
2571 EPN = env->spr[SPR_IMISS];
2572 } else {
2573 CMP = env->spr[SPR_DCMP];
2574 EPN = env->spr[SPR_DMISS];
2576 way = (env->spr[SPR_SRR1] >> 17) & 1;
2577 (void)EPN; /* avoid a compiler warning */
2578 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2579 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2580 RPN, way);
2581 /* Store this TLB */
2582 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2583 way, is_code, CMP, RPN);
2586 void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
2588 do_6xx_tlb(env, EPN, 0);
2591 void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
2593 do_6xx_tlb(env, EPN, 1);
2596 /* PowerPC 74xx software TLB load instructions helpers */
2597 static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
2599 target_ulong RPN, CMP, EPN;
2600 int way;
2602 RPN = env->spr[SPR_PTELO];
2603 CMP = env->spr[SPR_PTEHI];
2604 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2605 way = env->spr[SPR_TLBMISS] & 0x3;
2606 (void)EPN; /* avoid a compiler warning */
2607 LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
2608 " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
2609 RPN, way);
2610 /* Store this TLB */
2611 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2612 way, is_code, CMP, RPN);
2615 void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
2617 do_74xx_tlb(env, EPN, 0);
2620 void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
2622 do_74xx_tlb(env, EPN, 1);
2625 /*****************************************************************************/
2626 /* PowerPC 601 specific instructions (POWER bridge) */
2628 target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
2630 mmu_ctx_t ctx;
2631 int nb_BATs;
2632 target_ulong ret = 0;
2634 /* We don't have to generate many instances of this instruction,
2635 * as rac is supervisor only.
2636 */
2637 /* XXX: FIX THIS: Pretend we have no BAT */
2638 nb_BATs = env->nb_BATs;
2639 env->nb_BATs = 0;
2640 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
2641 ret = ctx.raddr;
2643 env->nb_BATs = nb_BATs;
2644 return ret;
2647 static inline target_ulong booke_tlb_to_page_size(int size)
2649 return 1024 << (2 * size);
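    /* Illustrative mapping: size 0 -> 1 KB, 1 -> 4 KB, 2 -> 16 KB, ...,
     * 7 -> 16 MB, 9 -> 256 MB: each step quadruples the page size.
     * booke_page_size_to_tlb() below is the inverse lookup.
     */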
2652 static inline int booke_page_size_to_tlb(target_ulong page_size)
2654 int size;
2656 switch (page_size) {
2657 case 0x00000400UL:
2658 size = 0x0;
2659 break;
2660 case 0x00001000UL:
2661 size = 0x1;
2662 break;
2663 case 0x00004000UL:
2664 size = 0x2;
2665 break;
2666 case 0x00010000UL:
2667 size = 0x3;
2668 break;
2669 case 0x00040000UL:
2670 size = 0x4;
2671 break;
2672 case 0x00100000UL:
2673 size = 0x5;
2674 break;
2675 case 0x00400000UL:
2676 size = 0x6;
2677 break;
2678 case 0x01000000UL:
2679 size = 0x7;
2680 break;
2681 case 0x04000000UL:
2682 size = 0x8;
2683 break;
2684 case 0x10000000UL:
2685 size = 0x9;
2686 break;
2687 case 0x40000000UL:
2688 size = 0xA;
2689 break;
2690 #if defined(TARGET_PPC64)
2691 case 0x000100000000ULL:
2692 size = 0xB;
2693 break;
2694 case 0x000400000000ULL:
2695 size = 0xC;
2696 break;
2697 case 0x001000000000ULL:
2698 size = 0xD;
2699 break;
2700 case 0x004000000000ULL:
2701 size = 0xE;
2702 break;
2703 case 0x010000000000ULL:
2704 size = 0xF;
2705 break;
2706 #endif
2707 default:
2708 size = -1;
2709 break;
2712 return size;
2715 /* Helpers for 4xx TLB management */
2716 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2718 #define PPC4XX_TLBHI_V 0x00000040
2719 #define PPC4XX_TLBHI_E 0x00000020
2720 #define PPC4XX_TLBHI_SIZE_MIN 0
2721 #define PPC4XX_TLBHI_SIZE_MAX 7
2722 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2723 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2724 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2726 #define PPC4XX_TLBLO_EX 0x00000200
2727 #define PPC4XX_TLBLO_WR 0x00000100
2728 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2729 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
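/* Layout sketch (as used by the helpers below, not a full 40x reference):
 * TLBHI holds the EPN in its upper bits plus the SIZE field and the V
 * (valid) and E (endian) bits defined above; TLBLO holds the RPN plus the
 * EX/WR execute and write permission bits and the storage attribute byte.
 */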
2731 target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
2733 ppcemb_tlb_t *tlb;
2734 target_ulong ret;
2735 int size;
2737 entry &= PPC4XX_TLB_ENTRY_MASK;
2738 tlb = &env->tlb.tlbe[entry];
2739 ret = tlb->EPN;
2740 if (tlb->prot & PAGE_VALID) {
2741 ret |= PPC4XX_TLBHI_V;
2743 size = booke_page_size_to_tlb(tlb->size);
2744 if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
2745 size = PPC4XX_TLBHI_SIZE_DEFAULT;
2747 ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
2748 env->spr[SPR_40x_PID] = tlb->PID;
2749 return ret;
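    /* Note: reading the HI word also updates SPR_40x_PID with the entry's
     * PID (see above), presumably mirroring the hardware tlbre side effect.
     */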
2752 target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
2754 ppcemb_tlb_t *tlb;
2755 target_ulong ret;
2757 entry &= PPC4XX_TLB_ENTRY_MASK;
2758 tlb = &env->tlb.tlbe[entry];
2759 ret = tlb->RPN;
2760 if (tlb->prot & PAGE_EXEC) {
2761 ret |= PPC4XX_TLBLO_EX;
2763 if (tlb->prot & PAGE_WRITE) {
2764 ret |= PPC4XX_TLBLO_WR;
2766 return ret;
2769 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
2770 target_ulong val)
2772 ppcemb_tlb_t *tlb;
2773 target_ulong page, end;
2775 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
2776 val);
2777 entry &= PPC4XX_TLB_ENTRY_MASK;
2778 tlb = &env->tlb.tlbe[entry];
2779 /* Invalidate previous TLB (if it's valid) */
2780 if (tlb->prot & PAGE_VALID) {
2781 end = tlb->EPN + tlb->size;
2782 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
2783 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2784 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2785 tlb_flush_page(env, page);
2788 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
2789 & PPC4XX_TLBHI_SIZE_MASK);
2790 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2791 * If this ever occurs, one should use the ppcemb target instead
2792 * of the ppc or ppc64 one
2793 */
2794 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
2795 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2796 "are not supported (%d)\n",
2797 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
2799 tlb->EPN = val & ~(tlb->size - 1);
2800 if (val & PPC4XX_TLBHI_V) {
2801 tlb->prot |= PAGE_VALID;
2802 if (val & PPC4XX_TLBHI_E) {
2803 /* XXX: TO BE FIXED */
2804 cpu_abort(env,
2805 "Little-endian TLB entries are not supported by now\n");
2807 } else {
2808 tlb->prot &= ~PAGE_VALID;
2810 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2811 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2812 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2813 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2814 tlb->prot & PAGE_READ ? 'r' : '-',
2815 tlb->prot & PAGE_WRITE ? 'w' : '-',
2816 tlb->prot & PAGE_EXEC ? 'x' : '-',
2817 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2818 /* Invalidate new TLB (if valid) */
2819 if (tlb->prot & PAGE_VALID) {
2820 end = tlb->EPN + tlb->size;
2821 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
2822 TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
2823 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
2824 tlb_flush_page(env, page);
2829 void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
2830 target_ulong val)
2832 ppcemb_tlb_t *tlb;
2834 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
2835 val);
2836 entry &= PPC4XX_TLB_ENTRY_MASK;
2837 tlb = &env->tlb.tlbe[entry];
2838 tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
2839 tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
2840 tlb->prot = PAGE_READ;
2841 if (val & PPC4XX_TLBLO_EX) {
2842 tlb->prot |= PAGE_EXEC;
2844 if (val & PPC4XX_TLBLO_WR) {
2845 tlb->prot |= PAGE_WRITE;
2847 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
2848 " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
2849 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2850 tlb->prot & PAGE_READ ? 'r' : '-',
2851 tlb->prot & PAGE_WRITE ? 'w' : '-',
2852 tlb->prot & PAGE_EXEC ? 'x' : '-',
2853 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2856 target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
2858 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
2861 /* PowerPC 440 TLB management */
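/* Layout sketch (approximate): a 440 TLB entry is written as three words.
 * Word 0 carries the EPN, page size, valid bit and the TID taken from
 * MMUCR; word 1 carries the RPN; word 2 carries the storage attributes plus
 * six access-permission bits that the helpers fold into tlb->prot, one
 * nibble for user and one for supervisor accesses.
 */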
2862 void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
2863 target_ulong value)
2865 ppcemb_tlb_t *tlb;
2866 target_ulong EPN, RPN, size;
2867 int do_flush_tlbs;
2869 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
2870 __func__, word, (int)entry, value);
2871 do_flush_tlbs = 0;
2872 entry &= 0x3F;
2873 tlb = &env->tlb.tlbe[entry];
2874 switch (word) {
2875 default:
2876 /* Just here to please gcc */
2877 case 0:
2878 EPN = value & 0xFFFFFC00;
2879 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
2880 do_flush_tlbs = 1;
2882 tlb->EPN = EPN;
2883 size = booke_tlb_to_page_size((value >> 4) & 0xF);
2884 if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
2885 do_flush_tlbs = 1;
2887 tlb->size = size;
2888 tlb->attr &= ~0x1;
2889 tlb->attr |= (value >> 8) & 1;
2890 if (value & 0x200) {
2891 tlb->prot |= PAGE_VALID;
2892 } else {
2893 if (tlb->prot & PAGE_VALID) {
2894 tlb->prot &= ~PAGE_VALID;
2895 do_flush_tlbs = 1;
2898 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2899 if (do_flush_tlbs) {
2900 tlb_flush(env, 1);
2902 break;
2903 case 1:
2904 RPN = value & 0xFFFFFC0F;
2905 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
2906 tlb_flush(env, 1);
2908 tlb->RPN = RPN;
2909 break;
2910 case 2:
2911 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
2912 tlb->prot = tlb->prot & PAGE_VALID;
2913 if (value & 0x1) {
2914 tlb->prot |= PAGE_READ << 4;
2916 if (value & 0x2) {
2917 tlb->prot |= PAGE_WRITE << 4;
2919 if (value & 0x4) {
2920 tlb->prot |= PAGE_EXEC << 4;
2922 if (value & 0x8) {
2923 tlb->prot |= PAGE_READ;
2925 if (value & 0x10) {
2926 tlb->prot |= PAGE_WRITE;
2928 if (value & 0x20) {
2929 tlb->prot |= PAGE_EXEC;
2931 break;
2935 target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
2936 target_ulong entry)
2938 ppcemb_tlb_t *tlb;
2939 target_ulong ret;
2940 int size;
2942 entry &= 0x3F;
2943 tlb = &env->tlb.tlbe[entry];
2944 switch (word) {
2945 default:
2946 /* Just here to please gcc */
2947 case 0:
2948 ret = tlb->EPN;
2949 size = booke_page_size_to_tlb(tlb->size);
2950 if (size < 0 || size > 0xF) {
2951 size = 1;
2953 ret |= size << 4;
2954 if (tlb->attr & 0x1) {
2955 ret |= 0x100;
2957 if (tlb->prot & PAGE_VALID) {
2958 ret |= 0x200;
2960 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2961 env->spr[SPR_440_MMUCR] |= tlb->PID;
2962 break;
2963 case 1:
2964 ret = tlb->RPN;
2965 break;
2966 case 2:
2967 ret = tlb->attr & ~0x1;
2968 if (tlb->prot & (PAGE_READ << 4)) {
2969 ret |= 0x1;
2971 if (tlb->prot & (PAGE_WRITE << 4)) {
2972 ret |= 0x2;
2974 if (tlb->prot & (PAGE_EXEC << 4)) {
2975 ret |= 0x4;
2977 if (tlb->prot & PAGE_READ) {
2978 ret |= 0x8;
2980 if (tlb->prot & PAGE_WRITE) {
2981 ret |= 0x10;
2983 if (tlb->prot & PAGE_EXEC) {
2984 ret |= 0x20;
2986 break;
2988 return ret;
2991 target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
2993 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
2996 /* PowerPC BookE 2.06 TLB management */
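/* Quick MAS recap (informal): MAS0 selects the TLB array and entry
 * (TLBSEL/ESEL) and carries the next-victim hint (NV); MAS1 holds the valid
 * bit, TS, TID and TSIZE; MAS2 holds the EPN and the WIMGE-style attributes;
 * MAS3/MAS7 together hold the RPN and the access permissions.  The helpers
 * below shuttle entries between these SPRs and tlb->mas1/mas2/mas7_3.
 */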
2998 static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
3000 uint32_t tlbncfg = 0;
3001 int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
3002 int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
3003 int tlb;
3005 tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
3006 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
3008 if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
3009 cpu_abort(env, "we don't support HES yet\n");
3012 return booke206_get_tlbm(env, tlb, ea, esel);
3015 void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
3017 env->spr[pidn] = pid;
3018 /* changing PIDs means we're in a different address space now */
3019 tlb_flush(env, 1);
3022 void helper_booke206_tlbwe(CPUPPCState *env)
3024 uint32_t tlbncfg, tlbn;
3025 ppcmas_tlb_t *tlb;
3026 uint32_t size_tlb, size_ps;
3027 target_ulong mask;
3030 switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
3031 case MAS0_WQ_ALWAYS:
3032 /* good to go, write that entry */
3033 break;
3034 case MAS0_WQ_COND:
3035 /* XXX check if reserved */
3036 if (0) {
3037 return;
3039 break;
3040 case MAS0_WQ_CLR_RSRV:
3041 /* XXX clear entry */
3042 return;
3043 default:
3044 /* no idea what to do */
3045 return;
3048 if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
3049 !msr_gs) {
3050 /* XXX we don't support direct LRAT setting yet */
3051 fprintf(stderr, "cpu: don't support LRAT setting yet\n");
3052 return;
3055 tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
3056 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
3058 tlb = booke206_cur_tlb(env);
3060 if (!tlb) {
3061 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
3062 POWERPC_EXCP_INVAL |
3063 POWERPC_EXCP_INVAL_INVAL);
3066 /* check that we support the targeted size */
3067 size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3068 size_ps = booke206_tlbnps(env, tlbn);
3069 if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
3070 !(size_ps & (1 << size_tlb))) {
3071 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
3072 POWERPC_EXCP_INVAL |
3073 POWERPC_EXCP_INVAL_INVAL);
3076 if (msr_gs) {
3077 cpu_abort(env, "missing HV implementation\n");
3079 tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
3080 env->spr[SPR_BOOKE_MAS3];
3081 tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
3083 /* MAV 1.0 only */
3084 if (!(tlbncfg & TLBnCFG_AVAIL)) {
3085 /* force !AVAIL TLB entries to the correct page size */
3086 tlb->mas1 &= ~MAS1_TSIZE_MASK;
3087 /* XXX can be configured in MMUCSR0 */
3088 tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
3091 /* Make a mask from TLB size to discard invalid bits in EPN field */
3092 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
3093 /* Add a mask for page attributes */
3094 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
3096 if (!msr_cm) {
3097 /* Executing a tlbwe instruction in 32-bit mode will set
3098 * bits 0:31 of the TLB EPN field to zero.
3099 */
3100 mask &= 0xffffffff;
3103 tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;
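    /* Example (illustrative): for a 4 KB entry the size-derived part of the
     * mask is ~0xfff, so the AND above keeps a page-aligned EPN while the
     * attribute bits OR'ed into the mask earlier (ACM/VLE/WIMGE) pass
     * through from MAS2 unchanged.
     */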
3105 if (!(tlbncfg & TLBnCFG_IPROT)) {
3106 /* no IPROT supported by TLB */
3107 tlb->mas1 &= ~MAS1_IPROT;
3110 if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
3111 tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
3112 } else {
3113 tlb_flush(env, 1);
3117 static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
3119 int tlbn = booke206_tlbm_to_tlbn(env, tlb);
3120 int way = booke206_tlbm_to_way(env, tlb);
3122 env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
3123 env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
3124 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
3126 env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
3127 env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
3128 env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
3129 env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
3132 void helper_booke206_tlbre(CPUPPCState *env)
3134 ppcmas_tlb_t *tlb = NULL;
3136 tlb = booke206_cur_tlb(env);
3137 if (!tlb) {
3138 env->spr[SPR_BOOKE_MAS1] = 0;
3139 } else {
3140 booke206_tlb_to_mas(env, tlb);
3144 void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
3146 ppcmas_tlb_t *tlb = NULL;
3147 int i, j;
3148 target_phys_addr_t raddr;
3149 uint32_t spid, sas;
3151 spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
3152 sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
3154 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
3155 int ways = booke206_tlb_ways(env, i);
3157 for (j = 0; j < ways; j++) {
3158 tlb = booke206_get_tlbm(env, i, address, j);
3160 if (!tlb) {
3161 continue;
3164 if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
3165 continue;
3168 if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
3169 continue;
3172 booke206_tlb_to_mas(env, tlb);
3173 return;
3177 /* no entry found, fill with defaults */
3178 env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
3179 env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
3180 env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
3181 env->spr[SPR_BOOKE_MAS3] = 0;
3182 env->spr[SPR_BOOKE_MAS7] = 0;
3184 if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
3185 env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
3188 env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
3189 << MAS1_TID_SHIFT;
3191 /* next victim logic */
3192 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
3193 env->last_way++;
3194 env->last_way &= booke206_tlb_ways(env, 0) - 1;
3195 env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
3198 static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
3199 uint32_t ea)
3201 int i;
3202 int ways = booke206_tlb_ways(env, tlbn);
3203 target_ulong mask;
3205 for (i = 0; i < ways; i++) {
3206 ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
3207 if (!tlb) {
3208 continue;
3210 mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
3211 if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
3212 !(tlb->mas1 & MAS1_IPROT)) {
3213 tlb->mas1 &= ~MAS1_VALID;
3218 void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
3220 if (address & 0x4) {
3221 /* flush all entries */
3222 if (address & 0x8) {
3223 /* flush all of TLB1 */
3224 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
3225 } else {
3226 /* flush all of TLB0 */
3227 booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
3229 return;
3232 if (address & 0x8) {
3233 /* flush TLB1 entries */
3234 booke206_invalidate_ea_tlb(env, 1, address);
3235 tlb_flush(env, 1);
3236 } else {
3237 /* flush TLB0 entries */
3238 booke206_invalidate_ea_tlb(env, 0, address);
3239 tlb_flush_page(env, address & MAS2_EPN_MASK);
3243 void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
3245 /* XXX missing LPID handling */
3246 booke206_flush_tlb(env, -1, 1);
3249 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
3251 int i, j;
3252 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
3253 ppcmas_tlb_t *tlb = env->tlb.tlbm;
3254 int tlb_size;
3256 /* XXX missing LPID handling */
3257 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
3258 tlb_size = booke206_tlb_size(env, i);
3259 for (j = 0; j < tlb_size; j++) {
3260 if (!(tlb[j].mas1 & MAS1_IPROT) &&
3261 ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
3262 tlb[j].mas1 &= ~MAS1_VALID;
3265 tlb += booke206_tlb_size(env, i);
3267 tlb_flush(env, 1);
3270 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
3272 int i, j;
3273 ppcmas_tlb_t *tlb;
3274 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
3275 int pid = tid >> MAS6_SPID_SHIFT;
3276 int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
3277 int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
3278 /* XXX check for unsupported isize and raise an invalid opcode exception if so */
3279 int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
3280 /* XXX implement MAV2 handling */
3281 bool mav2 = false;
3283 /* XXX missing LPID handling */
3284 /* flush by pid and ea */
3285 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
3286 int ways = booke206_tlb_ways(env, i);
3288 for (j = 0; j < ways; j++) {
3289 tlb = booke206_get_tlbm(env, i, address, j);
3290 if (!tlb) {
3291 continue;
3293 if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
3294 (tlb->mas1 & MAS1_IPROT) ||
3295 ((tlb->mas1 & MAS1_IND) != ind) ||
3296 ((tlb->mas8 & MAS8_TGS) != sgs)) {
3297 continue;
3299 if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
3300 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
3301 continue;
3303 /* XXX e500mc doesn't match SAS, but other cores might */
3304 tlb->mas1 &= ~MAS1_VALID;
3307 tlb_flush(env, 1);
3310 void helper_booke206_tlbflush(CPUPPCState *env, uint32_t type)
3312 int flags = 0;
3314 if (type & 2) {
3315 flags |= BOOKE206_FLUSH_TLB1;
3318 if (type & 4) {
3319 flags |= BOOKE206_FLUSH_TLB0;
3322 booke206_flush_tlb(env, flags, 1);
3324 #endif