/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* #define FLUSH_ALL_TLBS */

/*****************************************************************************/
/* PowerPC MMU emulation */

/* Software driven TLB helpers */
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max;

    /* LOG_SWTLB("Invalidate all TLBs\n"); */
    /* Invalidate all defined software TLB */
    max = env->nb_tlb;
    if (env->id_tlbs == 1) {
        max *= 2;
    }
    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env_cpu(env));
}

static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    CPUState *cs = env_cpu(env);
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
                          TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(cs, tlb->EPN);
        }
    }
#else
    /* XXX: PowerPC specification say this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}

static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
                  TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                  EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}

/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env_cpu(env));
}
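
/*
 * Flush BookE 2.06 TLB arrays: "flags" is a bitmask with one bit per TLB
 * array (bit i selects TLBn i); when "check_iprot" is set, entries whose
 * MAS1[IPROT] bit is set are preserved. The whole QEMU TLB is flushed
 * afterwards.
 */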
static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
        /* Flushing 1024 4K pages is slower than a complete flush */
        qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
        tlb_flush(cs);
        qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
        return;
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
                  " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
                  base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, page);
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif

static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
                  TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
                  value, env->nip);
}

void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}

void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}

/*****************************************************************************/
/* TLB management */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env_cpu(env));
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
        break;
    }
}

void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidate TLBs for all segments */
        /*
         * XXX: given the fact that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_32B:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account, we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages is way
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}

/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

#if defined(TARGET_PPC64)

/* Invalidation Selector */
#define TLBIE_IS_VA         0
#define TLBIE_IS_PID        1
#define TLBIE_IS_LPID       2
#define TLBIE_IS_ALL        3

/* Radix Invalidation Control */
#define TLBIE_RIC_TLB       0
#define TLBIE_RIC_PWC       1
#define TLBIE_RIC_ALL       2
#define TLBIE_RIC_GRP       3

/* Radix Actual Page sizes */
#define TLBIE_R_AP_4K       0
#define TLBIE_R_AP_64K      5
#define TLBIE_R_AP_2M       1
#define TLBIE_R_AP_1G       2

/* RB field masks */
#define TLBIE_RB_EPN_MASK   PPC_BITMASK(0, 51)
#define TLBIE_RB_IS_MASK    PPC_BITMASK(52, 53)
#define TLBIE_RB_AP_MASK    PPC_BITMASK(56, 58)
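
/*
 * In the ISA v3.0 tlbie RB operand, the effective page number sits in bits
 * 0:51, the invalidation selector (IS) in bits 52:53 and the actual page
 * size (AP) in bits 56:58; the masks above and the extract64()/PPC_BIT_NR()
 * decoding below follow that layout.
 */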
void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
                         uint32_t flags)
{
    unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
    /*
     * With the exception of the checks for invalid instruction forms,
     * PRS is currently ignored, because we don't know if a given TLB entry
     * is process or partition scoped.
     */
    bool prs = flags & TLBIE_F_PRS;
    bool r = flags & TLBIE_F_R;
    bool local = flags & TLBIE_F_LOCAL;
    bool effR;
    unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
    unsigned ap;    /* actual page size */
    target_ulong addr, pgoffs_mask;

    qemu_log_mask(CPU_LOG_MMU,
        "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
        __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);

    effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;

    /* Partial TLB invalidation is supported for Radix only for now. */
    if (!effR) {
        goto inval_all;
    }

    /* Check for invalid instruction forms (effR=1). */
    if (unlikely(ric == TLBIE_RIC_GRP ||
                 ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
                  is == TLBIE_IS_VA) ||
                 (!prs && is == TLBIE_IS_PID))) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
            __func__, ric, prs, r, is);
        goto invalid;
    }

    /* We don't cache Page Walks. */
    if (ric == TLBIE_RIC_PWC) {
        if (local) {
            unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
            if (set != 0) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
                              __func__, set);
                goto invalid;
            }
        }
        return;
    }

    /*
     * Invalidation by LPID or PID is not supported, so fallback
     * to full TLB flush in these cases.
     */
    if (is != TLBIE_IS_VA) {
        goto inval_all;
    }

    /*
     * The results of an attempt to invalidate a translation outside of
     * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
     * and EA 0:1 != 0b00) are boundedly undefined.
     */
    if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
                 (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: attempt to invalidate a translation outside of quadrant 0\n",
            __func__);
        goto inval_all;
    }

    assert(is == TLBIE_IS_VA);
    assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);

    ap = extract64(rb, PPC_BIT_NR(58), 3);
    switch (ap) {
    case TLBIE_R_AP_4K:
        pgoffs_mask = 0xfffull;
        break;

    case TLBIE_R_AP_64K:
        pgoffs_mask = 0xffffull;
        break;

    case TLBIE_R_AP_2M:
        pgoffs_mask = 0x1fffffull;
        break;

    case TLBIE_R_AP_1G:
        pgoffs_mask = 0x3fffffffull;
        break;

    default:
        /*
         * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
         * RB 44:51, or RB 56:63, when it is needed to perform the specified
         * operation, is not supported by the implementation, the instruction
         * is treated as if the instruction form were invalid.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
        goto invalid;
    }

    addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;

    if (local) {
        tlb_flush_page(env_cpu(env), addr);
    } else {
        tlb_flush_page_all_cpus(env_cpu(env), addr);
    }
    return;

inval_all:
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    if (!local) {
        env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
    }
    return;

invalid:
    raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                           POWERPC_EXCP_INVAL |
                           POWERPC_EXCP_INVAL_INVAL, GETPC());
}

#endif

void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
                  " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
                  __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */

target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /*
     * We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     *
     * XXX: FIX THIS: Pretend we have no BAT
     */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address_wtlb(env, &ctx, addr, 0, ACCESS_INT, 0) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;
    return ret;
}

static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}
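
/*
 * The BookE/4xx TSIZE encoding is a power of four in KiB: a size field of n
 * maps to 1 KiB << (2 * n), i.e. 0 = 1 KiB, 1 = 4 KiB, ... 9 = 256 MiB.
 * booke_page_size_to_tlb() below is the inverse mapping.
 */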
static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_40x_PID] != val) {
        env->spr[SPR_40x_PID] = val;
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}

target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    helper_store_40x_pid(env, tlb->PID);
    return ret;
}

target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
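
/*
 * Flush the part of the QEMU TLB covered by an embedded (4xx/440) TLB entry.
 * The two permission nibbles of tlb->prot select which pair of MMU indexes
 * may hold the mapping, and bit 0 of tlb->attr (which appears to act as the
 * translation-space selector) shifts that pair up by two, so only the
 * affected indexes are flushed over the entry's address range.
 */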
static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb)
{
    unsigned mmu_idx = 0;

    if (tlb->prot & 0xf) {
        mmu_idx |= 0x1;
    }
    if ((tlb->prot >> 4) & 0xf) {
        mmu_idx |= 0x2;
    }
    if (tlb->attr & 1) {
        mmu_idx <<= 2;
    }

    tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx,
                              TARGET_LONG_BITS);
}

void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
                  __func__, (int)entry,
                  val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
                  " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx
                  " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb)
{
    if (tlb->PID == env->spr[SPR_BOOKE_PID]) {
        return true;
    }
    if (!env->nb_pids) {
        return false;
    }

    if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) {
        return true;
    }
    if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) {
        return true;
    }

    return false;
}

/* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
                  __func__, word, (int)entry, value);
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];

    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(env_cpu(env), tlb);
    }

    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        tlb->EPN = value & 0xFFFFFC00;
        tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF);
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            tlb->prot &= ~PAGE_VALID;
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        break;
    case 1:
        tlb->RPN = value & 0xFFFFFC0F;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}

target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */
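
/*
 * Return the TLB entry currently addressed by the MAS registers:
 * MAS0[TLBSEL] picks the TLB array, MAS0[ESEL] the entry (way) within it,
 * and the effective address in MAS2[EPN] selects the set for set-associative
 * arrays. Hardware entry select (HES) is not implemented.
 */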
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs mean we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !FIELD_EX64(env->msr, MSR, GS)) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (FIELD_EX64(env->msr, MSR, GS)) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For TLB which has a fixed size TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}

static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}

void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}
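
/*
 * TCG tlb_fill hook: translate the access with ppc_xlate() and install the
 * resulting mapping in the QEMU TLB. On failure, either report a miss to a
 * probing caller or raise the interrupt that ppc_xlate() recorded in
 * cs->exception_index and env->error_code.
 */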
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int page_size, prot;

    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
                  &page_size, &prot, mmu_idx, !probe)) {
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, 1UL << page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    raise_exception_err_ra(&cpu->env, cs->exception_index,
                           cpu->env.error_code, retaddr);
}