target/ppc/mmu-hash32.c
/*
 *  PowerPC MMU, TLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "internal.h"
#include "mmu-hash32.h"
#include "exec/log.h"
/* #define DEBUG_BATS */

#ifdef DEBUG_BATS
#  define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_BATS(...) do { } while (0)
#endif
struct mmu_ctx_hash32 {
    hwaddr raddr;      /* Real address            */
    int prot;          /* Protection bits         */
    int key;           /* Access key              */
};
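/*
 * Page protection as derived from the PP bits and the access key,
 * matching the switch in ppc_hash32_pp_prot() below:
 *
 *   key == 0: PP 0/1/2 -> read/write, PP 3 -> read-only
 *   key == 1: PP 0 -> no access, PP 1/3 -> read-only, PP 2 -> read/write
 *
 * PAGE_EXEC is then granted unless the no-execute flag is set.
 */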
static int ppc_hash32_pp_prot(int key, int pp, int nx)
{
    int prot;

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
            prot = PAGE_READ;
            break;

        default:
            abort();
        }
    } else {
        switch (pp) {
        case 0x0:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        default:
            abort();
        }
    }
    if (nx == 0) {
        prot |= PAGE_EXEC;
    }

    return prot;
}
static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
                               target_ulong sr, ppc_hash_pte32_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;

    key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
    pp = pte.pte1 & HPTE32_R_PP;

    return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
}
static target_ulong hash32_bat_size(PowerPCCPU *cpu,
                                    target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;

    if ((msr_pr && !(batu & BATU32_VP))
        || (!msr_pr && !(batu & BATU32_VS))) {
        return 0;
    }

    return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}
static int hash32_bat_prot(PowerPCCPU *cpu,
                           target_ulong batu, target_ulong batl)
{
    int pp, prot;

    prot = 0;
    pp = batl & BATL32_PP;
    if (pp != 0) {
        prot = PAGE_READ | PAGE_EXEC;
        if (pp == 0x2) {
            prot |= PAGE_WRITE;
        }
    }
    return prot;
}
static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
                                        target_ulong batu, target_ulong batl)
{
    if (!(batl & BATL32_601_V)) {
        return 0;
    }

    return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
}
static int hash32_bat_601_prot(PowerPCCPU *cpu,
                               target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;
    int key, pp;

    pp = batu & BATU32_601_PP;
    if (msr_pr == 0) {
        key = !!(batu & BATU32_601_KS);
    } else {
        key = !!(batu & BATU32_601_KP);
    }
    return ppc_hash32_pp_prot(key, pp, 0);
}
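/*
 * BAT lookup: each BAT pair is checked in turn.  The upper BAT word
 * supplies the effective block address (BEPI) and the block length
 * mask computed by hash32_bat_size() (or the 601 variant); an access
 * hits when the masked effective address equals BEPI, and the real
 * address is then the block's physical base from the lower BAT word
 * combined with the untranslated offset bits.
 */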
static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
                                    MMUAccessType access_type, int *prot)
{
    CPUPPCState *env = &cpu->env;
    target_ulong *BATlt, *BATut;
    bool ifetch = access_type == MMU_INST_FETCH;
    int i;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             ifetch ? 'I' : 'D', ea);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        target_ulong batu = BATut[i];
        target_ulong batl = BATlt[i];
        target_ulong mask;

        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            mask = hash32_bat_601_size(cpu, batu, batl);
        } else {
            mask = hash32_bat_size(cpu, batu, batl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 ifetch ? 'I' : 'D', i, ea, batu, batl);

        if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
            hwaddr raddr = (batl & mask) | (ea & ~mask);

            if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
                *prot = hash32_bat_601_prot(cpu, batu, batl);
            } else {
                *prot = hash32_bat_prot(cpu, batu, batl);
            }

            return raddr & TARGET_PAGE_MASK;
        }
    }

    /* No hit */
#if defined(DEBUG_BATS)
    if (qemu_log_enabled()) {
        target_ulong *BATu, *BATl;
        target_ulong BEPIu, BEPIl, bl;

        LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea);
        for (i = 0; i < 4; i++) {
            BATu = &BATut[i];
            BATl = &BATlt[i];
            BEPIu = *BATu & BATU32_BEPIU;
            BEPIl = *BATu & BATU32_BEPIL;
            bl = (*BATu & 0x00001FFC) << 15;
            LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                     " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                     TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                     __func__, ifetch ? 'I' : 'D', i, ea,
                     *BATu, *BATl, BEPIu, BEPIl, bl);
        }
    }
#endif

    return -1;
}
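/*
 * Direct-store (T=1) segments bypass the hash page table entirely.
 * Only plain integer loads/stores and cache ops complete normally;
 * instruction fetches, floating-point, reservation and
 * external-control accesses are rejected with the appropriate
 * exception state, as handled case by case below.
 */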
static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
                                   target_ulong eaddr,
                                   MMUAccessType access_type,
                                   hwaddr *raddr, int *prot)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));

    qemu_log_mask(CPU_LOG_MMU, "direct store...\n");

    if ((sr & 0x1FF00000) >> 20 == 0x07f) {
        /*
         * Memory-forced I/O controller interface access
         *
         * If T=1 and BUID=x'07F', the 601 performs a memory access
         * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
         */
        *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (access_type == MMU_INST_FETCH) {
        /* No code fetch is allowed in direct-store areas */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    switch (env->access_type) {
    case ACCESS_INT:
        /* Integer load/store : only access allowed */
        break;
    case ACCESS_FLOAT:
        /* Floating point load/store */
        cs->exception_index = POWERPC_EXCP_ALIGN;
        env->error_code = POWERPC_EXCP_ALIGN_FP;
        env->spr[SPR_DAR] = eaddr;
        return 1;
    case ACCESS_RES:
        /* lwarx, ldarx or srwcx. */
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06000000;
        } else {
            env->spr[SPR_DSISR] = 0x04000000;
        }
        return 1;
    case ACCESS_CACHE:
        /*
         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
         *
         * Should make the instruction do nothing. As it already does
         * nothing, it's quite easy :-)
         */
        *raddr = eaddr;
        return 0;
    case ACCESS_EXT:
        /* eciwx or ecowx */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06100000;
        } else {
            env->spr[SPR_DSISR] = 0x04100000;
        }
        return 1;
    default:
        cpu_abort(cs, "ERROR: instruction should not need "
                  "address translation\n");
    }
    if ((access_type == MMU_DATA_STORE || key != 1) &&
        (access_type == MMU_DATA_LOAD || key != 0)) {
        *raddr = eaddr;
        return 0;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x0a000000;
        } else {
            env->spr[SPR_DSISR] = 0x08000000;
        }
        return 1;
    }
}
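/*
 * The hash value selects a PTE group (PTEG) of HPTES_PER_GROUP
 * entries, each HASH_PTE_SIZE_32 bytes long; get_pteg_offset32()
 * scales the hash by the PTEG size and wraps it with the hash table
 * mask to obtain the group's offset within the table.
 */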
hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
    target_ulong mask = ppc_hash32_hpt_mask(cpu);

    return (hash * HASH_PTEG_SIZE_32) & mask;
}
static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pte_offset = pteg_off;
    target_ulong pte0, pte1;
    int i;

    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);

        if ((pte0 & HPTE32_V_VALID)
            && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
            && HPTE32_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            return pte_offset;
        }

        pte_offset += HASH_PTE_SIZE_32;
    }

    return -1;
}
static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 6;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}
static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 7;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
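/*
 * Hash table lookup: the primary hash is the VSID from the segment
 * register XORed with the page index of the effective address; the
 * secondary hash is its one's complement.  The PTEs in each group are
 * tagged with ptem (VSID plus abbreviated page index), which is what
 * ppc_hash32_pteg_search() compares against.
 */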
static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
                                     target_ulong sr, target_ulong eaddr,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pteg_off, pte_offset;
    hwaddr hash;
    uint32_t vsid, pgidx, ptem;

    vsid = sr & SR32_VSID;
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
    hash = vsid ^ pgidx;
    ptem = (vsid << 7) | (pgidx >> 10);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
                  " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=%" PRIx32 " ptem=%" PRIx32
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                  vsid, ptem, hash);
    pteg_off = get_pteg_offset32(cpu, hash);
    pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=%" PRIx32 " api=%" PRIx32
                      " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
                      ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
        pteg_off = get_pteg_offset32(cpu, ~hash);
        pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
    }

    return pte_offset;
}
static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
                                   target_ulong eaddr)
{
    hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
    hwaddr mask = ~TARGET_PAGE_MASK;

    return (rpn & ~mask) | (eaddr & mask);
}
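/*
 * Full translation path for a guest access.  On success a TLB entry is
 * installed via tlb_set_page() and 0 is returned; on failure the
 * exception state (exception_index, error_code, DAR/DSISR) is set up
 * and 1 is returned so the caller can deliver the fault.
 */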
int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;
    int need_prot;
    MMUAccessType access_type;
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    access_type = rwx;
    need_prot = prot_for_access_type(access_type);

    /* 1. Handle real mode accesses */
    if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
        /* Translation is off */
        raddr = eaddr;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Check Block Address Translation entries (BATs) */
    if (env->nb_BATs != 0) {
        raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, &prot);
        if (raddr != -1) {
            if (need_prot & ~prot) {
                if (access_type == MMU_INST_FETCH) {
                    cs->exception_index = POWERPC_EXCP_ISI;
                    env->error_code = 0x08000000;
                } else {
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0a000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                return 1;
            }

            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        }
    }

    /* 3. Look up the Segment Register */
    sr = env->sr[eaddr >> 28];

    /* 4. Handle direct store segments */
    if (sr & SR32_T) {
        if (ppc_hash32_direct_store(cpu, sr, eaddr, access_type,
                                    &raddr, &prot) == 0) {
            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        } else {
            return 1;
        }
    }

    /* 5. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (sr & SR32_NX)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 6. Locate the PTE in the hash table */
    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 7. Check access permissions */

    prot = ppc_hash32_pte_prot(cpu, sr, pte);

    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x0a000000;
            } else {
                env->spr[SPR_DSISR] = 0x08000000;
            }
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 8. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE32_R_R)) {
        ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
    }
    if (!(pte.pte1 & HPTE32_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 9. Determine the real address from the PTE */

    raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
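/*
 * Debugger/monitor address translation: same BAT and hash table walk
 * as above, but without raising exceptions or updating the referenced
 * and changed bits.
 */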
hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;

    if (msr_dr == 0) {
        /* Translation is off */
        return eaddr;
    }

    if (env->nb_BATs != 0) {
        hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot);
        if (raddr != -1) {
            return raddr;
        }
    }

    sr = env->sr[eaddr >> 28];

    if (sr & SR32_T) {
        /* FIXME: Add suitable debug support for Direct Store segments */
        return -1;
    }

    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash32_pte_raddr(sr, pte, eaddr) & TARGET_PAGE_MASK;
}