target/microblaze/mmu.c
/*
 * Microblaze MMU emulation for qemu.
 *
 * Copyright (c) 2009 Edgar E. Iglesias
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
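
/* Decode the page-size field of a TLB tag entry into a size in bytes. */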
static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < ARRAY_SIZE(sizes));
    return sizes[f];
}
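
/* Flush the QEMU TLB for every target page covered by TLB entry @idx. */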
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
    CPUState *cs = env_cpu(env);
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int tlb_size;
    uint32_t tlb_tag, end, t;

    t = mmu->rams[RAM_TAG][idx];
    if (!(t & TLB_VALID)) {
        return;
    }

    tlb_tag = t & TLB_EPN_MASK;
    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
    end = tlb_tag + tlb_size;

    while (tlb_tag < end) {
        tlb_flush_page(cs, tlb_tag);
        tlb_tag += TARGET_PAGE_SIZE;
    }
}
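
/*
 * The guest is changing the PID register: flush every valid TLB entry
 * whose TID matches the outgoing PID so its translations are not reused.
 */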
static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
{
    struct microblaze_mmu *mmu = &env->mmu;
    unsigned int i;
    uint32_t t;

    if (newpid & ~0xff) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
    }

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i])) {
                mmu_flush_idx(env, i);
            }
        }
    }
}
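
/*
 * Look up vaddr in the TLB.  An entry must be valid, cover the address and
 * have a TID of zero (global) or one matching the current PID.  Zone
 * protection (ZPR) may override the entry's EX/WR bits.  On a hit, *lu
 * describes the mapping.  Returns 1 on a hit, 0 on a miss.
 */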
/* rw - 0 = read, 1 = write, 2 = fetch. */
unsigned int mmu_translate(struct microblaze_mmu *mmu,
                           struct microblaze_mmu_lookup *lu,
                           target_ulong vaddr, int rw, int mmu_idx)
{
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
                abort();
            }

            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                continue;
            }
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                continue;
            }

            /* Bring in the data part. */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits. */
            tlb_zsel = (d >> 4) & 0xf;
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > mmu->c_mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore. */
            }

            if (mmu->c_mmu == 1) {
                t0 = 1; /* Zones are disabled. */
            }

            switch (t0) {
            case 0:
                if (mmu_idx == MMU_USER_IDX) {
                    continue;
                }
                break;
            case 2:
                if (mmu_idx != MMU_USER_IDX) {
                    tlb_ex = 1;
                    tlb_wr = 1;
                }
                break;
            case 3:
                tlb_ex = 1;
                tlb_wr = 1;
                break;
            default:
                break;
            }

            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr) {
                lu->prot |= PAGE_WRITE;
            } else if (rw == 1) {
                goto done;
            }
            if (tlb_ex) {
                lu->prot |= PAGE_EXEC;
            } else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn & mmu->c_addr_mask;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    qemu_log_mask(CPU_LOG_MMU,
                  "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                  vaddr, rw, tlb_wr, tlb_ex, hit);
    return hit;
}

/* Writes/reads to the MMU's special regs end up here. */
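
/*
 * Read an MMU special register.  TLBLO/TLBHI reads come from the TLB entry
 * selected by TLBX; an extended (ext) access returns the upper 32 bits of
 * the 64-bit TLBLO entry.
 */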
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    unsigned int i;
    uint32_t r = 0;

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
    /* Reads to HI/LO trigger reads from the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        if (!(env->mmu.c_mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }

        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
        if (rn == MMU_R_TLBHI) {
            env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
        }
        break;
    case MMU_R_PID:
    case MMU_R_ZPR:
        if (!(env->mmu.c_mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBX:
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBSX:
        qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
    return r;
}
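
/*
 * Write an MMU special register.  TLBLO/TLBHI writes update the TLB entry
 * selected by TLBX, and a TLBSX write searches the TLB for the EPN in the
 * written value.
 */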
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    uint64_t tmp64;
    unsigned int i;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]);

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
    /* Writes to HI/LO trigger writes to the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        if (rn == MMU_R_TLBHI) {
            if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalidating index %x at pc=%" PRIx64 "\n",
                              i, env->sregs[SR_PC]);
            }
            env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
            mmu_flush_idx(env, i);
        }
        tmp64 = env->mmu.rams[rn & 1][i];
        env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
        break;
    case MMU_R_ZPR:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /*
         * Changes to the zone protection reg flush the QEMU TLB.
         * Fortunately, these are very uncommon.
         */
        if (v != env->mmu.regs[rn]) {
            tlb_flush(env_cpu(env));
        }
        env->mmu.regs[rn] = v;
        break;
    case MMU_R_PID:
        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        if (v != env->mmu.regs[rn]) {
            mmu_change_pid(env, v);
            env->mmu.regs[rn] = v;
        }
        break;
    case MMU_R_TLBX:
        /* Bit 31 is read-only. */
        env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
        break;
    case MMU_R_TLBSX:
    {
        struct microblaze_mmu_lookup lu;
        int hit;

        if (env->mmu.c_mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        hit = mmu_translate(&env->mmu, &lu,
                            v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
        if (hit) {
            env->mmu.regs[MMU_R_TLBX] = lu.idx;
        } else {
            env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
        }
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
}
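
/* Reset the MMU: clear all of the special registers. */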
void mmu_init(struct microblaze_mmu *mmu)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
        mmu->regs[i] = 0;
    }
}