vmstate: Add VMSTATE_MACADDR for the new type
[qemu.git] / target-microblaze / mmu.c
blobd868ac56e93a68153b9a76db46eab94cbec89309
1 /*
2 * Microblaze MMU emulation for qemu.
4 * Copyright (c) 2009 Edgar E. Iglesias
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <assert.h>
23 #include "config.h"
24 #include "cpu.h"
25 #include "exec-all.h"
27 #define D(x)
/* Decode the 3-bit page-size field of a TLB tag word into a size in bytes.
   Legal encodings run from 1K (f == 0) to 16M (f == 7) in factor-of-4
   steps; any other value trips the assert. */
static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < sizeof(sizes) / sizeof(sizes[0]));
    return sizes[f];
}
39 static void mmu_flush_idx(CPUState *env, unsigned int idx)
41 struct microblaze_mmu *mmu = &env->mmu;
42 unsigned int tlb_size;
43 uint32_t tlb_tag, end, t;
45 t = mmu->rams[RAM_TAG][idx];
46 if (!(t & TLB_VALID))
47 return;
49 tlb_tag = t & TLB_EPN_MASK;
50 tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
51 end = tlb_tag + tlb_size;
53 while (tlb_tag < end) {
54 tlb_flush_page(env, tlb_tag);
55 tlb_tag += TARGET_PAGE_SIZE;
59 static void mmu_change_pid(CPUState *env, unsigned int newpid)
61 struct microblaze_mmu *mmu = &env->mmu;
62 unsigned int i;
63 unsigned int tlb_size;
64 uint32_t tlb_tag, mask, t;
66 if (newpid & ~0xff)
67 qemu_log("Illegal rpid=%x\n", newpid);
69 for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
70 /* Lookup and decode. */
71 t = mmu->rams[RAM_TAG][i];
72 if (t & TLB_VALID) {
73 tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
74 mask = ~(tlb_size - 1);
76 tlb_tag = t & TLB_EPN_MASK;
77 if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
78 mmu_flush_idx(env, i);
/* rw - 0 = read, 1 = write, 2 = fetch.  */
/*
 * Look up @vaddr in the guest-visible TLB.
 *
 * Scans all entries; a valid entry hits when the EPN tag matches vaddr
 * under the entry's page-size mask and the entry's TID is either 0
 * (global) or equal to the low 8 bits of MMU_R_PID.  The zone selected
 * by the entry (bits 7:4 of the data word) may override the per-entry
 * EX/WR protection bits via MMU_R_ZPR.
 *
 * On success fills *lu (vaddr/paddr/size/idx, err = ERR_HIT) and
 * returns 1.  If the matching entry denies the access, lu->err is left
 * at ERR_PROT; if nothing matched, ERR_MISS.  Returns 0 in both cases.
 */
unsigned int mmu_translate(struct microblaze_mmu *mmu,
                           struct microblaze_mmu_lookup *lu,
                           target_ulong vaddr, int rw, int mmu_idx)
{
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    unsigned int tlb_size;
    uint32_t tlb_tag, tlb_rpn, mask, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint32_t t, d;

        /* Lookup and decode.  */
        t = mmu->rams[RAM_TAG][i];
        D(qemu_log("TLB %d valid=%d\n", i, t & TLB_VALID));
        if (t & TLB_VALID) {
            /* Page-size field is bits 9:7 of the tag word. */
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            if (tlb_size < TARGET_PAGE_SIZE) {
                /* Sub-TARGET_PAGE_SIZE pages cannot be modelled with
                   QEMU's softmmu; bail out hard. */
                qemu_log("%d pages not supported\n", tlb_size);
                abort();
            }

            mask = ~(tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            /* Tag compare under the entry's own page-size mask. */
            if ((vaddr & mask) != (tlb_tag & mask)) {
                D(qemu_log("TLB %d vaddr=%x != tag=%x\n",
                           i, vaddr & mask, tlb_tag & mask));
                continue;
            }
            /* TID 0 entries are global; otherwise the TID must match
               the current PID. */
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                D(qemu_log("TLB %d pid=%x != tid=%x\n",
                           i, mmu->regs[MMU_R_PID], mmu->tids[i]));
                continue;
            }

            /* Bring in the data part.  */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now lets see if there is a zone that overrides the protbits.  */
            tlb_zsel = (d >> 4) & 0xf;
            /* Each zone is a 2-bit field in ZPR, zone 0 topmost. */
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > mmu->c_mmu_zones) {
                qemu_log("tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore.  */
            }

            if (mmu->c_mmu == 1) {
                t0 = 1; /* Zones are disabled.  */
            }

            switch (t0) {
                case 0:
                    /* Zone 0: no user access at all. */
                    if (mmu_idx == MMU_USER_IDX)
                        continue;
                    break;
                case 2:
                    /* Privileged mode gets full access regardless of
                       the entry's EX/WR bits. */
                    if (mmu_idx != MMU_USER_IDX) {
                        tlb_ex = 1;
                        tlb_wr = 1;
                    }
                    break;
                case 3:
                    /* Everyone gets full access. */
                    tlb_ex = 1;
                    tlb_wr = 1;
                    break;
                default: break;
            }

            /* From here on the entry matched; any early exit below is a
               protection failure. */
            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |=PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    D(qemu_log("MMU vaddr=%x rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
               vaddr, rw, tlb_wr, tlb_ex, hit));
    return hit;
}
/* Writes/reads to the MMU's special regs end up here.  */
/*
 * Read MMU special register @rn.
 *
 * Returns 0 (with a log message) if the configured core has no MMU or
 * no software TLB access.  Reading TLBHI/TLBLO returns the RAM entry
 * indexed by MMU_R_TLBX; as a side effect, reading TLBHI also loads the
 * entry's TID into MMU_R_PID (architected behavior mirrored from the
 * write path, which saves PID into tids[] — see mmu_write).
 */
uint32_t mmu_read(CPUState *env, uint32_t rn)
{
    unsigned int i;
    uint32_t r;

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log("MMU access on MMU-less system\n");
        return 0;
    }

    switch (rn) {
        /* Reads to HI/LO trig reads from the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return 0;
            }

            /* TLBX selects which of the (up to 256) entries to read;
               rn & 1 picks the TAG vs DATA ram. */
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            r = env->mmu.rams[rn & 1][i];
            if (rn == MMU_R_TLBHI)
                env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
            break;
        case MMU_R_PID:
        case MMU_R_ZPR:
            if (!(env->mmu.c_mmu_tlb_access & 1)) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return 0;
            }
            r = env->mmu.regs[rn];
            break;
        default:
            r = env->mmu.regs[rn];
            break;
    }
    D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
    return r;
}
/*
 * Write value @v to MMU special register @rn.
 *
 * Side effects that keep QEMU's softmmu coherent with the guest TLB:
 *  - TLBHI writes snapshot the current PID into tids[] and flush the
 *    pages previously mapped by the entry before it is overwritten.
 *  - ZPR changes flush the entire QEMU TLB (zone overrides can alter
 *    the protection of any entry).
 *  - PID changes flush entries tagged with the outgoing PID
 *    (mmu_change_pid) before the register is updated.
 *  - TLBSX performs a software lookup of v's EPN and deposits the hit
 *    index into TLBX, or sets TLBX bit 31 on a miss.
 */
void mmu_write(CPUState *env, uint32_t rn, uint32_t v)
{
    unsigned int i;
    D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));

    if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
        qemu_log("MMU access on MMU-less system\n");
        return;
    }

    switch (rn) {
        /* Writes to HI/LO trig writes to the mmu rams.  */
        case MMU_R_TLBLO:
        case MMU_R_TLBHI:
            i = env->mmu.regs[MMU_R_TLBX] & 0xff;
            if (rn == MMU_R_TLBHI) {
                /* NOTE(review): qemu_loglevel_mask(~0) appears to make
                   this diagnostic fire whenever any log mask bit is set;
                   the i < 3 filter presumably targets the low, commonly
                   pinned entries — confirm intent. */
                if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                    qemu_log("invalidating index %x at pc=%x\n",
                             i, env->sregs[SR_PC]);
                env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
                /* Drop the old mapping from QEMU's TLB before the entry
                   is replaced. */
                mmu_flush_idx(env, i);
            }
            env->mmu.rams[rn & 1][i] = v;

            D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
            break;
        case MMU_R_ZPR:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Changes to the zone protection reg flush the QEMU TLB.
               Fortunately, these are very uncommon.  */
            if (v != env->mmu.regs[rn]) {
                tlb_flush(env, 1);
            }
            env->mmu.regs[rn] = v;
            break;
        case MMU_R_PID:
            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* mmu_change_pid matches against the OLD value still in the
               register, so it must run before the assignment. */
            if (v != env->mmu.regs[rn]) {
                mmu_change_pid(env, v);
                env->mmu.regs[rn] = v;
            }
            break;
        case MMU_R_TLBSX:
        {
            struct microblaze_mmu_lookup lu;
            int hit;

            if (env->mmu.c_mmu_tlb_access <= 1) {
                qemu_log("Invalid access to MMU reg %d\n", rn);
                return;
            }

            /* Search for the EPN in v; rw == 0 (read) so protection
               checks do not reject a matching entry outright. */
            hit = mmu_translate(&env->mmu, &lu,
                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
            if (hit) {
                env->mmu.regs[MMU_R_TLBX] = lu.idx;
            } else
                env->mmu.regs[MMU_R_TLBX] |= 0x80000000;
            break;
        }
        default:
            env->mmu.regs[rn] = v;
            break;
    }
}
302 void mmu_init(struct microblaze_mmu *mmu)
304 int i;
305 for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
306 mmu->regs[i] = 0;