kvm: bios: use preprocessor to generate Processor() acpi blocks
[qemu-kvm/markmc.git] / target-cris / mmu.c
blob86f625b7537ba97555ec9ba5c04a087549a07573
1 /*
2 * CRIS mmu emulation.
4 * Copyright (c) 2007 AXIS Communications AB
5 * Written by Edgar E. Iglesias.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #ifndef CONFIG_USER_ONLY
24 #include <stdio.h>
25 #include <string.h>
26 #include <stdlib.h>
28 #include "config.h"
29 #include "cpu.h"
30 #include "mmu.h"
31 #include "exec-all.h"
33 #define D(x)
35 void cris_mmu_init(CPUState *env)
37 env->mmu_rand_lfsr = 0xcccc;
#define SR_POLYNOM 0x8805
/* Count how many tap bits of SR_POLYNOM are also set in sr.  The caller
   masks the result down to a single bit (it only ORs bit 15 of f << 15
   into a 16-bit register), so the sum effectively acts as the XOR/parity
   feedback of a 16-bit LFSR.  */
static inline unsigned int compute_polynom(unsigned int sr)
{
    unsigned int bit;
    unsigned int count = 0;

    for (bit = 0; bit < 16; bit++)
        count += ((SR_POLYNOM >> bit) & (sr >> bit)) & 1;

    return count;
}
/* Nonzero (1) when either MMU-enable bit (mask 0xc) is set in RW_GC_CFG.  */
static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
{
    return (rw_gc_cfg & 12) ? 1 : 0;
}
/* Nonzero when segment seg (top nibble of the address) is configured for
   segmented translation in RW_MM_CFG.  Returns the bit in place, so the
   value is truthy rather than normalized to 0/1.  */
static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
{
    return rw_mm_cfg & (1 << seg);
}
63 static uint32_t cris_mmu_translate_seg(CPUState *env, int seg)
65 uint32_t base;
66 int i;
68 if (seg < 8)
69 base = env->sregs[SFR_RW_MM_KBASE_LO];
70 else
71 base = env->sregs[SFR_RW_MM_KBASE_HI];
73 i = seg & 7;
74 base >>= i * 4;
75 base &= 15;
77 base <<= 28;
78 return base;
/* Used by the tlb decoder: extract bits [start, end] (inclusive) of src,
   right-justified.  All arguments fully parenthesized so compound
   expressions (e.g. EXTRACT_FIELD(x, a + 1, b)) expand correctly; the
   original left `start' and `end' bare, which mis-parses such calls.  */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
/* Write the low `width' bits of val into *dst at bit position `offset',
   leaving all other bits of *dst untouched.  */
static inline void set_field(uint32_t *dst, unsigned int val,
                             unsigned int offset, unsigned int width)
{
    uint32_t mask = ((1 << width) - 1) << offset;

    *dst = (*dst & ~mask) | ((val << offset) & mask);
}
98 static void dump_tlb(CPUState *env, int mmu)
100 int set;
101 int idx;
102 uint32_t hi, lo, tlb_vpn, tlb_pfn;
104 for (set = 0; set < 4; set++) {
105 for (idx = 0; idx < 16; idx++) {
106 lo = env->tlbsets[mmu][set][idx].lo;
107 hi = env->tlbsets[mmu][set][idx].hi;
108 tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
109 tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
111 printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
112 set, idx, hi, lo, tlb_vpn, tlb_pfn);
117 /* rw 0 = read, 1 = write, 2 = exec. */
118 static int cris_mmu_translate_page(struct cris_mmu_result_t *res,
119 CPUState *env, uint32_t vaddr,
120 int rw, int usermode)
122 unsigned int vpage;
123 unsigned int idx;
124 uint32_t lo, hi;
125 uint32_t tlb_vpn, tlb_pfn = 0;
126 int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
127 int cfg_v, cfg_k, cfg_w, cfg_x;
128 int set, match = 0;
129 uint32_t r_cause;
130 uint32_t r_cfg;
131 int rwcause;
132 int mmu = 1; /* Data mmu is default. */
133 int vect_base;
135 r_cause = env->sregs[SFR_R_MM_CAUSE];
136 r_cfg = env->sregs[SFR_RW_MM_CFG];
138 switch (rw) {
139 case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break;
140 case 1: rwcause = CRIS_MMU_ERR_WRITE; break;
141 default:
142 case 0: rwcause = CRIS_MMU_ERR_READ; break;
145 /* I exception vectors 4 - 7, D 8 - 11. */
146 vect_base = (mmu + 1) * 4;
148 vpage = vaddr >> 13;
150 /* We know the index which to check on each set.
151 Scan both I and D. */
152 #if 0
153 for (set = 0; set < 4; set++) {
154 for (idx = 0; idx < 16; idx++) {
155 lo = env->tlbsets[mmu][set][idx].lo;
156 hi = env->tlbsets[mmu][set][idx].hi;
157 tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
158 tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
160 printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
161 set, idx, hi, lo, tlb_vpn, tlb_pfn);
164 #endif
166 idx = vpage & 15;
167 for (set = 0; set < 4; set++)
169 lo = env->tlbsets[mmu][set][idx].lo;
170 hi = env->tlbsets[mmu][set][idx].hi;
172 tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
173 tlb_pid = EXTRACT_FIELD(hi, 0, 7);
174 tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
175 tlb_g = EXTRACT_FIELD(lo, 4, 4);
177 D(fprintf(logfile,
178 "TLB[%d][%d][%d] v=%x vpage=%x->pfn=%x lo=%x hi=%x\n",
179 mmu, set, idx, tlb_vpn, vpage, tlb_pfn, lo, hi));
180 if ((tlb_g || (tlb_pid == (env->pregs[PR_PID] & 0xff)))
181 && tlb_vpn == vpage) {
182 match = 1;
183 break;
187 res->bf_vec = vect_base;
188 if (match) {
189 cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
190 cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
191 cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
192 cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);
194 tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
195 tlb_v = EXTRACT_FIELD(lo, 3, 3);
196 tlb_k = EXTRACT_FIELD(lo, 2, 2);
197 tlb_w = EXTRACT_FIELD(lo, 1, 1);
198 tlb_x = EXTRACT_FIELD(lo, 0, 0);
201 set_exception_vector(0x04, i_mmu_refill);
202 set_exception_vector(0x05, i_mmu_invalid);
203 set_exception_vector(0x06, i_mmu_access);
204 set_exception_vector(0x07, i_mmu_execute);
205 set_exception_vector(0x08, d_mmu_refill);
206 set_exception_vector(0x09, d_mmu_invalid);
207 set_exception_vector(0x0a, d_mmu_access);
208 set_exception_vector(0x0b, d_mmu_write);
210 if (cfg_k && tlb_k && usermode) {
211 D(printf ("tlb: kernel protected %x lo=%x pc=%x\n",
212 vaddr, lo, env->pc));
213 match = 0;
214 res->bf_vec = vect_base + 2;
215 } else if (rw == 1 && cfg_w && !tlb_w) {
216 D(printf ("tlb: write protected %x lo=%x pc=%x\n",
217 vaddr, lo, env->pc));
218 match = 0;
219 /* write accesses never go through the I mmu. */
220 res->bf_vec = vect_base + 3;
221 } else if (rw == 2 && cfg_x && !tlb_x) {
222 D(printf ("tlb: exec protected %x lo=%x pc=%x\n",
223 vaddr, lo, env->pc));
224 match = 0;
225 res->bf_vec = vect_base + 3;
226 } else if (cfg_v && !tlb_v) {
227 D(printf ("tlb: invalid %x\n", vaddr));
228 match = 0;
229 res->bf_vec = vect_base + 1;
232 res->prot = 0;
233 if (match) {
234 res->prot |= PAGE_READ;
235 if (tlb_w)
236 res->prot |= PAGE_WRITE;
237 if (tlb_x)
238 res->prot |= PAGE_EXEC;
240 else
241 D(dump_tlb(env, mmu));
243 env->sregs[SFR_RW_MM_TLB_HI] = hi;
244 env->sregs[SFR_RW_MM_TLB_LO] = lo;
245 } else {
246 /* If refill, provide a randomized set. */
247 set = env->mmu_rand_lfsr & 3;
250 if (!match) {
251 unsigned int f;
253 /* Update lfsr at every fault. */
254 f = compute_polynom(env->mmu_rand_lfsr);
255 env->mmu_rand_lfsr >>= 1;
256 env->mmu_rand_lfsr |= (f << 15);
257 env->mmu_rand_lfsr &= 0xffff;
259 /* Compute index. */
260 idx = vpage & 15;
262 /* Update RW_MM_TLB_SEL. */
263 env->sregs[SFR_RW_MM_TLB_SEL] = 0;
264 set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
265 set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);
267 /* Update RW_MM_CAUSE. */
268 set_field(&r_cause, rwcause, 8, 2);
269 set_field(&r_cause, vpage, 13, 19);
270 set_field(&r_cause, env->pregs[PR_PID], 0, 8);
271 env->sregs[SFR_R_MM_CAUSE] = r_cause;
272 D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
276 D(printf ("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
277 " %x cause=%x sel=%x sp=%x %x %x\n",
278 __func__, rw, match, env->pc,
279 vaddr, vpage,
280 tlb_vpn, tlb_pfn, tlb_pid,
281 env->pregs[PR_PID],
282 r_cause,
283 env->sregs[SFR_RW_MM_TLB_SEL],
284 env->regs[R_SP], env->pregs[PR_USP], env->ksp));
286 res->pfn = tlb_pfn;
287 return !match;
290 void cris_mmu_flush_pid(CPUState *env, uint32_t pid)
292 target_ulong vaddr;
293 unsigned int idx;
294 uint32_t lo, hi;
295 uint32_t tlb_vpn;
296 int tlb_pid, tlb_g, tlb_v, tlb_k;
297 unsigned int set;
298 unsigned int mmu;
300 pid &= 0xff;
301 for (mmu = 0; mmu < 2; mmu++) {
302 for (set = 0; set < 4; set++)
304 for (idx = 0; idx < 16; idx++) {
305 lo = env->tlbsets[mmu][set][idx].lo;
306 hi = env->tlbsets[mmu][set][idx].hi;
308 tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
309 tlb_pid = EXTRACT_FIELD(hi, 0, 7);
310 tlb_g = EXTRACT_FIELD(lo, 4, 4);
311 tlb_v = EXTRACT_FIELD(lo, 3, 3);
312 tlb_k = EXTRACT_FIELD(lo, 2, 2);
314 /* Kernel protected areas need to be flushed
315 as well. */
316 if (tlb_v && !tlb_g) {
317 vaddr = tlb_vpn << TARGET_PAGE_BITS;
318 D(fprintf(logfile,
319 "flush pid=%x vaddr=%x\n",
320 pid, vaddr));
321 tlb_flush_page(env, vaddr);
328 int cris_mmu_translate(struct cris_mmu_result_t *res,
329 CPUState *env, uint32_t vaddr,
330 int rw, int mmu_idx)
332 uint32_t phy = vaddr;
333 int seg;
334 int miss = 0;
335 int is_user = mmu_idx == MMU_USER_IDX;
336 uint32_t old_srs;
338 old_srs= env->pregs[PR_SRS];
340 /* rw == 2 means exec, map the access to the insn mmu. */
341 env->pregs[PR_SRS] = rw == 2 ? 1 : 2;
343 if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
344 res->phy = vaddr;
345 res->prot = PAGE_BITS;
346 goto done;
349 seg = vaddr >> 28;
350 if (cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG]))
352 uint32_t base;
354 miss = 0;
355 base = cris_mmu_translate_seg(env, seg);
356 phy = base | (0x0fffffff & vaddr);
357 res->phy = phy;
358 res->prot = PAGE_BITS;
360 else
362 miss = cris_mmu_translate_page(res, env, vaddr, rw, is_user);
363 phy = (res->pfn << 13);
364 res->phy = phy;
366 done:
367 env->pregs[PR_SRS] = old_srs;
368 return miss;
370 #endif