/*
 * CRIS mmu emulation.
 *
 * Copyright (c) 2007 AXIS Communications AB
 * Written by Edgar E. Iglesias.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "mmu.h"

#ifdef DEBUG
#define D(x) x
#define D_LOG(...) qemu_log(__VA_ARGS__)
#else
#define D(x) do { } while (0)
#define D_LOG(...) do { } while (0)
#endif
void cris_mmu_init(CPUCRISState *env)
{
    env->mmu_rand_lfsr = 0xcccc;
}
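
/*
 * Pseudo-random TLB set selection: mmu_rand_lfsr is stepped as a 16-bit
 * Fibonacci-style LFSR. compute_polynom() sums the state bits selected by
 * SR_POLYNOM; after the shift in cris_mmu_update_rand_lfsr() only the low
 * (parity) bit of that sum survives the final 16-bit mask, so it acts as the
 * feedback bit. The low two LFSR bits pick the TLB set on a refill miss.
 */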
#define SR_POLYNOM 0x8805
static inline unsigned int compute_polynom(unsigned int sr)
{
    unsigned int i;
    unsigned int f;

    f = 0;
    for (i = 0; i < 16; i++) {
        f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
    }

    return f;
}
static void cris_mmu_update_rand_lfsr(CPUCRISState *env)
{
    unsigned int f;

    /* Update lfsr at every fault. */
    f = compute_polynom(env->mmu_rand_lfsr);
    env->mmu_rand_lfsr >>= 1;
    env->mmu_rand_lfsr |= (f << 15);
    env->mmu_rand_lfsr &= 0xffff;
}
static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
{
    return (rw_gc_cfg & 12) != 0;
}

static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
{
    return (1 << seg) & rw_mm_cfg;
}
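
/*
 * Segmented (non-paged) translation: each of the sixteen 256MB segments has
 * a 4-bit physical base nibble packed into RW_MM_KBASE_LO (segments 0-7) or
 * RW_MM_KBASE_HI (segments 8-15). The nibble replaces the top four bits of
 * the virtual address.
 */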
static uint32_t cris_mmu_translate_seg(CPUCRISState *env, int seg)
{
    uint32_t base;
    int i;

    if (seg < 8) {
        base = env->sregs[SFR_RW_MM_KBASE_LO];
    } else {
        base = env->sregs[SFR_RW_MM_KBASE_HI];
    }

    i = seg & 7;
    base >>= i * 4;
    base &= 15;

    base <<= 28;
    return base;
}
/* Used by the tlb decoder. */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> start) & ((1 << (end - start + 1)) - 1))

static inline void set_field(uint32_t *dst, unsigned int val,
                             unsigned int offset, unsigned int width)
{
    uint32_t mask;

    mask = (1 << width) - 1;
    mask <<= offset;
    val <<= offset;

    val &= mask;
    *dst &= ~(mask);
    *dst |= val;
}
#ifdef DEBUG
static void dump_tlb(CPUCRISState *env, int mmu)
{
    int set;
    int idx;
    uint32_t hi, lo, tlb_vpn, tlb_pfn;

    for (set = 0; set < 4; set++) {
        for (idx = 0; idx < 16; idx++) {
            lo = env->tlbsets[mmu][set][idx].lo;
            hi = env->tlbsets[mmu][set][idx].hi;
            tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
            tlb_pfn = EXTRACT_FIELD(lo, 13, 31);

            printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
                   set, idx, hi, lo, tlb_vpn, tlb_pfn);
        }
    }
}
#endif
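
/*
 * Look up vaddr in the I- or D-MMU TLB (4 sets x 16 entries, 8KB pages).
 * An entry matches when its VPN equals the virtual page and either its
 * global bit is set or its PID equals the current PID. Protection checks
 * (kernel-only, write, execute, valid) may turn a hit back into a fault and
 * select the bus-fault vector reported in res->bf_vec. On a fault (unless
 * called for a debug access), RW_MM_TLB_SEL and R_MM_CAUSE are updated for
 * the software refill handler; on a refill miss the replacement set is taken
 * from the LFSR. Returns 0 on success, 1 on a fault.
 */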
static int cris_mmu_translate_page(struct cris_mmu_result *res,
                                   CPUCRISState *env, uint32_t vaddr,
                                   MMUAccessType access_type,
                                   int usermode, int debug)
{
    unsigned int vpage;
    unsigned int idx;
    uint32_t pid, lo, hi;
    uint32_t tlb_vpn, tlb_pfn = 0;
    int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
    int cfg_v, cfg_k, cfg_w, cfg_x;
    int set, match = 0;
    uint32_t r_cause;
    uint32_t r_cfg;
    int rwcause;
    int mmu = 1; /* Data mmu is default. */
    int vect_base;

    r_cause = env->sregs[SFR_R_MM_CAUSE];
    r_cfg = env->sregs[SFR_RW_MM_CFG];
    pid = env->pregs[PR_PID] & 0xff;

    switch (access_type) {
    case MMU_INST_FETCH:
        rwcause = CRIS_MMU_ERR_EXEC;
        mmu = 0;
        break;
    case MMU_DATA_STORE:
        rwcause = CRIS_MMU_ERR_WRITE;
        break;
    default:
    case MMU_DATA_LOAD:
        rwcause = CRIS_MMU_ERR_READ;
        break;
    }

    /* I exception vectors 4 - 7, D 8 - 11. */
    vect_base = (mmu + 1) * 4;

    vpage = vaddr >> 13;

    /*
     * We know which index to check on each set.
     * Scan both I and D.
     */
    idx = vpage & 15;
    for (set = 0; set < 4; set++) {
        lo = env->tlbsets[mmu][set][idx].lo;
        hi = env->tlbsets[mmu][set][idx].hi;

        tlb_vpn = hi >> 13;
        tlb_pid = EXTRACT_FIELD(hi, 0, 7);
        tlb_g = EXTRACT_FIELD(lo, 4, 4);

        D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
              mmu, set, idx, tlb_vpn, vpage, lo, hi);
        if ((tlb_g || (tlb_pid == pid)) && tlb_vpn == vpage) {
            match = 1;
            break;
        }
    }

    res->bf_vec = vect_base;
    if (match) {
        cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
        cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
        cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
        cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);

        tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
        tlb_v = EXTRACT_FIELD(lo, 3, 3);
        tlb_k = EXTRACT_FIELD(lo, 2, 2);
        tlb_w = EXTRACT_FIELD(lo, 1, 1);
        tlb_x = EXTRACT_FIELD(lo, 0, 0);

        /*
         * set_exception_vector(0x04, i_mmu_refill);
         * set_exception_vector(0x05, i_mmu_invalid);
         * set_exception_vector(0x06, i_mmu_access);
         * set_exception_vector(0x07, i_mmu_execute);
         * set_exception_vector(0x08, d_mmu_refill);
         * set_exception_vector(0x09, d_mmu_invalid);
         * set_exception_vector(0x0a, d_mmu_access);
         * set_exception_vector(0x0b, d_mmu_write);
         */
        if (cfg_k && tlb_k && usermode) {
            D(printf("tlb: kernel protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 2;
        } else if (access_type == MMU_DATA_STORE && cfg_w && !tlb_w) {
            D(printf("tlb: write protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            /* write accesses never go through the I mmu. */
            res->bf_vec = vect_base + 3;
        } else if (access_type == MMU_INST_FETCH && cfg_x && !tlb_x) {
            D(printf("tlb: exec protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 3;
        } else if (cfg_v && !tlb_v) {
            D(printf("tlb: invalid %x\n", vaddr));
            match = 0;
            res->bf_vec = vect_base + 1;
        }

        res->prot = 0;
        if (match) {
            res->prot |= PAGE_READ;
            if (tlb_w) {
                res->prot |= PAGE_WRITE;
            }
            if (mmu == 0 && (cfg_x || tlb_x)) {
                res->prot |= PAGE_EXEC;
            }
        } else {
            D(dump_tlb(env, mmu));
        }
    } else {
        /* If refill, provide a randomized set. */
        set = env->mmu_rand_lfsr & 3;
    }

    if (!match && !debug) {
        cris_mmu_update_rand_lfsr(env);

        /* Compute index. */
        idx = vpage & 15;

        /* Update RW_MM_TLB_SEL. */
        env->sregs[SFR_RW_MM_TLB_SEL] = 0;
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);

        /* Update RW_MM_CAUSE. */
        set_field(&r_cause, rwcause, 8, 2);
        set_field(&r_cause, vpage, 13, 19);
        set_field(&r_cause, pid, 0, 8);
        env->sregs[SFR_R_MM_CAUSE] = r_cause;
        D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
    }

    D(printf("%s access=%u mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
             " %x cause=%x sel=%x sp=%x %x %x\n",
             __func__, access_type, match, env->pc,
             vaddr, vpage,
             tlb_vpn, tlb_pfn, tlb_pid,
             pid,
             r_cause,
             env->sregs[SFR_RW_MM_TLB_SEL],
             env->regs[R_SP], env->pregs[PR_USP], env->ksp));
    res->phy = tlb_pfn << TARGET_PAGE_BITS;
    return !match;
}
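
/*
 * Invalidate QEMU's cached translations for every valid, non-global TLB
 * entry (in both the I- and D-MMU) that belongs to the given PID.
 */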
void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
{
    target_ulong vaddr;
    unsigned int idx;
    uint32_t lo, hi;
    uint32_t tlb_vpn;
    int tlb_pid, tlb_g, tlb_v;
    unsigned int set;
    unsigned int mmu;

    pid &= 0xff;
    for (mmu = 0; mmu < 2; mmu++) {
        for (set = 0; set < 4; set++) {
            for (idx = 0; idx < 16; idx++) {
                lo = env->tlbsets[mmu][set][idx].lo;
                hi = env->tlbsets[mmu][set][idx].hi;

                tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
                tlb_pid = EXTRACT_FIELD(hi, 0, 7);
                tlb_g = EXTRACT_FIELD(lo, 4, 4);
                tlb_v = EXTRACT_FIELD(lo, 3, 3);

                if (tlb_v && !tlb_g && (tlb_pid == pid)) {
                    vaddr = tlb_vpn << TARGET_PAGE_BITS;
                    D_LOG("flush pid=%x vaddr=%x\n", pid, vaddr);
                    tlb_flush_page(env_cpu(env), vaddr);
                }
            }
        }
    }
}
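
/*
 * Top-level translation entry point. PR_SRS is temporarily switched (1 for
 * instruction fetches, 2 for data) so the SFR_RW_MM_* accesses hit the
 * register bank of the MMU being used, and restored before returning. When
 * the MMU is disabled, or for kernel accesses to a segment marked as
 * segmented in RW_MM_CFG, the address is mapped directly; otherwise a TLB
 * lookup is performed. Returns 0 on success, 1 on a fault.
 */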
int cris_mmu_translate(struct cris_mmu_result *res,
                       CPUCRISState *env, uint32_t vaddr,
                       MMUAccessType access_type, int mmu_idx, int debug)
{
    int seg;
    int miss = 0;
    int is_user = mmu_idx == MMU_USER_IDX;
    uint32_t old_srs;

    old_srs = env->pregs[PR_SRS];

    env->pregs[PR_SRS] = access_type == MMU_INST_FETCH ? 1 : 2;

    if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
        res->phy = vaddr;
        res->prot = PAGE_RWX;
        goto done;
    }

    seg = vaddr >> 28;
    if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) {
        uint32_t base;

        miss = 0;
        base = cris_mmu_translate_seg(env, seg);
        res->phy = base | (0x0fffffff & vaddr);
        res->prot = PAGE_RWX;
    } else {
        miss = cris_mmu_translate_page(res, env, vaddr, access_type,
                                       is_user, debug);
    }
done:
    env->pregs[PR_SRS] = old_srs;
    return miss;
}