/*
 *  CRIS mmu emulation.
 *
 *  Copyright (c) 2007 AXIS Communications AB
 *  Written by Edgar E. Iglesias.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CONFIG_USER_ONLY

#include <stdio.h>

#include "cpu.h"
#include "mmu.h"
#include "exec-all.h"

#ifdef DEBUG
#define D(x) x
#define D_LOG(...) qemu_log(__VA_ARGS__)
#else
#define D(x) do { } while (0)
#define D_LOG(...) do { } while (0)
#endif

void cris_mmu_init(CPUState *env)
{
    env->mmu_rand_lfsr = 0xcccc;
}

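/* The LFSR below is only used to pick a pseudo-random TLB set on refills
   (see cris_mmu_translate_page); SR_POLYNOM is the feedback tap mask.  */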
#define SR_POLYNOM 0x8805
static inline unsigned int compute_polynom(unsigned int sr)
{
    unsigned int i;
    unsigned int f;

    f = 0;
    for (i = 0; i < 16; i++)
        f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);

    return f;
}

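/* Only bit 0 of the sum above survives the shift/or/mask sequence below,
   so the feedback bit is effectively the parity of (sr & SR_POLYNOM),
   i.e. a Fibonacci-style LFSR step.  */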
static void cris_mmu_update_rand_lfsr(CPUState *env)
{
    unsigned int f;

    /* Update lfsr at every fault.  */
    f = compute_polynom(env->mmu_rand_lfsr);
    env->mmu_rand_lfsr >>= 1;
    env->mmu_rand_lfsr |= (f << 15);
    env->mmu_rand_lfsr &= 0xffff;
}

static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
{
    return (rw_gc_cfg & 12) != 0;
}

static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
{
    return (1 << seg) & rw_mm_cfg;
}

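/* Translate a kernel segment.  Each enabled segment takes its 4-bit
   physical base nibble from RW_MM_KBASE_LO (segments 0-7) or
   RW_MM_KBASE_HI (segments 8-15); the nibble becomes bits 31:28 of the
   physical address.  */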
static uint32_t cris_mmu_translate_seg(CPUState *env, int seg)
{
    uint32_t base;
    int i;

    if (seg < 8)
        base = env->sregs[SFR_RW_MM_KBASE_LO];
    else
        base = env->sregs[SFR_RW_MM_KBASE_HI];

    i = seg & 7;
    base >>= i * 4;
    base &= 15;
    base <<= 28;
    return base;
}

/* Used by the tlb decoder.  */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> start) & ((1 << (end - start + 1)) - 1))
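/* For example, EXTRACT_FIELD(hi, 13, 31) yields the 19-bit virtual page
   number stored in a TLB hi word, and EXTRACT_FIELD(lo, 13, 31) the
   matching physical frame number from the lo word.  */
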
static inline void set_field(uint32_t *dst, unsigned int val,
                             unsigned int offset, unsigned int width)
{
    uint32_t mask;

    mask = (1 << width) - 1;
    mask <<= offset;
    val <<= offset;

    val &= mask;
    *dst &= ~mask;
    *dst |= val;
}

static void dump_tlb(CPUState *env, int mmu)
{
    int set, idx;
    uint32_t hi, lo, tlb_vpn, tlb_pfn;

    for (set = 0; set < 4; set++) {
        for (idx = 0; idx < 16; idx++) {
            lo = env->tlbsets[mmu][set][idx].lo;
            hi = env->tlbsets[mmu][set][idx].hi;
            tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
            tlb_pfn = EXTRACT_FIELD(lo, 13, 31);

            printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
                   set, idx, hi, lo, tlb_vpn, tlb_pfn);
        }
    }
}

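/* The TLB model is env->tlbsets[mmu][set][idx] with mmu 0 = instruction,
   1 = data, and 4 sets of 16 entries per MMU.  Pages are 8 KB, so the
   virtual page number is vaddr >> 13 and the entry index within a set is
   the low 4 bits of that page number.  */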
/* rw 0 = read, 1 = write, 2 = exec.  */
static int cris_mmu_translate_page(struct cris_mmu_result *res,
                                   CPUState *env, uint32_t vaddr,
                                   int rw, int usermode, int debug)
{
    unsigned int vpage;
    unsigned int idx;
    uint32_t pid, lo, hi;
    uint32_t tlb_vpn, tlb_pfn = 0;
    int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
    int cfg_v, cfg_k, cfg_w, cfg_x;
    int set, match = 0;
    uint32_t r_cause;
    uint32_t r_cfg;
    int rwcause;
    int mmu = 1; /* Data mmu is default.  */
    int vect_base;

    r_cause = env->sregs[SFR_R_MM_CAUSE];
    r_cfg = env->sregs[SFR_RW_MM_CFG];
    pid = env->pregs[PR_PID] & 0xff;

    switch (rw) {
    case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break;
    case 1: rwcause = CRIS_MMU_ERR_WRITE; break;
    default:
    case 0: rwcause = CRIS_MMU_ERR_READ; break;
    }

    /* I exception vectors 4 - 7, D 8 - 11.  */
    vect_base = (mmu + 1) * 4;

    vpage = vaddr >> 13;

    /* We know the index which to check on each set.
       Scan both I and D.  */
#if 0
    for (set = 0; set < 4; set++) {
        for (idx = 0; idx < 16; idx++) {
            lo = env->tlbsets[mmu][set][idx].lo;
            hi = env->tlbsets[mmu][set][idx].hi;
            tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
            tlb_pfn = EXTRACT_FIELD(lo, 13, 31);

            printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
                   set, idx, hi, lo, tlb_vpn, tlb_pfn);
        }
    }
#endif

    idx = vpage & 15;
    for (set = 0; set < 4; set++) {
        lo = env->tlbsets[mmu][set][idx].lo;
        hi = env->tlbsets[mmu][set][idx].hi;

        tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
        tlb_pid = EXTRACT_FIELD(hi, 0, 7);
        tlb_g = EXTRACT_FIELD(lo, 4, 4);

        D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
              mmu, set, idx, tlb_vpn, vpage, lo, hi);
        if ((tlb_g || (tlb_pid == pid))
            && tlb_vpn == vpage) {
            match = 1;
            break;
        }
    }

    res->bf_vec = vect_base;
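    /* bf_vec selects the bus-fault vector reported back to the caller:
       vect_base for a refill, +1 invalid, +2 access (kernel protection),
       +3 write/execute protection, matching the I (4-7) and D (8-11)
       vector ranges noted above.  */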
    if (match) {
        cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
        cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
        cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
        cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);

        tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
        tlb_v = EXTRACT_FIELD(lo, 3, 3);
        tlb_k = EXTRACT_FIELD(lo, 2, 2);
        tlb_w = EXTRACT_FIELD(lo, 1, 1);
        tlb_x = EXTRACT_FIELD(lo, 0, 0);

        /*
        set_exception_vector(0x04, i_mmu_refill);
        set_exception_vector(0x05, i_mmu_invalid);
        set_exception_vector(0x06, i_mmu_access);
        set_exception_vector(0x07, i_mmu_execute);
        set_exception_vector(0x08, d_mmu_refill);
        set_exception_vector(0x09, d_mmu_invalid);
        set_exception_vector(0x0a, d_mmu_access);
        set_exception_vector(0x0b, d_mmu_write);
        */

        if (cfg_k && tlb_k && usermode) {
            D(printf("tlb: kernel protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 2;
        } else if (rw == 1 && cfg_w && !tlb_w) {
            D(printf("tlb: write protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            /* write accesses never go through the I mmu.  */
            res->bf_vec = vect_base + 3;
        } else if (rw == 2 && cfg_x && !tlb_x) {
            D(printf("tlb: exec protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 3;
        } else if (cfg_v && !tlb_v) {
            D(printf("tlb: invalid %x\n", vaddr));
            match = 0;
            res->bf_vec = vect_base + 1;
        }

        res->prot = 0;
        if (match) {
            res->prot |= PAGE_READ;
            if (tlb_w)
                res->prot |= PAGE_WRITE;
            if (mmu == 0 && (cfg_x || tlb_x))
                res->prot |= PAGE_EXEC;
        } else {
            D(dump_tlb(env, mmu));
        }
    } else {
        /* If refill, provide a randomized set.  */
        set = env->mmu_rand_lfsr & 3;
    }

    if (!match && !debug) {
        cris_mmu_update_rand_lfsr(env);

        /* Update RW_MM_TLB_SEL.  */
        env->sregs[SFR_RW_MM_TLB_SEL] = 0;
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);

        /* Update RW_MM_CAUSE.  */
        set_field(&r_cause, rwcause, 8, 2);
        set_field(&r_cause, vpage, 13, 19);
        set_field(&r_cause, pid, 0, 8);
        env->sregs[SFR_R_MM_CAUSE] = r_cause;
        D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
    }

    D(printf("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
             " %x cause=%x sel=%x sp=%x %x %x\n",
             __func__, rw, match, env->pc,
             vaddr, vpage,
             tlb_vpn, tlb_pfn, tlb_pid,
             pid,
             r_cause,
             env->sregs[SFR_RW_MM_TLB_SEL],
             env->regs[R_SP], env->pregs[PR_USP], env->ksp));

    res->phy = tlb_pfn << TARGET_PAGE_BITS;
    return !match;
}

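/* Flush QEMU's soft TLB for every valid, non-global entry tagged with the
   given PID; global (tlb_g) mappings are left in place.  */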
void cris_mmu_flush_pid(CPUState *env, uint32_t pid)
{
    target_ulong vaddr;
    unsigned int idx;
    uint32_t lo, hi;
    uint32_t tlb_vpn;
    int tlb_pid, tlb_g, tlb_v;
    unsigned int set;
    unsigned int mmu;

    pid &= 0xff;
    for (mmu = 0; mmu < 2; mmu++) {
        for (set = 0; set < 4; set++) {
            for (idx = 0; idx < 16; idx++) {
                lo = env->tlbsets[mmu][set][idx].lo;
                hi = env->tlbsets[mmu][set][idx].hi;

                tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
                tlb_pid = EXTRACT_FIELD(hi, 0, 7);
                tlb_g = EXTRACT_FIELD(lo, 4, 4);
                tlb_v = EXTRACT_FIELD(lo, 3, 3);

                if (tlb_v && !tlb_g && (tlb_pid == pid)) {
                    vaddr = tlb_vpn << TARGET_PAGE_BITS;
                    D_LOG("flush pid=%x vaddr=%x\n",
                          pid, vaddr);
                    tlb_flush_page(env, vaddr);
                }
            }
        }
    }
}

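/* Top-level translation entry point.  PR_SRS is switched for the duration
   of the call so that the env->sregs[...] accesses in the helpers above
   resolve against the proper MMU support-register bank: bank 1 for the
   instruction MMU (exec accesses, rw == 2) and bank 2 for the data MMU.  */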
int cris_mmu_translate(struct cris_mmu_result *res,
                       CPUState *env, uint32_t vaddr,
                       int rw, int mmu_idx, int debug)
{
    int seg;
    int miss = 0;
    int is_user = mmu_idx == MMU_USER_IDX;
    uint32_t old_srs;

    old_srs = env->pregs[PR_SRS];

    /* rw == 2 means exec, map the access to the insn mmu.  */
    env->pregs[PR_SRS] = rw == 2 ? 1 : 2;

    if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
        res->phy = vaddr;
        res->prot = PAGE_BITS;
        goto done;
    }

    seg = vaddr >> 28;
    if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) {
        uint32_t base;

        base = cris_mmu_translate_seg(env, seg);
        res->phy = base | (0x0fffffff & vaddr);
        res->prot = PAGE_BITS;
    } else {
        miss = cris_mmu_translate_page(res, env, vaddr, rw,
                                       is_user, debug);
    }
done:
    env->pregs[PR_SRS] = old_srs;
    return miss;
}

#endif