/*
 * Copyright (c) 2007 AXIS Communications AB
 * Written by Edgar E. Iglesias.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CONFIG_USER_ONLY

#include "cpu.h"
#include "mmu.h"

#ifdef DEBUG
#define D(x) x
#define D_LOG(...) qemu_log(__VA_ARGS__)
#else
#define D(x) do { } while (0)
#define D_LOG(...) do { } while (0)
#endif

void cris_mmu_init(CPUState *env)
{
    env->mmu_rand_lfsr = 0xcccc;
}
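
/* TLB set selection on refill is pseudo-randomized: the 16-bit LFSR seeded
   above is stepped on every fault in cris_mmu_translate_page() and its two
   low bits pick the victim set.  SR_POLYNOM describes the feedback taps;
   compute_polynom() sums the tapped bits of the current state.  */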
#define SR_POLYNOM 0x8805
static inline unsigned int compute_polynom(unsigned int sr)
{
    unsigned int i;
    unsigned int f;

    f = 0;
    for (i = 0; i < 16; i++)
        f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);

    return f;
}

static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
{
    return (rw_gc_cfg & 12) != 0;
}
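
/* RW_MM_CFG holds one bit per 256MB segment; a set bit means the segment
   is mapped linearly through the kernel base registers instead of through
   the TLB (see cris_mmu_translate()).  */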
static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
{
    return (1 << seg) & rw_mm_cfg;
}
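
/* Each linearly mapped segment gets its physical base from RW_MM_KBASE_LO
   (segments 0-7) or RW_MM_KBASE_HI (segments 8-15): one 4-bit nibble per
   segment, placed in bits 31..28 of the physical address.  */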
static uint32_t cris_mmu_translate_seg(CPUState *env, int seg)
{
    uint32_t base;
    int i;

    if (seg < 8)
        base = env->sregs[SFR_RW_MM_KBASE_LO];
    else
        base = env->sregs[SFR_RW_MM_KBASE_HI];

    i = seg & 7;
    base >>= i * 4;
    base &= 15;

    base <<= 28;
    return base;
}

/* Used by the tlb decoder.  */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> start) & ((1 << (end - start + 1)) - 1))
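
/* For example, EXTRACT_FIELD(hi, 13, 31) yields the 19-bit field stored in
   bits 31..13 of hi.  */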

static inline void set_field(uint32_t *dst, unsigned int val,
                             unsigned int offset, unsigned int width)
{
    uint32_t mask;

    mask = (1 << width) - 1;
    mask <<= offset;
    val <<= offset;

    val &= mask;
    *dst &= ~mask;
    *dst |= val;
}
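
/* Debug helper: print every entry (4 sets x 16 indices) of one MMU's TLB.  */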
static void dump_tlb(CPUState *env, int mmu)
{
    int set, idx;
    uint32_t hi, lo, tlb_vpn, tlb_pfn;

    for (set = 0; set < 4; set++) {
        for (idx = 0; idx < 16; idx++) {
            lo = env->tlbsets[mmu][set][idx].lo;
            hi = env->tlbsets[mmu][set][idx].hi;
            tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
            tlb_pfn = EXTRACT_FIELD(lo, 13, 31);

            printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
                   set, idx, hi, lo, tlb_vpn, tlb_pfn);
        }
    }
}
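
/* Each of the two MMUs (0 = insn, 1 = data) has a TLB of 4 sets with 16
   entries each.  The hi word of an entry holds the VPN (bits 31..13) and
   the PID (bits 7..0); the lo word holds the PFN (bits 31..13) and the
   G, V, K, W and X flags (bits 4..0).  */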
/* rw 0 = read, 1 = write, 2 = exec.  */
static int cris_mmu_translate_page(struct cris_mmu_result *res,
                                   CPUState *env, uint32_t vaddr,
                                   int rw, int usermode)
{
    unsigned int vpage;
    unsigned int idx;
    uint32_t pid, lo, hi;
    uint32_t tlb_vpn, tlb_pfn = 0;
    int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
    int cfg_v, cfg_k, cfg_w, cfg_x;
    int set, match = 0;
    uint32_t r_cause;
    uint32_t r_cfg;
    int rwcause;
    int mmu = 1; /* Data mmu is default.  */
    int vect_base;

    r_cause = env->sregs[SFR_R_MM_CAUSE];
    r_cfg = env->sregs[SFR_RW_MM_CFG];
    pid = env->pregs[PR_PID] & 0xff;

    switch (rw) {
    case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break;
    case 1: rwcause = CRIS_MMU_ERR_WRITE; break;
    default:
    case 0: rwcause = CRIS_MMU_ERR_READ; break;
    }

    /* I exception vectors 4 - 7, D 8 - 11.  */
    vect_base = (mmu + 1) * 4;

    vpage = vaddr >> 13;
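
    /* Only the four sets need to be searched: the entry index within each
       set is fixed by the low bits of the VPN.  Global (G) entries match
       regardless of the current PID.  */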
    /* We know the index which to check on each set.
       Scan both I and D.  */
#if 0
    for (set = 0; set < 4; set++) {
        for (idx = 0; idx < 16; idx++) {
            lo = env->tlbsets[mmu][set][idx].lo;
            hi = env->tlbsets[mmu][set][idx].hi;
            tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
            tlb_pfn = EXTRACT_FIELD(lo, 13, 31);

            printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
                   set, idx, hi, lo, tlb_vpn, tlb_pfn);
        }
    }
#endif

    idx = vpage & 15;
    for (set = 0; set < 4; set++) {
        lo = env->tlbsets[mmu][set][idx].lo;
        hi = env->tlbsets[mmu][set][idx].hi;

        tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
        tlb_pid = EXTRACT_FIELD(hi, 0, 7);
        tlb_g = EXTRACT_FIELD(lo, 4, 4);

        D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
              mmu, set, idx, tlb_vpn, vpage, lo, hi);
        if ((tlb_g || (tlb_pid == pid))
            && tlb_vpn == vpage) {
            match = 1;
            break;
        }
    }
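
    /* Assume a TLB refill fault by default; on a hit the checks below either
       accept the access or pick a more specific fault vector.  */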
    res->bf_vec = vect_base;
    if (match) {
        cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
        cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
        cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
        cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);

        tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
        tlb_v = EXTRACT_FIELD(lo, 3, 3);
        tlb_k = EXTRACT_FIELD(lo, 2, 2);
        tlb_w = EXTRACT_FIELD(lo, 1, 1);
        tlb_x = EXTRACT_FIELD(lo, 0, 0);

        /*
        set_exception_vector(0x04, i_mmu_refill);
        set_exception_vector(0x05, i_mmu_invalid);
        set_exception_vector(0x06, i_mmu_access);
        set_exception_vector(0x07, i_mmu_execute);
        set_exception_vector(0x08, d_mmu_refill);
        set_exception_vector(0x09, d_mmu_invalid);
        set_exception_vector(0x0a, d_mmu_access);
        set_exception_vector(0x0b, d_mmu_write);
        */

        if (cfg_k && tlb_k && usermode) {
            D(printf("tlb: kernel protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 2;
        } else if (rw == 1 && cfg_w && !tlb_w) {
            D(printf("tlb: write protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            /* write accesses never go through the I mmu.  */
            res->bf_vec = vect_base + 3;
        } else if (rw == 2 && cfg_x && !tlb_x) {
            D(printf("tlb: exec protected %x lo=%x pc=%x\n",
                     vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 3;
        } else if (cfg_v && !tlb_v) {
            D(printf("tlb: invalid %x\n", vaddr));
            match = 0;
            res->bf_vec = vect_base + 1;
        }

        res->prot = 0;
        if (match) {
            res->prot |= PAGE_READ;
            if (tlb_w)
                res->prot |= PAGE_WRITE;
            if (mmu == 0 && (cfg_x || tlb_x))
                res->prot |= PAGE_EXEC;
        } else {
            D(dump_tlb(env, mmu));
        }
    } else {
        /* If refill, provide a randomized set.  */
        set = env->mmu_rand_lfsr & 3;
    }

    if (!match) {
        unsigned int f;

        /* Update lfsr at every fault.  */
        f = compute_polynom(env->mmu_rand_lfsr);
        env->mmu_rand_lfsr >>= 1;
        env->mmu_rand_lfsr |= (f << 15);
        env->mmu_rand_lfsr &= 0xffff;
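
        /* Report the selected entry back to the guest so its refill handler
           knows which TLB slot to load.  */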
        /* Update RW_MM_TLB_SEL.  */
        env->sregs[SFR_RW_MM_TLB_SEL] = 0;
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);

        /* Update RW_MM_CAUSE.  */
        set_field(&r_cause, rwcause, 8, 2);
        set_field(&r_cause, vpage, 13, 19);
        set_field(&r_cause, pid, 0, 8);
        env->sregs[SFR_R_MM_CAUSE] = r_cause;
        D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
    }

    D(printf("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
             " %x cause=%x sel=%x sp=%x %x %x\n",
             __func__, rw, match, env->pc,
             vaddr, vpage,
             tlb_vpn, tlb_pfn, tlb_pid,
             pid,
             r_cause,
             env->sregs[SFR_RW_MM_TLB_SEL],
             env->regs[R_SP], env->pregs[PR_USP], env->ksp));

    res->phy = tlb_pfn << TARGET_PAGE_BITS;
    return !match;
}
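
/* Flush QEMU's cached mappings for every valid, non-global TLB entry tagged
   with the given PID, in both the I and D MMUs.  */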
void cris_mmu_flush_pid(CPUState *env, uint32_t pid)
{
    target_ulong vaddr;
    unsigned int idx;
    uint32_t lo, hi;
    uint32_t tlb_vpn;
    int tlb_pid, tlb_g, tlb_v;
    unsigned int set;
    unsigned int mmu;

    pid &= 0xff;
    for (mmu = 0; mmu < 2; mmu++) {
        for (set = 0; set < 4; set++) {
            for (idx = 0; idx < 16; idx++) {
                lo = env->tlbsets[mmu][set][idx].lo;
                hi = env->tlbsets[mmu][set][idx].hi;

                tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
                tlb_pid = EXTRACT_FIELD(hi, 0, 7);
                tlb_g = EXTRACT_FIELD(lo, 4, 4);
                tlb_v = EXTRACT_FIELD(lo, 3, 3);

                if (tlb_v && !tlb_g && (tlb_pid == pid)) {
                    vaddr = tlb_vpn << TARGET_PAGE_BITS;
                    D_LOG("flush pid=%x vaddr=%x\n",
                          pid, vaddr);
                    tlb_flush_page(env, vaddr);
                }
            }
        }
    }
}
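
/* Top level translation hook.  Returns 0 with res->phy and res->prot filled
   in when the access maps (MMU disabled, kernel segment or TLB hit), and
   nonzero on a fault with res->bf_vec set to the exception vector.  */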
int cris_mmu_translate(struct cris_mmu_result *res,
                       CPUState *env, uint32_t vaddr,
                       int rw, int mmu_idx)
{
    int seg;
    int miss = 0;
    int is_user = mmu_idx == MMU_USER_IDX;
    uint32_t old_srs;

    old_srs = env->pregs[PR_SRS];

    /* rw == 2 means exec, map the access to the insn mmu.  */
    env->pregs[PR_SRS] = rw == 2 ? 1 : 2;

    if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
        res->phy = vaddr;
        res->prot = PAGE_BITS;
        goto done;
    }

    seg = vaddr >> 28;
    if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) {
        uint32_t base;

        base = cris_mmu_translate_seg(env, seg);
        res->phy = base | (0x0fffffff & vaddr);
        res->prot = PAGE_BITS;
    } else {
        miss = cris_mmu_translate_page(res, env, vaddr, rw, is_user);
    }
done:
    env->pregs[PR_SRS] = old_srs;
    return miss;
}
#endif