audio: make playback packet length calculation exact
[qemu/ar7.git] / target / loongarch / tlb_helper.c
blobcce1db1e0adba309521b24921f34164be9c5cbd7
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * QEMU LoongArch TLB helpers
5 * Copyright (c) 2021 Loongson Technology Corporation Limited
7 */
9 #include "qemu/osdep.h"
10 #include "qemu/guest-random.h"
12 #include "cpu.h"
13 #include "internals.h"
14 #include "exec/helper-proto.h"
15 #include "exec/exec-all.h"
16 #include "exec/cpu_ldst.h"
17 #include "exec/log.h"
18 #include "cpu-csr.h"
/*
 * Result codes for a TLB lookup.  TLBRET_MATCH means the translation
 * succeeded; every other value names the fault the caller must raise.
 */
enum {
    TLBRET_MATCH = 0,   /* translation found and permitted */
    TLBRET_BADADDR = 1, /* address outside the valid VA range */
    TLBRET_NOMATCH = 2, /* no TLB entry matched (TLB refill needed) */
    TLBRET_INVALID = 3, /* entry matched but V bit clear */
    TLBRET_DIRTY = 4,   /* store hit an entry with D bit clear */
    TLBRET_RI = 5,      /* load blocked by NR (read-inhibit) */
    TLBRET_XI = 6,      /* fetch blocked by NX (execute-inhibit) */
    TLBRET_PE = 7,      /* privilege level check failed */
};
31 static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
32 int *prot, target_ulong address,
33 int access_type, int index, int mmu_idx)
35 LoongArchTLB *tlb = &env->tlb[index];
36 uint64_t plv = mmu_idx;
37 uint64_t tlb_entry, tlb_ppn;
38 uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
40 if (index >= LOONGARCH_STLB) {
41 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
42 } else {
43 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
45 n = (address >> tlb_ps) & 0x1;/* Odd or even */
47 tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
48 tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
49 tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
50 tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
51 tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY, PPN);
52 tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY, NX);
53 tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY, NR);
54 tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY, RPLV);
56 /* Check access rights */
57 if (!tlb_v) {
58 return TLBRET_INVALID;
61 if (access_type == MMU_INST_FETCH && tlb_nx) {
62 return TLBRET_XI;
65 if (access_type == MMU_DATA_LOAD && tlb_nr) {
66 return TLBRET_RI;
69 if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
70 ((tlb_rplv == 1) && (plv != tlb_plv))) {
71 return TLBRET_PE;
74 if ((access_type == MMU_DATA_STORE) && !tlb_d) {
75 return TLBRET_DIRTY;
79 * tlb_entry contains ppn[47:12] while 16KiB ppn is [47:15]
80 * need adjust.
82 *physical = (tlb_ppn << R_TLBENTRY_PPN_SHIFT) |
83 (address & MAKE_64BIT_MASK(0, tlb_ps));
84 *prot = PAGE_READ;
85 if (tlb_d) {
86 *prot |= PAGE_WRITE;
88 if (!tlb_nx) {
89 *prot |= PAGE_EXEC;
91 return TLBRET_MATCH;
95 * One tlb entry holds an adjacent odd/even pair, the vpn is the
96 * content of the virtual page number divided by 2. So the
97 * compare vpn is bit[47:15] for 16KiB page. while the vppn
98 * field in tlb entry contains bit[47:13], so need adjust.
99 * virt_vpn = vaddr[47:13]
101 static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
102 int *index)
104 LoongArchTLB *tlb;
105 uint16_t csr_asid, tlb_asid, stlb_idx;
106 uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
107 int i, compare_shift;
108 uint64_t vpn, tlb_vppn;
110 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
111 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
112 vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
113 stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
114 compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
116 /* Search STLB */
117 for (i = 0; i < 8; ++i) {
118 tlb = &env->tlb[i * 256 + stlb_idx];
119 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
120 if (tlb_e) {
121 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
122 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
123 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
125 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
126 (vpn == (tlb_vppn >> compare_shift))) {
127 *index = i * 256 + stlb_idx;
128 return true;
133 /* Search MTLB */
134 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
135 tlb = &env->tlb[i];
136 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
137 if (tlb_e) {
138 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
139 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
140 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
141 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
142 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
143 vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
144 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
145 (vpn == (tlb_vppn >> compare_shift))) {
146 *index = i;
147 return true;
151 return false;
154 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
155 int *prot, target_ulong address,
156 MMUAccessType access_type, int mmu_idx)
158 int index, match;
160 match = loongarch_tlb_search(env, address, &index);
161 if (match) {
162 return loongarch_map_tlb_entry(env, physical, prot,
163 address, access_type, index, mmu_idx);
166 return TLBRET_NOMATCH;
169 static int get_physical_address(CPULoongArchState *env, hwaddr *physical,
170 int *prot, target_ulong address,
171 MMUAccessType access_type, int mmu_idx)
173 int user_mode = mmu_idx == MMU_IDX_USER;
174 int kernel_mode = mmu_idx == MMU_IDX_KERNEL;
175 uint32_t plv, base_c, base_v;
176 int64_t addr_high;
177 uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
178 uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);
180 /* Check PG and DA */
181 if (da & !pg) {
182 *physical = address & TARGET_PHYS_MASK;
183 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
184 return TLBRET_MATCH;
187 plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
188 base_v = address >> TARGET_VIRT_ADDR_SPACE_BITS;
189 /* Check direct map window */
190 for (int i = 0; i < 4; i++) {
191 base_c = env->CSR_DMW[i] >> TARGET_VIRT_ADDR_SPACE_BITS;
192 if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
193 *physical = dmw_va2pa(address);
194 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
195 return TLBRET_MATCH;
199 /* Check valid extension */
200 addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
201 if (!(addr_high == 0 || addr_high == -1)) {
202 return TLBRET_BADADDR;
205 /* Mapped address */
206 return loongarch_map_address(env, physical, prot, address,
207 access_type, mmu_idx);
210 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
212 LoongArchCPU *cpu = LOONGARCH_CPU(cs);
213 CPULoongArchState *env = &cpu->env;
214 hwaddr phys_addr;
215 int prot;
217 if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
218 cpu_mmu_index(env, false)) != 0) {
219 return -1;
221 return phys_addr;
224 static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
225 MMUAccessType access_type, int tlb_error)
227 CPUState *cs = env_cpu(env);
229 switch (tlb_error) {
230 default:
231 case TLBRET_BADADDR:
232 cs->exception_index = access_type == MMU_INST_FETCH
233 ? EXCCODE_ADEF : EXCCODE_ADEM;
234 break;
235 case TLBRET_NOMATCH:
236 /* No TLB match for a mapped address */
237 if (access_type == MMU_DATA_LOAD) {
238 cs->exception_index = EXCCODE_PIL;
239 } else if (access_type == MMU_DATA_STORE) {
240 cs->exception_index = EXCCODE_PIS;
241 } else if (access_type == MMU_INST_FETCH) {
242 cs->exception_index = EXCCODE_PIF;
244 env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
245 break;
246 case TLBRET_INVALID:
247 /* TLB match with no valid bit */
248 if (access_type == MMU_DATA_LOAD) {
249 cs->exception_index = EXCCODE_PIL;
250 } else if (access_type == MMU_DATA_STORE) {
251 cs->exception_index = EXCCODE_PIS;
252 } else if (access_type == MMU_INST_FETCH) {
253 cs->exception_index = EXCCODE_PIF;
255 break;
256 case TLBRET_DIRTY:
257 /* TLB match but 'D' bit is cleared */
258 cs->exception_index = EXCCODE_PME;
259 break;
260 case TLBRET_XI:
261 /* Execute-Inhibit Exception */
262 cs->exception_index = EXCCODE_PNX;
263 break;
264 case TLBRET_RI:
265 /* Read-Inhibit Exception */
266 cs->exception_index = EXCCODE_PNR;
267 break;
268 case TLBRET_PE:
269 /* Privileged Exception */
270 cs->exception_index = EXCCODE_PPI;
271 break;
274 if (tlb_error == TLBRET_NOMATCH) {
275 env->CSR_TLBRBADV = address;
276 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN,
277 extract64(address, 13, 35));
278 } else {
279 if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
280 env->CSR_BADV = address;
282 env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
286 static void invalidate_tlb_entry(CPULoongArchState *env, int index)
288 target_ulong addr, mask, pagesize;
289 uint8_t tlb_ps;
290 LoongArchTLB *tlb = &env->tlb[index];
292 int mmu_idx = cpu_mmu_index(env, false);
293 uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
294 uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
295 uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
297 if (index >= LOONGARCH_STLB) {
298 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
299 } else {
300 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
302 pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
303 mask = MAKE_64BIT_MASK(0, tlb_ps + 1);
305 if (tlb_v0) {
306 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask; /* even */
307 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
308 mmu_idx, TARGET_LONG_BITS);
311 if (tlb_v1) {
312 addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize; /* odd */
313 tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
314 mmu_idx, TARGET_LONG_BITS);
318 static void invalidate_tlb(CPULoongArchState *env, int index)
320 LoongArchTLB *tlb;
321 uint16_t csr_asid, tlb_asid, tlb_g;
323 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
324 tlb = &env->tlb[index];
325 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
326 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
327 if (tlb_g == 0 && tlb_asid != csr_asid) {
328 return;
330 invalidate_tlb_entry(env, index);
333 static void fill_tlb_entry(CPULoongArchState *env, int index)
335 LoongArchTLB *tlb = &env->tlb[index];
336 uint64_t lo0, lo1, csr_vppn;
337 uint16_t csr_asid;
338 uint8_t csr_ps;
340 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
341 csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
342 csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN);
343 lo0 = env->CSR_TLBRELO0;
344 lo1 = env->CSR_TLBRELO1;
345 } else {
346 csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
347 csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI, VPPN);
348 lo0 = env->CSR_TLBELO0;
349 lo1 = env->CSR_TLBELO1;
352 if (csr_ps == 0) {
353 qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
356 /* Only MTLB has the ps fields */
357 if (index >= LOONGARCH_STLB) {
358 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
361 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
362 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
363 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
364 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);
366 tlb->tlb_entry0 = lo0;
367 tlb->tlb_entry1 = lo1;
370 /* Return an random value between low and high */
371 static uint32_t get_random_tlb(uint32_t low, uint32_t high)
373 uint32_t val;
375 qemu_guest_getrandom_nofail(&val, sizeof(val));
376 return val % (high - low + 1) + low;
379 void helper_tlbsrch(CPULoongArchState *env)
381 int index, match;
383 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
384 match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
385 } else {
386 match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
389 if (match) {
390 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
391 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
392 return;
395 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
398 void helper_tlbrd(CPULoongArchState *env)
400 LoongArchTLB *tlb;
401 int index;
402 uint8_t tlb_ps, tlb_e;
404 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
405 tlb = &env->tlb[index];
407 if (index >= LOONGARCH_STLB) {
408 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
409 } else {
410 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
412 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
414 if (!tlb_e) {
415 /* Invalid TLB entry */
416 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
417 env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
418 env->CSR_TLBEHI = 0;
419 env->CSR_TLBELO0 = 0;
420 env->CSR_TLBELO1 = 0;
421 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
422 } else {
423 /* Valid TLB entry */
424 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
425 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
426 PS, (tlb_ps & 0x3f));
427 env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
428 R_TLB_MISC_VPPN_SHIFT;
429 env->CSR_TLBELO0 = tlb->tlb_entry0;
430 env->CSR_TLBELO1 = tlb->tlb_entry1;
434 void helper_tlbwr(CPULoongArchState *env)
436 int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
438 invalidate_tlb(env, index);
440 if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
441 env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
442 TLB_MISC, E, 0);
443 return;
446 fill_tlb_entry(env, index);
449 void helper_tlbfill(CPULoongArchState *env)
451 uint64_t address, entryhi;
452 int index, set, stlb_idx;
453 uint16_t pagesize, stlb_ps;
455 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
456 entryhi = env->CSR_TLBREHI;
457 pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
458 } else {
459 entryhi = env->CSR_TLBEHI;
460 pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
463 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
465 if (pagesize == stlb_ps) {
466 /* Only write into STLB bits [47:13] */
467 address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_VPPN_SHIFT);
469 /* Choose one set ramdomly */
470 set = get_random_tlb(0, 7);
472 /* Index in one set */
473 stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */
475 index = set * 256 + stlb_idx;
476 } else {
477 /* Only write into MTLB */
478 index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
481 invalidate_tlb(env, index);
482 fill_tlb_entry(env, index);
485 void helper_tlbclr(CPULoongArchState *env)
487 LoongArchTLB *tlb;
488 int i, index;
489 uint16_t csr_asid, tlb_asid, tlb_g;
491 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
492 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
494 if (index < LOONGARCH_STLB) {
495 /* STLB. One line per operation */
496 for (i = 0; i < 8; i++) {
497 tlb = &env->tlb[i * 256 + (index % 256)];
498 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
499 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
500 if (!tlb_g && tlb_asid == csr_asid) {
501 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
504 } else if (index < LOONGARCH_TLB_MAX) {
505 /* All MTLB entries */
506 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
507 tlb = &env->tlb[i];
508 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
509 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
510 if (!tlb_g && tlb_asid == csr_asid) {
511 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
516 tlb_flush(env_cpu(env));
519 void helper_tlbflush(CPULoongArchState *env)
521 int i, index;
523 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
525 if (index < LOONGARCH_STLB) {
526 /* STLB. One line per operation */
527 for (i = 0; i < 8; i++) {
528 int s_idx = i * 256 + (index % 256);
529 env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
530 TLB_MISC, E, 0);
532 } else if (index < LOONGARCH_TLB_MAX) {
533 /* All MTLB entries */
534 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
535 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
536 TLB_MISC, E, 0);
540 tlb_flush(env_cpu(env));
543 void helper_invtlb_all(CPULoongArchState *env)
545 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
546 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
547 TLB_MISC, E, 0);
549 tlb_flush(env_cpu(env));
552 void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
554 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
555 LoongArchTLB *tlb = &env->tlb[i];
556 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
558 if (tlb_g == g) {
559 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
562 tlb_flush(env_cpu(env));
565 void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
567 uint16_t asid = info & R_CSR_ASID_ASID_MASK;
569 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
570 LoongArchTLB *tlb = &env->tlb[i];
571 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
572 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
574 if (!tlb_g && (tlb_asid == asid)) {
575 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
578 tlb_flush(env_cpu(env));
581 void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
582 target_ulong addr)
584 uint16_t asid = info & 0x3ff;
586 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
587 LoongArchTLB *tlb = &env->tlb[i];
588 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
589 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
590 uint64_t vpn, tlb_vppn;
591 uint8_t tlb_ps, compare_shift;
593 if (i >= LOONGARCH_STLB) {
594 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
595 } else {
596 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
598 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
599 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
600 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
602 if (!tlb_g && (tlb_asid == asid) &&
603 (vpn == (tlb_vppn >> compare_shift))) {
604 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
607 tlb_flush(env_cpu(env));
610 void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
611 target_ulong info, target_ulong addr)
613 uint16_t asid = info & 0x3ff;
615 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
616 LoongArchTLB *tlb = &env->tlb[i];
617 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
618 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
619 uint64_t vpn, tlb_vppn;
620 uint8_t tlb_ps, compare_shift;
622 if (i >= LOONGARCH_STLB) {
623 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
624 } else {
625 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
627 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
628 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
629 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
631 if ((tlb_g || (tlb_asid == asid)) &&
632 (vpn == (tlb_vppn >> compare_shift))) {
633 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
636 tlb_flush(env_cpu(env));
639 bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
640 MMUAccessType access_type, int mmu_idx,
641 bool probe, uintptr_t retaddr)
643 LoongArchCPU *cpu = LOONGARCH_CPU(cs);
644 CPULoongArchState *env = &cpu->env;
645 hwaddr physical;
646 int prot;
647 int ret;
649 /* Data access */
650 ret = get_physical_address(env, &physical, &prot, address,
651 access_type, mmu_idx);
653 if (ret == TLBRET_MATCH) {
654 tlb_set_page(cs, address & TARGET_PAGE_MASK,
655 physical & TARGET_PAGE_MASK, prot,
656 mmu_idx, TARGET_PAGE_SIZE);
657 qemu_log_mask(CPU_LOG_MMU,
658 "%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
659 " prot %d\n", __func__, address, physical, prot);
660 return true;
661 } else {
662 qemu_log_mask(CPU_LOG_MMU,
663 "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
664 ret);
666 if (probe) {
667 return false;
669 raise_mmu_exception(env, address, access_type, ret);
670 cpu_loop_exit_restore(cs, retaddr);
673 target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
674 target_ulong level, uint32_t mem_idx)
676 CPUState *cs = env_cpu(env);
677 target_ulong badvaddr, index, phys, ret;
678 int shift;
679 uint64_t dir_base, dir_width;
680 bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
682 badvaddr = env->CSR_TLBRBADV;
683 base = base & TARGET_PHYS_MASK;
685 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
686 shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
687 shift = (shift + 1) * 3;
689 if (huge) {
690 return base;
692 switch (level) {
693 case 1:
694 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
695 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
696 break;
697 case 2:
698 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
699 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
700 break;
701 case 3:
702 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
703 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
704 break;
705 case 4:
706 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
707 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
708 break;
709 default:
710 do_raise_exception(env, EXCCODE_INE, GETPC());
711 return 0;
713 index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
714 phys = base | index << shift;
715 ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
716 return ret;
719 void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
720 uint32_t mem_idx)
722 CPUState *cs = env_cpu(env);
723 target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
724 int shift;
725 bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
726 uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
727 uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
729 base = base & TARGET_PHYS_MASK;
731 if (huge) {
732 /* Huge Page. base is paddr */
733 tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT);
734 /* Move Global bit */
735 tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >>
736 LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT |
737 (tmp0 & (~(1 << R_TLBENTRY_G_SHIFT)));
738 ps = ptbase + ptwidth - 1;
739 if (odd) {
740 tmp0 += MAKE_64BIT_MASK(ps, 1);
742 } else {
743 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
744 shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
745 shift = (shift + 1) * 3;
746 badv = env->CSR_TLBRBADV;
748 ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
749 ptindex = ptindex & ~0x1; /* clear bit 0 */
750 ptoffset0 = ptindex << shift;
751 ptoffset1 = (ptindex + 1) << shift;
753 phys = base | (odd ? ptoffset1 : ptoffset0);
754 tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
755 ps = ptbase;
758 if (odd) {
759 env->CSR_TLBRELO1 = tmp0;
760 } else {
761 env->CSR_TLBRELO0 = tmp0;
763 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);