tests/tcg/loongarch64: Add fp comparison instructions test
[qemu/rayw.git] / target / loongarch / tlb_helper.c
blob610b6d123c59c6d9e16eae9fd2b7b6736f0382b4
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * QEMU LoongArch TLB helpers
5 * Copyright (c) 2021 Loongson Technology Corporation Limited
7 */
9 #include "qemu/osdep.h"
10 #include "qemu/guest-random.h"
12 #include "cpu.h"
13 #include "internals.h"
14 #include "exec/helper-proto.h"
15 #include "exec/exec-all.h"
16 #include "exec/cpu_ldst.h"
17 #include "exec/log.h"
18 #include "cpu-csr.h"
/*
 * TLB lookup result codes.  TLBRET_MATCH means the translation succeeded;
 * every other value identifies the MMU fault to raise for the access.
 */
enum {
    TLBRET_MATCH = 0,
    TLBRET_BADADDR = 1,
    TLBRET_NOMATCH = 2,
    TLBRET_INVALID = 3,
    TLBRET_DIRTY = 4,
    TLBRET_RI = 5,
    TLBRET_XI = 6,
    TLBRET_PE = 7,
};
31 static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
32 int *prot, target_ulong address,
33 int access_type, int index, int mmu_idx)
35 LoongArchTLB *tlb = &env->tlb[index];
36 uint64_t plv = mmu_idx;
37 uint64_t tlb_entry, tlb_ppn;
38 uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;
40 if (index >= LOONGARCH_STLB) {
41 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
42 } else {
43 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
45 n = (address >> tlb_ps) & 0x1;/* Odd or even */
47 tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
48 tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
49 tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
50 tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
51 tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY, PPN);
52 tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY, NX);
53 tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY, NR);
54 tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY, RPLV);
56 /* Check access rights */
57 if (!tlb_v) {
58 return TLBRET_INVALID;
61 if (access_type == MMU_INST_FETCH && tlb_nx) {
62 return TLBRET_XI;
65 if (access_type == MMU_DATA_LOAD && tlb_nr) {
66 return TLBRET_RI;
69 if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
70 ((tlb_rplv == 1) && (plv != tlb_plv))) {
71 return TLBRET_PE;
74 if ((access_type == MMU_DATA_STORE) && !tlb_d) {
75 return TLBRET_DIRTY;
79 * tlb_entry contains ppn[47:12] while 16KiB ppn is [47:15]
80 * need adjust.
82 *physical = (tlb_ppn << R_TLBENTRY_PPN_SHIFT) |
83 (address & MAKE_64BIT_MASK(0, tlb_ps));
84 *prot = PAGE_READ;
85 if (tlb_d) {
86 *prot |= PAGE_WRITE;
88 if (!tlb_nx) {
89 *prot |= PAGE_EXEC;
91 return TLBRET_MATCH;
95 * One tlb entry holds an adjacent odd/even pair, the vpn is the
96 * content of the virtual page number divided by 2. So the
97 * compare vpn is bit[47:15] for 16KiB page. while the vppn
98 * field in tlb entry contains bit[47:13], so need adjust.
99 * virt_vpn = vaddr[47:13]
101 static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
102 int *index)
104 LoongArchTLB *tlb;
105 uint16_t csr_asid, tlb_asid, stlb_idx;
106 uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
107 int i, compare_shift;
108 uint64_t vpn, tlb_vppn;
110 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
111 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
112 vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
113 stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */
114 compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
116 /* Search STLB */
117 for (i = 0; i < 8; ++i) {
118 tlb = &env->tlb[i * 256 + stlb_idx];
119 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
120 if (tlb_e) {
121 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
122 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
123 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
125 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
126 (vpn == (tlb_vppn >> compare_shift))) {
127 *index = i * 256 + stlb_idx;
128 return true;
133 /* Search MTLB */
134 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
135 tlb = &env->tlb[i];
136 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
137 if (tlb_e) {
138 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
139 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
140 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
141 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
142 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
143 vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
144 if ((tlb_g == 1 || tlb_asid == csr_asid) &&
145 (vpn == (tlb_vppn >> compare_shift))) {
146 *index = i;
147 return true;
151 return false;
154 static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
155 int *prot, target_ulong address,
156 MMUAccessType access_type, int mmu_idx)
158 int index, match;
160 match = loongarch_tlb_search(env, address, &index);
161 if (match) {
162 return loongarch_map_tlb_entry(env, physical, prot,
163 address, access_type, index, mmu_idx);
166 return TLBRET_NOMATCH;
169 static int get_physical_address(CPULoongArchState *env, hwaddr *physical,
170 int *prot, target_ulong address,
171 MMUAccessType access_type, int mmu_idx)
173 int user_mode = mmu_idx == MMU_USER_IDX;
174 int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
175 uint32_t plv, base_c, base_v;
176 int64_t addr_high;
177 uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
178 uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);
180 /* Check PG and DA */
181 if (da & !pg) {
182 *physical = address & TARGET_PHYS_MASK;
183 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
184 return TLBRET_MATCH;
187 plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
188 base_v = address >> TARGET_VIRT_ADDR_SPACE_BITS;
189 /* Check direct map window */
190 for (int i = 0; i < 4; i++) {
191 base_c = env->CSR_DMW[i] >> TARGET_VIRT_ADDR_SPACE_BITS;
192 if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
193 *physical = dmw_va2pa(address);
194 *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
195 return TLBRET_MATCH;
199 /* Check valid extension */
200 addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
201 if (!(addr_high == 0 || addr_high == -1)) {
202 return TLBRET_BADADDR;
205 /* Mapped address */
206 return loongarch_map_address(env, physical, prot, address,
207 access_type, mmu_idx);
210 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
212 LoongArchCPU *cpu = LOONGARCH_CPU(cs);
213 CPULoongArchState *env = &cpu->env;
214 hwaddr phys_addr;
215 int prot;
217 if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
218 cpu_mmu_index(env, false)) != 0) {
219 return -1;
221 return phys_addr;
224 static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
225 MMUAccessType access_type, int tlb_error)
227 CPUState *cs = env_cpu(env);
229 switch (tlb_error) {
230 default:
231 case TLBRET_BADADDR:
232 cs->exception_index = EXCCODE_ADEM;
233 break;
234 case TLBRET_NOMATCH:
235 /* No TLB match for a mapped address */
236 if (access_type == MMU_DATA_LOAD) {
237 cs->exception_index = EXCCODE_PIL;
238 } else if (access_type == MMU_DATA_STORE) {
239 cs->exception_index = EXCCODE_PIS;
240 } else if (access_type == MMU_INST_FETCH) {
241 cs->exception_index = EXCCODE_PIF;
243 env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
244 break;
245 case TLBRET_INVALID:
246 /* TLB match with no valid bit */
247 if (access_type == MMU_DATA_LOAD) {
248 cs->exception_index = EXCCODE_PIL;
249 } else if (access_type == MMU_DATA_STORE) {
250 cs->exception_index = EXCCODE_PIS;
251 } else if (access_type == MMU_INST_FETCH) {
252 cs->exception_index = EXCCODE_PIF;
254 break;
255 case TLBRET_DIRTY:
256 /* TLB match but 'D' bit is cleared */
257 cs->exception_index = EXCCODE_PME;
258 break;
259 case TLBRET_XI:
260 /* Execute-Inhibit Exception */
261 cs->exception_index = EXCCODE_PNX;
262 break;
263 case TLBRET_RI:
264 /* Read-Inhibit Exception */
265 cs->exception_index = EXCCODE_PNR;
266 break;
267 case TLBRET_PE:
268 /* Privileged Exception */
269 cs->exception_index = EXCCODE_PPI;
270 break;
273 if (tlb_error == TLBRET_NOMATCH) {
274 env->CSR_TLBRBADV = address;
275 env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN,
276 extract64(address, 13, 35));
277 } else {
278 if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
279 env->CSR_BADV = address;
281 env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
/*
 * Flush the softmmu translations covered by TLB entry @index: one flush
 * per valid half (even/odd page) of the pair the entry maps.
 */
static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);

    /* MTLB entries carry their own page size; STLB uses the shared CSR. */
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    /* pagesize is 1 << tlb_ps; mask covers the whole even/odd pair. */
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);

    if (tlb_v0) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;    /* even */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize;    /* odd */
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}
317 static void invalidate_tlb(CPULoongArchState *env, int index)
319 LoongArchTLB *tlb;
320 uint16_t csr_asid, tlb_asid, tlb_g;
322 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
323 tlb = &env->tlb[index];
324 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
325 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
326 if (tlb_g == 0 && tlb_asid != csr_asid) {
327 return;
329 invalidate_tlb_entry(env, index);
332 static void fill_tlb_entry(CPULoongArchState *env, int index)
334 LoongArchTLB *tlb = &env->tlb[index];
335 uint64_t lo0, lo1, csr_vppn;
336 uint16_t csr_asid;
337 uint8_t csr_ps;
339 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
340 csr_ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
341 csr_vppn = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, VPPN);
342 lo0 = env->CSR_TLBRELO0;
343 lo1 = env->CSR_TLBRELO1;
344 } else {
345 csr_ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
346 csr_vppn = FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI, VPPN);
347 lo0 = env->CSR_TLBELO0;
348 lo1 = env->CSR_TLBELO1;
351 if (csr_ps == 0) {
352 qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
355 /* Only MTLB has the ps fields */
356 if (index >= LOONGARCH_STLB) {
357 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps);
360 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn);
361 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
362 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
363 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, csr_asid);
365 tlb->tlb_entry0 = lo0;
366 tlb->tlb_entry1 = lo1;
369 /* Return an random value between low and high */
370 static uint32_t get_random_tlb(uint32_t low, uint32_t high)
372 uint32_t val;
374 qemu_guest_getrandom_nofail(&val, sizeof(val));
375 return val % (high - low + 1) + low;
378 void helper_tlbsrch(CPULoongArchState *env)
380 int index, match;
382 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
383 match = loongarch_tlb_search(env, env->CSR_TLBREHI, &index);
384 } else {
385 match = loongarch_tlb_search(env, env->CSR_TLBEHI, &index);
388 if (match) {
389 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
390 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
391 return;
394 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
397 void helper_tlbrd(CPULoongArchState *env)
399 LoongArchTLB *tlb;
400 int index;
401 uint8_t tlb_ps, tlb_e;
403 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
404 tlb = &env->tlb[index];
406 if (index >= LOONGARCH_STLB) {
407 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
408 } else {
409 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
411 tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
413 if (!tlb_e) {
414 /* Invalid TLB entry */
415 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
416 env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
417 env->CSR_TLBEHI = 0;
418 env->CSR_TLBELO0 = 0;
419 env->CSR_TLBELO1 = 0;
420 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
421 } else {
422 /* Valid TLB entry */
423 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
424 env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
425 PS, (tlb_ps & 0x3f));
426 env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
427 R_TLB_MISC_VPPN_SHIFT;
428 env->CSR_TLBELO0 = tlb->tlb_entry0;
429 env->CSR_TLBELO1 = tlb->tlb_entry1;
433 void helper_tlbwr(CPULoongArchState *env)
435 int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
437 invalidate_tlb(env, index);
439 if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
440 env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc,
441 TLB_MISC, E, 0);
442 return;
445 fill_tlb_entry(env, index);
448 void helper_tlbfill(CPULoongArchState *env)
450 uint64_t address, entryhi;
451 int index, set, stlb_idx;
452 uint16_t pagesize, stlb_ps;
454 if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
455 entryhi = env->CSR_TLBREHI;
456 pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
457 } else {
458 entryhi = env->CSR_TLBEHI;
459 pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
462 stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
464 if (pagesize == stlb_ps) {
465 /* Only write into STLB bits [47:13] */
466 address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_VPPN_SHIFT);
468 /* Choose one set ramdomly */
469 set = get_random_tlb(0, 7);
471 /* Index in one set */
472 stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */
474 index = set * 256 + stlb_idx;
475 } else {
476 /* Only write into MTLB */
477 index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
480 invalidate_tlb(env, index);
481 fill_tlb_entry(env, index);
484 void helper_tlbclr(CPULoongArchState *env)
486 LoongArchTLB *tlb;
487 int i, index;
488 uint16_t csr_asid, tlb_asid, tlb_g;
490 csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
491 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
493 if (index < LOONGARCH_STLB) {
494 /* STLB. One line per operation */
495 for (i = 0; i < 8; i++) {
496 tlb = &env->tlb[i * 256 + (index % 256)];
497 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
498 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
499 if (!tlb_g && tlb_asid == csr_asid) {
500 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
503 } else if (index < LOONGARCH_TLB_MAX) {
504 /* All MTLB entries */
505 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
506 tlb = &env->tlb[i];
507 tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
508 tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
509 if (!tlb_g && tlb_asid == csr_asid) {
510 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
515 tlb_flush(env_cpu(env));
518 void helper_tlbflush(CPULoongArchState *env)
520 int i, index;
522 index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
524 if (index < LOONGARCH_STLB) {
525 /* STLB. One line per operation */
526 for (i = 0; i < 8; i++) {
527 int s_idx = i * 256 + (index % 256);
528 env->tlb[s_idx].tlb_misc = FIELD_DP64(env->tlb[s_idx].tlb_misc,
529 TLB_MISC, E, 0);
531 } else if (index < LOONGARCH_TLB_MAX) {
532 /* All MTLB entries */
533 for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
534 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
535 TLB_MISC, E, 0);
539 tlb_flush(env_cpu(env));
542 void helper_invtlb_all(CPULoongArchState *env)
544 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
545 env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
546 TLB_MISC, E, 0);
548 tlb_flush(env_cpu(env));
551 void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
553 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
554 LoongArchTLB *tlb = &env->tlb[i];
555 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
557 if (tlb_g == g) {
558 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
561 tlb_flush(env_cpu(env));
564 void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
566 uint16_t asid = info & R_CSR_ASID_ASID_MASK;
568 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
569 LoongArchTLB *tlb = &env->tlb[i];
570 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
571 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
573 if (!tlb_g && (tlb_asid == asid)) {
574 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
577 tlb_flush(env_cpu(env));
580 void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
581 target_ulong addr)
583 uint16_t asid = info & 0x3ff;
585 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
586 LoongArchTLB *tlb = &env->tlb[i];
587 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
588 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
589 uint64_t vpn, tlb_vppn;
590 uint8_t tlb_ps, compare_shift;
592 if (i >= LOONGARCH_STLB) {
593 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
594 } else {
595 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
597 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
598 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
599 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
601 if (!tlb_g && (tlb_asid == asid) &&
602 (vpn == (tlb_vppn >> compare_shift))) {
603 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
606 tlb_flush(env_cpu(env));
609 void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
610 target_ulong info, target_ulong addr)
612 uint16_t asid = info & 0x3ff;
614 for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
615 LoongArchTLB *tlb = &env->tlb[i];
616 uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
617 uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
618 uint64_t vpn, tlb_vppn;
619 uint8_t tlb_ps, compare_shift;
621 if (i >= LOONGARCH_STLB) {
622 tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
623 } else {
624 tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
626 tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
627 vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
628 compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
630 if ((tlb_g || (tlb_asid == asid)) &&
631 (vpn == (tlb_vppn >> compare_shift))) {
632 tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
635 tlb_flush(env_cpu(env));
638 bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
639 MMUAccessType access_type, int mmu_idx,
640 bool probe, uintptr_t retaddr)
642 LoongArchCPU *cpu = LOONGARCH_CPU(cs);
643 CPULoongArchState *env = &cpu->env;
644 hwaddr physical;
645 int prot;
646 int ret = TLBRET_BADADDR;
648 /* Data access */
649 ret = get_physical_address(env, &physical, &prot, address,
650 access_type, mmu_idx);
652 if (ret == TLBRET_MATCH) {
653 tlb_set_page(cs, address & TARGET_PAGE_MASK,
654 physical & TARGET_PAGE_MASK, prot,
655 mmu_idx, TARGET_PAGE_SIZE);
656 qemu_log_mask(CPU_LOG_MMU,
657 "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
658 " prot %d\n", __func__, address, physical, prot);
659 return true;
660 } else {
661 qemu_log_mask(CPU_LOG_MMU,
662 "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
663 ret);
665 if (probe) {
666 return false;
668 raise_mmu_exception(env, address, access_type, ret);
669 cpu_loop_exit_restore(cs, retaddr);
672 target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
673 target_ulong level, uint32_t mem_idx)
675 CPUState *cs = env_cpu(env);
676 target_ulong badvaddr, index, phys, ret;
677 int shift;
678 uint64_t dir_base, dir_width;
679 bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
681 badvaddr = env->CSR_TLBRBADV;
682 base = base & TARGET_PHYS_MASK;
684 /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
685 shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
686 shift = (shift + 1) * 3;
688 if (huge) {
689 return base;
691 switch (level) {
692 case 1:
693 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
694 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
695 break;
696 case 2:
697 dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
698 dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
699 break;
700 case 3:
701 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
702 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
703 break;
704 case 4:
705 dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
706 dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
707 break;
708 default:
709 do_raise_exception(env, EXCCODE_INE, GETPC());
710 return 0;
712 index = (badvaddr >> dir_base) & ((1 << dir_width) - 1);
713 phys = base | index << shift;
714 ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
715 return ret;
/*
 * LDPTE: load one PTE of the even/odd pair for the TLB-refill path into
 * CSR_TLBRELO0 (@odd == 0) or CSR_TLBRELO1 (@odd != 0), and record the
 * derived page size in CSR_TLBREHI.PS.
 *
 * @base: physical address of the last-level page table, or a huge-page
 *        leaf entry with the PAGE_HUGE bit set.
 * @mem_idx is unused by this helper.
 */
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    int shift;
    bool huge = (base >> LOONGARCH_PAGE_HUGE_SHIFT) & 0x1;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);

    base = base & TARGET_PHYS_MASK;

    if (huge) {
        /* Huge Page. base is paddr; clear the huge-page marker bit. */
        tmp0 = base ^ (1 << LOONGARCH_PAGE_HUGE_SHIFT);
        /* Move the Global bit from its huge-page position to TLBENTRY.G */
        tmp0 = ((tmp0 & (1 << LOONGARCH_HGLOBAL_SHIFT)) >>
                LOONGARCH_HGLOBAL_SHIFT) << R_TLBENTRY_G_SHIFT |
                (tmp0 & (~(1 << R_TLBENTRY_G_SHIFT)));
        /* Huge page size covers the whole last level: ptbase+ptwidth-1. */
        ps = ptbase + ptwidth - 1;
        if (odd) {
            /* The odd half starts one huge page above the even one. */
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        /* PTE width encoding: 0:64bit, 1:128bit, 2:192bit, 3:256bit */
        shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
        shift = (shift + 1) * 3;
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1;   /* clear bit 0: even/odd pair base */
        ptoffset0 = ptindex << shift;
        ptoffset1 = (ptindex + 1) << shift;

        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}