target/loongarch: Fix cpu_reset set wrong CSR_CRMD
[qemu/ar7.git] / target/loongarch/cpu.c
blob 5e85b9dbef30161b4ebdbc6238133e09fd635e87
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch CPU
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "sysemu/qtest.h"
#include "sysemu/tcg.h"
#include "sysemu/kvm.h"
#include "kvm/kvm_loongarch.h"
#include "exec/exec-all.h"
#include "cpu.h"
#include "internals.h"
#include "fpu/softfloat-helpers.h"
#include "cpu-csr.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/reset.h"
#endif
#include "vec.h"
#ifdef CONFIG_KVM
#include <linux/kvm.h>
#endif
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "tcg/tcg.h"
#endif

const char * const regnames[32] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

const char * const fregnames[32] = {
    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
};

struct TypeExcp {
    int32_t exccode;
    const char * const name;
};

static const struct TypeExcp excp_names[] = {
    {EXCCODE_INT, "Interrupt"},
    {EXCCODE_PIL, "Page invalid exception for load"},
    {EXCCODE_PIS, "Page invalid exception for store"},
    {EXCCODE_PIF, "Page invalid exception for fetch"},
    {EXCCODE_PME, "Page modified exception"},
    {EXCCODE_PNR, "Page Not Readable exception"},
    {EXCCODE_PNX, "Page Not Executable exception"},
    {EXCCODE_PPI, "Page Privilege error"},
    {EXCCODE_ADEF, "Address error for instruction fetch"},
    {EXCCODE_ADEM, "Address error for Memory access"},
    {EXCCODE_SYS, "Syscall"},
    {EXCCODE_BRK, "Break"},
    {EXCCODE_INE, "Instruction Non-Existent"},
    {EXCCODE_IPE, "Instruction privilege error"},
    {EXCCODE_FPD, "Floating Point Disabled"},
    {EXCCODE_FPE, "Floating Point Exception"},
    {EXCCODE_DBP, "Debug breakpoint"},
    {EXCCODE_BCE, "Bound Check Exception"},
    {EXCCODE_SXD, "128 bit vector instructions Disable exception"},
    {EXCCODE_ASXD, "256 bit vector instructions Disable exception"},
    {EXCP_HLT, "EXCP_HLT"},
};

const char *loongarch_exception_name(int32_t exception)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(excp_names); i++) {
        if (excp_names[i].exccode == exception) {
            return excp_names[i].name;
        }
    }
    return "Unknown";
}

void G_NORETURN do_raise_exception(CPULoongArchState *env,
                                   uint32_t exception,
                                   uintptr_t pc)
{
    CPUState *cs = env_cpu(env);

    qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n",
                  __func__,
                  exception,
                  loongarch_exception_name(exception));
    cs->exception_index = exception;

    cpu_loop_exit_restore(cs, pc);
}

static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
{
    set_pc(cpu_env(cs), value);
}

static vaddr loongarch_cpu_get_pc(CPUState *cs)
{
    return cpu_env(cs)->pc;
}

#ifndef CONFIG_USER_ONLY
#include "hw/loongarch/virt.h"

void loongarch_cpu_set_irq(void *opaque, int irq, int level)
{
    LoongArchCPU *cpu = opaque;
    CPULoongArchState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    if (irq < 0 || irq >= N_IRQS) {
        return;
    }
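
    /*
     * Under TCG, the IRQ line level is latched into the matching bit of
     * CSR_ESTAT.IS; the hard interrupt stays raised while any IS bit is set.
     */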
    if (kvm_enabled()) {
        kvm_loongarch_set_interrupt(cpu, irq, level);
    } else if (tcg_enabled()) {
        env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0);
        if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) {
            cpu_interrupt(cs, CPU_INTERRUPT_HARD);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }
}

static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env)
{
    bool ret = 0;

    ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) &&
           !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)));

    return ret;
}

/* Check if there is pending and not masked out interrupt */
static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env)
{
    uint32_t pending;
    uint32_t status;

    pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
    status = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

    return (pending & status) != 0;
}
#endif

#ifdef CONFIG_TCG
#ifndef CONFIG_USER_ONLY
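/*
 * Exception/interrupt entry: the previous PLV and IE are saved into
 * CSR_PRMD (or CSR_TLBRPRMD for a TLB refill), CRMD.PLV and CRMD.IE are
 * cleared, and execution continues at CSR_EENTRY (or CSR_TLBRENTRY).
 */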
static void loongarch_cpu_do_interrupt(CPUState *cs)
{
    CPULoongArchState *env = cpu_env(cs);
    bool update_badinstr = 1;
    int cause = -1;
    bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR);
    uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS);

    if (cs->exception_index != EXCCODE_INT) {
        qemu_log_mask(CPU_LOG_INT,
                      "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n",
                      __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA,
                      cs->exception_index,
                      loongarch_exception_name(cs->exception_index));
    }

    switch (cs->exception_index) {
    case EXCCODE_DBP:
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1);
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC);
        goto set_DERA;
    set_DERA:
        env->CSR_DERA = env->pc;
        env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1);
        set_pc(env, env->CSR_EENTRY + 0x480);
        break;
    case EXCCODE_INT:
        if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
            env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1);
            goto set_DERA;
        }
        QEMU_FALLTHROUGH;
    case EXCCODE_PIF:
    case EXCCODE_ADEF:
        cause = cs->exception_index;
        update_badinstr = 0;
        break;
    case EXCCODE_SYS:
    case EXCCODE_BRK:
    case EXCCODE_INE:
    case EXCCODE_IPE:
    case EXCCODE_FPD:
    case EXCCODE_FPE:
    case EXCCODE_SXD:
    case EXCCODE_ASXD:
        env->CSR_BADV = env->pc;
        QEMU_FALLTHROUGH;
    case EXCCODE_BCE:
    case EXCCODE_ADEM:
    case EXCCODE_PIL:
    case EXCCODE_PIS:
    case EXCCODE_PME:
    case EXCCODE_PNR:
    case EXCCODE_PNX:
    case EXCCODE_PPI:
        cause = cs->exception_index;
        break;
    default:
        qemu_log("Error: exception(%d) has not been supported\n",
                 cs->exception_index);
        abort();
    }

    if (update_badinstr) {
        env->CSR_BADI = cpu_ldl_code(env, env->pc);
    }

    /* Save PLV and IE */
    if (tlbfill) {
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV,
                                       FIELD_EX64(env->CSR_CRMD,
                                       CSR_CRMD, PLV));
        env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE,
                                       FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        /* set the DA mode */
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
        env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
        env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA,
                                      PC, (env->pc >> 2));
    } else {
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE,
                                    EXCODE_MCODE(cause));
        env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE,
                                    EXCODE_SUBCODE(cause));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV));
        env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE,
                                   FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE));
        env->CSR_ERA = env->pc;
    }

    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
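
    /* When VS is non-zero, each exception vector entry spans (1 << VS) * 4 bytes. */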
    if (vec_size) {
        vec_size = (1 << vec_size) * 4;
    }

    if (cs->exception_index == EXCCODE_INT) {
        /* Interrupt */
        uint32_t vector = 0;
        uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS);
        pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE);

        /* Find the highest-priority interrupt. */
        vector = 31 - clz32(pending);
        set_pc(env, env->CSR_EENTRY + \
               (EXCCODE_EXTERNAL_INT + vector) * vec_size);
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d\n" " A " TARGET_FMT_lx " D "
                      TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS"
                      TARGET_FMT_lx "\n",
                      __func__, env->pc, env->CSR_ERA,
                      cause, env->CSR_BADV, env->CSR_DERA, vector,
                      env->CSR_ECFG, env->CSR_ESTAT);
    } else {
        if (tlbfill) {
            set_pc(env, env->CSR_TLBRENTRY);
        } else {
            set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size);
        }
        qemu_log_mask(CPU_LOG_INT,
                      "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                      " cause %d%s\n, ESTAT " TARGET_FMT_lx
                      " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx
                      "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu
                      " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc,
                      tlbfill ? env->CSR_TLBRERA : env->CSR_ERA,
                      cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT,
                      env->CSR_ECFG,
                      tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV,
                      env->CSR_BADI, env->gpr[11], cs->cpu_index,
                      env->CSR_ASID);
    }
    cs->exception_index = -1;
}

static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                                vaddr addr, unsigned size,
                                                MMUAccessType access_type,
                                                int mmu_idx, MemTxAttrs attrs,
                                                MemTxResult response,
                                                uintptr_t retaddr)
{
    CPULoongArchState *env = cpu_env(cs);

    if (access_type == MMU_INST_FETCH) {
        do_raise_exception(env, EXCCODE_ADEF, retaddr);
    } else {
        do_raise_exception(env, EXCCODE_ADEM, retaddr);
    }
}

static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        CPULoongArchState *env = cpu_env(cs);

        if (cpu_loongarch_hw_interrupts_enabled(env) &&
            cpu_loongarch_hw_interrupts_pending(env)) {
            /* Raise it */
            cs->exception_index = EXCCODE_INT;
            loongarch_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
#endif

static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
                                              const TranslationBlock *tb)
{
    tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));
    set_pc(cpu_env(cs), tb->pc);
}

static void loongarch_restore_state_to_opc(CPUState *cs,
                                           const TranslationBlock *tb,
                                           const uint64_t *data)
{
    set_pc(cpu_env(cs), data[0]);
}
#endif /* CONFIG_TCG */

static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
        cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) {
        has_work = true;
    }

    return has_work;
#endif
}
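
/*
 * With paging enabled, the MMU index is the current privilege level;
 * otherwise the direct address translation index (MMU_DA_IDX) is used.
 */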
static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPULoongArchState *env = cpu_env(cs);

    if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) {
        return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV);
    }
    return MMU_DA_IDX;
}

static void loongarch_la464_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;
    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-3A5000";
    env->cpucfg[0] = 0x14c010;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 2);
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x2f);
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 1);
    data = FIELD_DP32(data, CPUCFG1, EP, 1);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 1);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG2, FP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_SP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_DP, 1);
    data = FIELD_DP32(data, CPUCFG2, FP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSX, 1);
    data = FIELD_DP32(data, CPUCFG2, LASX, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP, 1);
    data = FIELD_DP32(data, CPUCFG2, LLFTP_VER, 1);
    data = FIELD_DP32(data, CPUCFG2, LSPW, 1);
    data = FIELD_DP32(data, CPUCFG2, LAM, 1);
    env->cpucfg[2] = data;

    env->cpucfg[4] = 100 * 1000 * 1000; /* Crystal frequency */

    data = 0;
    data = FIELD_DP32(data, CPUCFG5, CC_MUL, 1);
    data = FIELD_DP32(data, CPUCFG5, CC_DIV, 1);
    env->cpucfg[5] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG16, L1_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L1_DPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L2_IUPRIV, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUPRE, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUUNIFY, 1);
    data = FIELD_DP32(data, CPUCFG16, L3_IUINCL, 1);
    env->cpucfg[16] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG17, L1IU_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG17, L1IU_SIZE, 6);
    env->cpucfg[17] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG18, L1D_WAYS, 3);
    data = FIELD_DP32(data, CPUCFG18, L1D_SETS, 8);
    data = FIELD_DP32(data, CPUCFG18, L1D_SIZE, 6);
    env->cpucfg[18] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG19, L2IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SETS, 8);
    data = FIELD_DP32(data, CPUCFG19, L2IU_SIZE, 6);
    env->cpucfg[19] = data;

    data = 0;
    data = FIELD_DP32(data, CPUCFG20, L3IU_WAYS, 15);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SETS, 14);
    data = FIELD_DP32(data, CPUCFG20, L3IU_SIZE, 6);
    env->cpucfg[20] = data;

    env->CSR_ASID = FIELD_DP64(0, CSR_ASID, ASIDBITS, 0xa);

    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM, 8);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, TIMER_BITS, 0x2f);
    env->CSR_PRCFG1 = FIELD_DP64(env->CSR_PRCFG1, CSR_PRCFG1, VSMAX, 7);

    env->CSR_PRCFG2 = 0x3ffff000;

    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7);
    env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8);

    loongarch_cpu_post_init(obj);
}

static void loongarch_la132_initfn(Object *obj)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    CPULoongArchState *env = &cpu->env;

    int i;

    for (i = 0; i < 21; i++) {
        env->cpucfg[i] = 0x0;
    }

    cpu->dtb_compatible = "loongarch,Loongson-1C103";
    env->cpucfg[0] = 0x148042;  /* PRID */

    uint32_t data = 0;
    data = FIELD_DP32(data, CPUCFG1, ARCH, 1); /* LA32 */
    data = FIELD_DP32(data, CPUCFG1, PGMMU, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR, 1);
    data = FIELD_DP32(data, CPUCFG1, PALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, VALEN, 0x1f); /* 32 bits */
    data = FIELD_DP32(data, CPUCFG1, UAL, 1);
    data = FIELD_DP32(data, CPUCFG1, RI, 0);
    data = FIELD_DP32(data, CPUCFG1, EP, 0);
    data = FIELD_DP32(data, CPUCFG1, RPLV, 0);
    data = FIELD_DP32(data, CPUCFG1, HP, 1);
    data = FIELD_DP32(data, CPUCFG1, IOCSR_BRD, 1);
    env->cpucfg[1] = data;
}

static void loongarch_max_initfn(Object *obj)
{
    /* '-cpu max' for TCG: we use cpu la464. */
    loongarch_la464_initfn(obj);
}

static void loongarch_cpu_reset_hold(Object *obj, ResetType type)
{
    CPUState *cs = CPU(obj);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(obj);
    CPULoongArchState *env = cpu_env(cs);

    if (lacc->parent_phases.hold) {
        lacc->parent_phases.hold(obj, type);
    }

#ifdef CONFIG_TCG
    env->fcsr0_mask = FCSR0_M1 | FCSR0_M2 | FCSR0_M3;
#endif
    env->fcsr0 = 0x0;

    int n;
    /* Set csr registers value after reset, see the manual 6.4. */
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATF, 0);
    env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DATM, 0);
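
    /*
     * After reset the CPU starts at PLV0 with interrupts disabled, in
     * direct address translation mode (DA = 1, PG = 0).
     */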

    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, FPE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, SXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, ASXE, 0);
    env->CSR_EUEN = FIELD_DP64(env->CSR_EUEN, CSR_EUEN, BTE, 0);

    env->CSR_MISC = 0;

    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
    env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);

    env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
    env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
    env->CSR_CPUID = cs->cpu_index;
    env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
    env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0);
    env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0);
    env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0);
    env->CSR_TID = cs->cpu_index;

    for (n = 0; n < 4; n++) {
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV0, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV1, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV2, 0);
        env->CSR_DMW[n] = FIELD_DP64(env->CSR_DMW[n], CSR_DMW, PLV3, 0);
    }

#ifndef CONFIG_USER_ONLY
    env->pc = 0x1c000000;
#ifdef CONFIG_TCG
    memset(env->tlb, 0, sizeof(env->tlb));
#endif
    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(env);
    }
#endif

#ifdef CONFIG_TCG
    restore_fp_status(env);
#endif
    cs->exception_index = -1;
}

static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    info->print_insn = print_insn_loongarch;
}

static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    LoongArchCPUClass *lacc = LOONGARCH_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    loongarch_cpu_register_gdb_regs_for_features(cs);

    cpu_reset(cs);
    qemu_init_vcpu(cs);

    lacc->parent_realize(dev, errp);
}
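
/*
 * The "lsx" and "lasx" properties track CPUCFG2: enabling LASX also
 * enables LSX, and disabling LSX clears LASX as well.
 */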
static bool loongarch_get_lsx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lsx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 0);
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

static bool loongarch_get_lasx(Object *obj, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);
    bool ret;

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        ret = true;
    } else {
        ret = false;
    }
    return ret;
}

static void loongarch_set_lasx(Object *obj, bool value, Error **errp)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    if (value) {
        if (!FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
            cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LSX, 1);
        }
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 1);
    } else {
        cpu->env.cpucfg[2] = FIELD_DP32(cpu->env.cpucfg[2], CPUCFG2, LASX, 0);
    }
}

void loongarch_cpu_post_init(Object *obj)
{
    object_property_add_bool(obj, "lsx", loongarch_get_lsx,
                             loongarch_set_lsx);
    object_property_add_bool(obj, "lasx", loongarch_get_lasx,
                             loongarch_set_lasx);
}

static void loongarch_cpu_init(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    LoongArchCPU *cpu = LOONGARCH_CPU(obj);

    qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS);
#ifdef CONFIG_TCG
    timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL,
                  &loongarch_constant_timer_cb, cpu);
#endif
#endif
}

static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;

    oc = object_class_by_name(cpu_model);
    if (!oc) {
        g_autofree char *typename
            = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
        oc = object_class_by_name(typename);
    }

    return oc;
}

void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPULoongArchState *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, " PC=%016" PRIx64 " ", env->pc);
    qemu_fprintf(f, " FCSR0 0x%08x\n", env->fcsr0);

    /* gpr */
    for (i = 0; i < 32; i++) {
        if ((i & 3) == 0) {
            qemu_fprintf(f, " GPR%02d:", i);
        }
        qemu_fprintf(f, " %s %016" PRIx64, regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }

    qemu_fprintf(f, "CRMD=%016" PRIx64 "\n", env->CSR_CRMD);
    qemu_fprintf(f, "PRMD=%016" PRIx64 "\n", env->CSR_PRMD);
    qemu_fprintf(f, "EUEN=%016" PRIx64 "\n", env->CSR_EUEN);
    qemu_fprintf(f, "ESTAT=%016" PRIx64 "\n", env->CSR_ESTAT);
    qemu_fprintf(f, "ERA=%016" PRIx64 "\n", env->CSR_ERA);
    qemu_fprintf(f, "BADV=%016" PRIx64 "\n", env->CSR_BADV);
    qemu_fprintf(f, "BADI=%016" PRIx64 "\n", env->CSR_BADI);
    qemu_fprintf(f, "EENTRY=%016" PRIx64 "\n", env->CSR_EENTRY);
    qemu_fprintf(f, "PRCFG1=%016" PRIx64 ", PRCFG2=%016" PRIx64 ","
                 " PRCFG3=%016" PRIx64 "\n",
                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
    qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY);
    qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV);
    qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA);
    qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG);
    qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL);

    /* fpr */
    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %s %016" PRIx64, fregnames[i], env->fpr[i].vreg.D(0));
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}

#ifdef CONFIG_TCG
#include "hw/core/tcg-cpu-ops.h"

static const TCGCPUOps loongarch_tcg_ops = {
    .initialize = loongarch_translate_init,
    .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
    .restore_state_to_opc = loongarch_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = loongarch_cpu_tlb_fill,
    .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
    .cpu_exec_halt = loongarch_cpu_has_work,
    .do_interrupt = loongarch_cpu_do_interrupt,
    .do_transaction_failed = loongarch_cpu_do_transaction_failed,
#endif
};
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};

static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(cs);

    return cpu->phy_id;
}
#endif

static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LoongArchCPUClass *lacc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &lacc->parent_realize);
    resettable_class_set_parent_phases(rc, NULL, loongarch_cpu_reset_hold, NULL,
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->mmu_index = loongarch_cpu_mmu_index;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->get_pc = loongarch_cpu_get_pc;
#ifndef CONFIG_USER_ONLY
    cc->get_arch_id = loongarch_cpu_get_arch_id;
    dc->vmsd = &vmstate_loongarch_cpu;
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->gdb_stop_before_watchpoint = true;

#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif
}

static const gchar *loongarch32_gdb_arch_name(CPUState *cs)
{
    return "loongarch32";
}

static void loongarch32_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base32.xml";
    cc->gdb_arch_name = loongarch32_gdb_arch_name;
}

static const gchar *loongarch64_gdb_arch_name(CPUState *cs)
{
    return "loongarch64";
}

static void loongarch64_cpu_class_init(ObjectClass *c, void *data)
{
    CPUClass *cc = CPU_CLASS(c);

    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_arch_name = loongarch64_gdb_arch_name;
}

#define DEFINE_LOONGARCH_CPU_TYPE(size, model, initfn) \
    { \
        .parent = TYPE_LOONGARCH##size##_CPU, \
        .instance_init = initfn, \
        .name = LOONGARCH_CPU_TYPE_NAME(model), \
    }

static const TypeInfo loongarch_cpu_type_infos[] = {
    {
        .name = TYPE_LOONGARCH_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(LoongArchCPU),
        .instance_align = __alignof(LoongArchCPU),
        .instance_init = loongarch_cpu_init,

        .abstract = true,
        .class_size = sizeof(LoongArchCPUClass),
        .class_init = loongarch_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH32_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch32_cpu_class_init,
    },
    {
        .name = TYPE_LOONGARCH64_CPU,
        .parent = TYPE_LOONGARCH_CPU,

        .abstract = true,
        .class_init = loongarch64_cpu_class_init,
    },
    DEFINE_LOONGARCH_CPU_TYPE(64, "la464", loongarch_la464_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(32, "la132", loongarch_la132_initfn),
    DEFINE_LOONGARCH_CPU_TYPE(64, "max", loongarch_max_initfn),
};

DEFINE_TYPES(loongarch_cpu_type_infos)