2 * LatticeMico32 main translation routines.
4 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/translator.h"
26 #include "tcg/tcg-op.h"
27 #include "qemu/qemu-print.h"
29 #include "exec/cpu_ldst.h"
30 #include "hw/lm32/lm32_pic.h"
32 #include "exec/helper-gen.h"
34 #include "trace-tcg.h"
/*
 * Emit disassembly trace output; printed only when CPU_LOG_TB_IN_ASM
 * logging is enabled.  Wrapped in do/while (0) so the macro behaves as
 * a single statement inside unbraced if/else bodies.
 */
#define LOG_DIS(...) \
    do { \
        qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
    } while (0)
/*
 * Extract the (inclusive) bit field [start, end] from src, LSB first.
 * All macro arguments are parenthesized so that expressions such as
 * EXTRACT_FIELD(x, a + 1, b) expand correctly.
 */
#define EXTRACT_FIELD(src, start, end) \
    (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
52 /* is_jmp field values */
53 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
54 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
55 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
57 static TCGv cpu_R
[32];
67 static TCGv cpu_bp
[4];
68 static TCGv cpu_wp
[4];
70 #include "exec/gen-icount.h"
79 /* This is the state at translation time. */
80 typedef struct DisasContext
{
87 uint8_t r0
, r1
, r2
, csr
;
92 unsigned int delayed_branch
;
93 unsigned int tb_flags
, synced_flags
; /* tb dependent flags. */
96 struct TranslationBlock
*tb
;
97 int singlestep_enabled
;
100 uint8_t num_breakpoints
;
101 uint8_t num_watchpoints
;
/*
 * Register names for trace output: 32 general-purpose registers
 * followed by the 4 breakpoint and 4 watchpoint registers (40 names,
 * matching the globals created in lm32_translate_init()).  The
 * trailing "wp1".."wp3" entries lost during extraction are restored.
 */
static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};
/* Return the low 'width' bits of 'val', upper bits cleared. */
static inline int zero_extend(unsigned int val, int width)
{
    int mask = (1 << width) - 1;

    return val & mask;
}
/*
 * Sign-extend the low 'width' bits of 'val' to a full int; bit
 * (width - 1) is the sign bit and any higher bits of 'val' are
 * ignored.  Implemented with the (x ^ s) - s identity so the result
 * does not depend on implementation-defined right shifts of negative
 * values.
 */
static inline int sign_extend(unsigned int val, int width)
{
    unsigned int sign_bit = 1u << (width - 1);
    unsigned int low = val & ((sign_bit << 1) - 1);

    return (int)(low ^ sign_bit) - (int)sign_bit;
}
131 static inline void t_gen_raise_exception(DisasContext
*dc
, uint32_t index
)
133 TCGv_i32 tmp
= tcg_const_i32(index
);
135 gen_helper_raise_exception(cpu_env
, tmp
);
136 tcg_temp_free_i32(tmp
);
139 static inline void t_gen_illegal_insn(DisasContext
*dc
)
141 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
142 gen_helper_ill(cpu_env
);
145 static inline bool use_goto_tb(DisasContext
*dc
, target_ulong dest
)
147 if (unlikely(dc
->singlestep_enabled
)) {
151 #ifndef CONFIG_USER_ONLY
152 return (dc
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
158 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
160 if (use_goto_tb(dc
, dest
)) {
162 tcg_gen_movi_tl(cpu_pc
, dest
);
163 tcg_gen_exit_tb(dc
->tb
, n
);
165 tcg_gen_movi_tl(cpu_pc
, dest
);
166 if (dc
->singlestep_enabled
) {
167 t_gen_raise_exception(dc
, EXCP_DEBUG
);
169 tcg_gen_exit_tb(NULL
, 0);
173 static void dec_add(DisasContext
*dc
)
175 if (dc
->format
== OP_FMT_RI
) {
176 if (dc
->r0
== R_R0
) {
177 if (dc
->r1
== R_R0
&& dc
->imm16
== 0) {
180 LOG_DIS("mvi r%d, %d\n", dc
->r1
, sign_extend(dc
->imm16
, 16));
183 LOG_DIS("addi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
184 sign_extend(dc
->imm16
, 16));
187 LOG_DIS("add r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
190 if (dc
->format
== OP_FMT_RI
) {
191 tcg_gen_addi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
192 sign_extend(dc
->imm16
, 16));
194 tcg_gen_add_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
198 static void dec_and(DisasContext
*dc
)
200 if (dc
->format
== OP_FMT_RI
) {
201 LOG_DIS("andi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
202 zero_extend(dc
->imm16
, 16));
204 LOG_DIS("and r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
207 if (dc
->format
== OP_FMT_RI
) {
208 tcg_gen_andi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
209 zero_extend(dc
->imm16
, 16));
211 if (dc
->r0
== 0 && dc
->r1
== 0 && dc
->r2
== 0) {
212 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
213 gen_helper_hlt(cpu_env
);
215 tcg_gen_and_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
220 static void dec_andhi(DisasContext
*dc
)
222 LOG_DIS("andhi r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm16
);
224 tcg_gen_andi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], (dc
->imm16
<< 16));
227 static void dec_b(DisasContext
*dc
)
229 if (dc
->r0
== R_RA
) {
231 } else if (dc
->r0
== R_EA
) {
233 } else if (dc
->r0
== R_BA
) {
236 LOG_DIS("b r%d\n", dc
->r0
);
239 /* restore IE.IE in case of an eret */
240 if (dc
->r0
== R_EA
) {
241 TCGv t0
= tcg_temp_new();
242 TCGLabel
*l1
= gen_new_label();
243 tcg_gen_andi_tl(t0
, cpu_ie
, IE_EIE
);
244 tcg_gen_ori_tl(cpu_ie
, cpu_ie
, IE_IE
);
245 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, IE_EIE
, l1
);
246 tcg_gen_andi_tl(cpu_ie
, cpu_ie
, ~IE_IE
);
249 } else if (dc
->r0
== R_BA
) {
250 TCGv t0
= tcg_temp_new();
251 TCGLabel
*l1
= gen_new_label();
252 tcg_gen_andi_tl(t0
, cpu_ie
, IE_BIE
);
253 tcg_gen_ori_tl(cpu_ie
, cpu_ie
, IE_IE
);
254 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, IE_BIE
, l1
);
255 tcg_gen_andi_tl(cpu_ie
, cpu_ie
, ~IE_IE
);
259 tcg_gen_mov_tl(cpu_pc
, cpu_R
[dc
->r0
]);
261 dc
->is_jmp
= DISAS_JUMP
;
264 static void dec_bi(DisasContext
*dc
)
266 LOG_DIS("bi %d\n", sign_extend(dc
->imm26
<< 2, 26));
268 gen_goto_tb(dc
, 0, dc
->pc
+ (sign_extend(dc
->imm26
<< 2, 26)));
270 dc
->is_jmp
= DISAS_TB_JUMP
;
273 static inline void gen_cond_branch(DisasContext
*dc
, int cond
)
275 TCGLabel
*l1
= gen_new_label();
276 tcg_gen_brcond_tl(cond
, cpu_R
[dc
->r0
], cpu_R
[dc
->r1
], l1
);
277 gen_goto_tb(dc
, 0, dc
->pc
+ 4);
279 gen_goto_tb(dc
, 1, dc
->pc
+ (sign_extend(dc
->imm16
<< 2, 16)));
280 dc
->is_jmp
= DISAS_TB_JUMP
;
283 static void dec_be(DisasContext
*dc
)
285 LOG_DIS("be r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
286 sign_extend(dc
->imm16
, 16) * 4);
288 gen_cond_branch(dc
, TCG_COND_EQ
);
291 static void dec_bg(DisasContext
*dc
)
293 LOG_DIS("bg r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
294 sign_extend(dc
->imm16
, 16 * 4));
296 gen_cond_branch(dc
, TCG_COND_GT
);
299 static void dec_bge(DisasContext
*dc
)
301 LOG_DIS("bge r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
302 sign_extend(dc
->imm16
, 16) * 4);
304 gen_cond_branch(dc
, TCG_COND_GE
);
307 static void dec_bgeu(DisasContext
*dc
)
309 LOG_DIS("bgeu r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
310 sign_extend(dc
->imm16
, 16) * 4);
312 gen_cond_branch(dc
, TCG_COND_GEU
);
315 static void dec_bgu(DisasContext
*dc
)
317 LOG_DIS("bgu r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
318 sign_extend(dc
->imm16
, 16) * 4);
320 gen_cond_branch(dc
, TCG_COND_GTU
);
323 static void dec_bne(DisasContext
*dc
)
325 LOG_DIS("bne r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
326 sign_extend(dc
->imm16
, 16) * 4);
328 gen_cond_branch(dc
, TCG_COND_NE
);
331 static void dec_call(DisasContext
*dc
)
333 LOG_DIS("call r%d\n", dc
->r0
);
335 tcg_gen_movi_tl(cpu_R
[R_RA
], dc
->pc
+ 4);
336 tcg_gen_mov_tl(cpu_pc
, cpu_R
[dc
->r0
]);
338 dc
->is_jmp
= DISAS_JUMP
;
341 static void dec_calli(DisasContext
*dc
)
343 LOG_DIS("calli %d\n", sign_extend(dc
->imm26
, 26) * 4);
345 tcg_gen_movi_tl(cpu_R
[R_RA
], dc
->pc
+ 4);
346 gen_goto_tb(dc
, 0, dc
->pc
+ (sign_extend(dc
->imm26
<< 2, 26)));
348 dc
->is_jmp
= DISAS_TB_JUMP
;
351 static inline void gen_compare(DisasContext
*dc
, int cond
)
355 if (dc
->format
== OP_FMT_RI
) {
359 i
= zero_extend(dc
->imm16
, 16);
362 i
= sign_extend(dc
->imm16
, 16);
366 tcg_gen_setcondi_tl(cond
, cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], i
);
368 tcg_gen_setcond_tl(cond
, cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
372 static void dec_cmpe(DisasContext
*dc
)
374 if (dc
->format
== OP_FMT_RI
) {
375 LOG_DIS("cmpei r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
376 sign_extend(dc
->imm16
, 16));
378 LOG_DIS("cmpe r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
381 gen_compare(dc
, TCG_COND_EQ
);
384 static void dec_cmpg(DisasContext
*dc
)
386 if (dc
->format
== OP_FMT_RI
) {
387 LOG_DIS("cmpgi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
388 sign_extend(dc
->imm16
, 16));
390 LOG_DIS("cmpg r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
393 gen_compare(dc
, TCG_COND_GT
);
396 static void dec_cmpge(DisasContext
*dc
)
398 if (dc
->format
== OP_FMT_RI
) {
399 LOG_DIS("cmpgei r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
400 sign_extend(dc
->imm16
, 16));
402 LOG_DIS("cmpge r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
405 gen_compare(dc
, TCG_COND_GE
);
408 static void dec_cmpgeu(DisasContext
*dc
)
410 if (dc
->format
== OP_FMT_RI
) {
411 LOG_DIS("cmpgeui r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
412 zero_extend(dc
->imm16
, 16));
414 LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
417 gen_compare(dc
, TCG_COND_GEU
);
420 static void dec_cmpgu(DisasContext
*dc
)
422 if (dc
->format
== OP_FMT_RI
) {
423 LOG_DIS("cmpgui r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
424 zero_extend(dc
->imm16
, 16));
426 LOG_DIS("cmpgu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
429 gen_compare(dc
, TCG_COND_GTU
);
432 static void dec_cmpne(DisasContext
*dc
)
434 if (dc
->format
== OP_FMT_RI
) {
435 LOG_DIS("cmpnei r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
436 sign_extend(dc
->imm16
, 16));
438 LOG_DIS("cmpne r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
441 gen_compare(dc
, TCG_COND_NE
);
444 static void dec_divu(DisasContext
*dc
)
448 LOG_DIS("divu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
450 if (!(dc
->features
& LM32_FEATURE_DIVIDE
)) {
451 qemu_log_mask(LOG_GUEST_ERROR
, "hardware divider is not available\n");
452 t_gen_illegal_insn(dc
);
456 l1
= gen_new_label();
457 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_R
[dc
->r1
], 0, l1
);
458 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
459 t_gen_raise_exception(dc
, EXCP_DIVIDE_BY_ZERO
);
461 tcg_gen_divu_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
464 static void dec_lb(DisasContext
*dc
)
468 LOG_DIS("lb r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
471 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
472 tcg_gen_qemu_ld8s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
476 static void dec_lbu(DisasContext
*dc
)
480 LOG_DIS("lbu r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
483 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
484 tcg_gen_qemu_ld8u(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
488 static void dec_lh(DisasContext
*dc
)
492 LOG_DIS("lh r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
495 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
496 tcg_gen_qemu_ld16s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
500 static void dec_lhu(DisasContext
*dc
)
504 LOG_DIS("lhu r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
507 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
508 tcg_gen_qemu_ld16u(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
512 static void dec_lw(DisasContext
*dc
)
516 LOG_DIS("lw r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, sign_extend(dc
->imm16
, 16));
519 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
520 tcg_gen_qemu_ld32s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
524 static void dec_modu(DisasContext
*dc
)
528 LOG_DIS("modu r%d, r%d, %d\n", dc
->r2
, dc
->r0
, dc
->r1
);
530 if (!(dc
->features
& LM32_FEATURE_DIVIDE
)) {
531 qemu_log_mask(LOG_GUEST_ERROR
, "hardware divider is not available\n");
532 t_gen_illegal_insn(dc
);
536 l1
= gen_new_label();
537 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_R
[dc
->r1
], 0, l1
);
538 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
539 t_gen_raise_exception(dc
, EXCP_DIVIDE_BY_ZERO
);
541 tcg_gen_remu_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
544 static void dec_mul(DisasContext
*dc
)
546 if (dc
->format
== OP_FMT_RI
) {
547 LOG_DIS("muli r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
548 sign_extend(dc
->imm16
, 16));
550 LOG_DIS("mul r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
553 if (!(dc
->features
& LM32_FEATURE_MULTIPLY
)) {
554 qemu_log_mask(LOG_GUEST_ERROR
,
555 "hardware multiplier is not available\n");
556 t_gen_illegal_insn(dc
);
560 if (dc
->format
== OP_FMT_RI
) {
561 tcg_gen_muli_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
562 sign_extend(dc
->imm16
, 16));
564 tcg_gen_mul_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
568 static void dec_nor(DisasContext
*dc
)
570 if (dc
->format
== OP_FMT_RI
) {
571 LOG_DIS("nori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
572 zero_extend(dc
->imm16
, 16));
574 LOG_DIS("nor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
577 if (dc
->format
== OP_FMT_RI
) {
578 TCGv t0
= tcg_temp_new();
579 tcg_gen_movi_tl(t0
, zero_extend(dc
->imm16
, 16));
580 tcg_gen_nor_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], t0
);
583 tcg_gen_nor_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
587 static void dec_or(DisasContext
*dc
)
589 if (dc
->format
== OP_FMT_RI
) {
590 LOG_DIS("ori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
591 zero_extend(dc
->imm16
, 16));
593 if (dc
->r1
== R_R0
) {
594 LOG_DIS("mv r%d, r%d\n", dc
->r2
, dc
->r0
);
596 LOG_DIS("or r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
600 if (dc
->format
== OP_FMT_RI
) {
601 tcg_gen_ori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
602 zero_extend(dc
->imm16
, 16));
604 tcg_gen_or_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
608 static void dec_orhi(DisasContext
*dc
)
610 if (dc
->r0
== R_R0
) {
611 LOG_DIS("mvhi r%d, %d\n", dc
->r1
, dc
->imm16
);
613 LOG_DIS("orhi r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm16
);
616 tcg_gen_ori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], (dc
->imm16
<< 16));
619 static void dec_scall(DisasContext
*dc
)
624 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
625 t_gen_raise_exception(dc
, EXCP_BREAKPOINT
);
629 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
630 t_gen_raise_exception(dc
, EXCP_SYSTEMCALL
);
633 qemu_log_mask(LOG_GUEST_ERROR
, "invalid opcode @0x%x", dc
->pc
);
634 t_gen_illegal_insn(dc
);
639 static void dec_rcsr(DisasContext
*dc
)
641 LOG_DIS("rcsr r%d, %d\n", dc
->r2
, dc
->csr
);
645 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_ie
);
648 gen_helper_rcsr_im(cpu_R
[dc
->r2
], cpu_env
);
651 gen_helper_rcsr_ip(cpu_R
[dc
->r2
], cpu_env
);
654 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_cc
);
657 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_cfg
);
660 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_eba
);
663 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_dc
);
666 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_deba
);
669 gen_helper_rcsr_jtx(cpu_R
[dc
->r2
], cpu_env
);
672 gen_helper_rcsr_jrx(cpu_R
[dc
->r2
], cpu_env
);
684 qemu_log_mask(LOG_GUEST_ERROR
, "invalid read access csr=%x\n", dc
->csr
);
687 qemu_log_mask(LOG_GUEST_ERROR
, "read_csr: unknown csr=%x\n", dc
->csr
);
692 static void dec_sb(DisasContext
*dc
)
696 LOG_DIS("sb (r%d+%d), r%d\n", dc
->r0
, dc
->imm16
, dc
->r1
);
699 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
700 tcg_gen_qemu_st8(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
704 static void dec_sextb(DisasContext
*dc
)
706 LOG_DIS("sextb r%d, r%d\n", dc
->r2
, dc
->r0
);
708 if (!(dc
->features
& LM32_FEATURE_SIGN_EXTEND
)) {
709 qemu_log_mask(LOG_GUEST_ERROR
,
710 "hardware sign extender is not available\n");
711 t_gen_illegal_insn(dc
);
715 tcg_gen_ext8s_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
]);
718 static void dec_sexth(DisasContext
*dc
)
720 LOG_DIS("sexth r%d, r%d\n", dc
->r2
, dc
->r0
);
722 if (!(dc
->features
& LM32_FEATURE_SIGN_EXTEND
)) {
723 qemu_log_mask(LOG_GUEST_ERROR
,
724 "hardware sign extender is not available\n");
725 t_gen_illegal_insn(dc
);
729 tcg_gen_ext16s_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
]);
732 static void dec_sh(DisasContext
*dc
)
736 LOG_DIS("sh (r%d+%d), r%d\n", dc
->r0
, dc
->imm16
, dc
->r1
);
739 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
740 tcg_gen_qemu_st16(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
744 static void dec_sl(DisasContext
*dc
)
746 if (dc
->format
== OP_FMT_RI
) {
747 LOG_DIS("sli r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
749 LOG_DIS("sl r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
752 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
753 qemu_log_mask(LOG_GUEST_ERROR
, "hardware shifter is not available\n");
754 t_gen_illegal_insn(dc
);
758 if (dc
->format
== OP_FMT_RI
) {
759 tcg_gen_shli_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
761 TCGv t0
= tcg_temp_new();
762 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
763 tcg_gen_shl_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
768 static void dec_sr(DisasContext
*dc
)
770 if (dc
->format
== OP_FMT_RI
) {
771 LOG_DIS("sri r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
773 LOG_DIS("sr r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
776 /* The real CPU (w/o hardware shifter) only supports right shift by exactly
778 if (dc
->format
== OP_FMT_RI
) {
779 if (!(dc
->features
& LM32_FEATURE_SHIFT
) && (dc
->imm5
!= 1)) {
780 qemu_log_mask(LOG_GUEST_ERROR
,
781 "hardware shifter is not available\n");
782 t_gen_illegal_insn(dc
);
785 tcg_gen_sari_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
787 TCGLabel
*l1
= gen_new_label();
788 TCGLabel
*l2
= gen_new_label();
789 TCGv t0
= tcg_temp_local_new();
790 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
792 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
793 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 1, l1
);
794 t_gen_illegal_insn(dc
);
799 tcg_gen_sar_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
806 static void dec_sru(DisasContext
*dc
)
808 if (dc
->format
== OP_FMT_RI
) {
809 LOG_DIS("srui r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
811 LOG_DIS("sru r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
814 if (dc
->format
== OP_FMT_RI
) {
815 if (!(dc
->features
& LM32_FEATURE_SHIFT
) && (dc
->imm5
!= 1)) {
816 qemu_log_mask(LOG_GUEST_ERROR
,
817 "hardware shifter is not available\n");
818 t_gen_illegal_insn(dc
);
821 tcg_gen_shri_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
823 TCGLabel
*l1
= gen_new_label();
824 TCGLabel
*l2
= gen_new_label();
825 TCGv t0
= tcg_temp_local_new();
826 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
828 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
829 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 1, l1
);
830 t_gen_illegal_insn(dc
);
835 tcg_gen_shr_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
842 static void dec_sub(DisasContext
*dc
)
844 LOG_DIS("sub r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
846 tcg_gen_sub_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
849 static void dec_sw(DisasContext
*dc
)
853 LOG_DIS("sw (r%d+%d), r%d\n", dc
->r0
, sign_extend(dc
->imm16
, 16), dc
->r1
);
856 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
857 tcg_gen_qemu_st32(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
861 static void dec_user(DisasContext
*dc
)
865 qemu_log_mask(LOG_GUEST_ERROR
, "user instruction undefined\n");
866 t_gen_illegal_insn(dc
);
869 static void dec_wcsr(DisasContext
*dc
)
873 LOG_DIS("wcsr %d, r%d\n", dc
->csr
, dc
->r1
);
877 tcg_gen_mov_tl(cpu_ie
, cpu_R
[dc
->r1
]);
878 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
879 dc
->is_jmp
= DISAS_UPDATE
;
882 /* mark as an io operation because it could cause an interrupt */
883 if (tb_cflags(dc
->tb
) & CF_USE_ICOUNT
) {
886 gen_helper_wcsr_im(cpu_env
, cpu_R
[dc
->r1
]);
887 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
888 dc
->is_jmp
= DISAS_UPDATE
;
891 /* mark as an io operation because it could cause an interrupt */
892 if (tb_cflags(dc
->tb
) & CF_USE_ICOUNT
) {
895 gen_helper_wcsr_ip(cpu_env
, cpu_R
[dc
->r1
]);
896 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
897 dc
->is_jmp
= DISAS_UPDATE
;
906 tcg_gen_mov_tl(cpu_eba
, cpu_R
[dc
->r1
]);
909 tcg_gen_mov_tl(cpu_deba
, cpu_R
[dc
->r1
]);
912 gen_helper_wcsr_jtx(cpu_env
, cpu_R
[dc
->r1
]);
915 gen_helper_wcsr_jrx(cpu_env
, cpu_R
[dc
->r1
]);
918 gen_helper_wcsr_dc(cpu_env
, cpu_R
[dc
->r1
]);
924 no
= dc
->csr
- CSR_BP0
;
925 if (dc
->num_breakpoints
<= no
) {
926 qemu_log_mask(LOG_GUEST_ERROR
,
927 "breakpoint #%i is not available\n", no
);
928 t_gen_illegal_insn(dc
);
931 gen_helper_wcsr_bp(cpu_env
, cpu_R
[dc
->r1
], tcg_const_i32(no
));
937 no
= dc
->csr
- CSR_WP0
;
938 if (dc
->num_watchpoints
<= no
) {
939 qemu_log_mask(LOG_GUEST_ERROR
,
940 "watchpoint #%i is not available\n", no
);
941 t_gen_illegal_insn(dc
);
944 gen_helper_wcsr_wp(cpu_env
, cpu_R
[dc
->r1
], tcg_const_i32(no
));
948 qemu_log_mask(LOG_GUEST_ERROR
, "invalid write access csr=%x\n",
952 qemu_log_mask(LOG_GUEST_ERROR
, "write_csr: unknown csr=%x\n",
958 static void dec_xnor(DisasContext
*dc
)
960 if (dc
->format
== OP_FMT_RI
) {
961 LOG_DIS("xnori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
962 zero_extend(dc
->imm16
, 16));
964 if (dc
->r1
== R_R0
) {
965 LOG_DIS("not r%d, r%d\n", dc
->r2
, dc
->r0
);
967 LOG_DIS("xnor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
971 if (dc
->format
== OP_FMT_RI
) {
972 tcg_gen_xori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
973 zero_extend(dc
->imm16
, 16));
974 tcg_gen_not_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r1
]);
976 tcg_gen_eqv_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
980 static void dec_xor(DisasContext
*dc
)
982 if (dc
->format
== OP_FMT_RI
) {
983 LOG_DIS("xori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
984 zero_extend(dc
->imm16
, 16));
986 LOG_DIS("xor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
989 if (dc
->format
== OP_FMT_RI
) {
990 tcg_gen_xori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
991 zero_extend(dc
->imm16
, 16));
993 tcg_gen_xor_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
997 static void dec_ill(DisasContext
*dc
)
999 qemu_log_mask(LOG_GUEST_ERROR
, "invalid opcode 0x%02x\n", dc
->opcode
);
1000 t_gen_illegal_insn(dc
);
1003 typedef void (*DecoderInfo
)(DisasContext
*dc
);
1004 static const DecoderInfo decinfo
[] = {
1005 dec_sru
, dec_nor
, dec_mul
, dec_sh
, dec_lb
, dec_sr
, dec_xor
, dec_lh
,
1006 dec_and
, dec_xnor
, dec_lw
, dec_lhu
, dec_sb
, dec_add
, dec_or
, dec_sl
,
1007 dec_lbu
, dec_be
, dec_bg
, dec_bge
, dec_bgeu
, dec_bgu
, dec_sw
, dec_bne
,
1008 dec_andhi
, dec_cmpe
, dec_cmpg
, dec_cmpge
, dec_cmpgeu
, dec_cmpgu
, dec_orhi
,
1010 dec_sru
, dec_nor
, dec_mul
, dec_divu
, dec_rcsr
, dec_sr
, dec_xor
, dec_ill
,
1011 dec_and
, dec_xnor
, dec_ill
, dec_scall
, dec_sextb
, dec_add
, dec_or
, dec_sl
,
1012 dec_b
, dec_modu
, dec_sub
, dec_user
, dec_wcsr
, dec_ill
, dec_call
, dec_sexth
,
1013 dec_bi
, dec_cmpe
, dec_cmpg
, dec_cmpge
, dec_cmpgeu
, dec_cmpgu
, dec_calli
,
1017 static inline void decode(DisasContext
*dc
, uint32_t ir
)
1020 LOG_DIS("%8.8x\t", dc
->ir
);
1022 dc
->opcode
= EXTRACT_FIELD(ir
, 26, 31);
1024 dc
->imm5
= EXTRACT_FIELD(ir
, 0, 4);
1025 dc
->imm16
= EXTRACT_FIELD(ir
, 0, 15);
1026 dc
->imm26
= EXTRACT_FIELD(ir
, 0, 25);
1028 dc
->csr
= EXTRACT_FIELD(ir
, 21, 25);
1029 dc
->r0
= EXTRACT_FIELD(ir
, 21, 25);
1030 dc
->r1
= EXTRACT_FIELD(ir
, 16, 20);
1031 dc
->r2
= EXTRACT_FIELD(ir
, 11, 15);
1033 /* bit 31 seems to indicate insn type. */
1034 if (ir
& (1 << 31)) {
1035 dc
->format
= OP_FMT_RR
;
1037 dc
->format
= OP_FMT_RI
;
1040 assert(ARRAY_SIZE(decinfo
) == 64);
1041 assert(dc
->opcode
< 64);
1043 decinfo
[dc
->opcode
](dc
);
1046 /* generate intermediate code for basic block 'tb'. */
1047 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int max_insns
)
1049 CPULM32State
*env
= cs
->env_ptr
;
1050 LM32CPU
*cpu
= env_archcpu(env
);
1051 struct DisasContext ctx
, *dc
= &ctx
;
1053 uint32_t page_start
;
1057 dc
->features
= cpu
->features
;
1058 dc
->num_breakpoints
= cpu
->num_breakpoints
;
1059 dc
->num_watchpoints
= cpu
->num_watchpoints
;
1062 dc
->is_jmp
= DISAS_NEXT
;
1064 dc
->singlestep_enabled
= cs
->singlestep_enabled
;
1067 qemu_log_mask(LOG_GUEST_ERROR
,
1068 "unaligned PC=%x. Ignoring lowest bits.\n", pc_start
);
1072 page_start
= pc_start
& TARGET_PAGE_MASK
;
1077 tcg_gen_insn_start(dc
->pc
);
1080 if (unlikely(cpu_breakpoint_test(cs
, dc
->pc
, BP_ANY
))) {
1081 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1082 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1083 dc
->is_jmp
= DISAS_UPDATE
;
1084 /* The address covered by the breakpoint must be included in
1085 [tb->pc, tb->pc + tb->size) in order to for it to be
1086 properly cleared -- thus we increment the PC here so that
1087 the logic setting tb->size below does the right thing. */
1093 LOG_DIS("%8.8x:\t", dc
->pc
);
1095 if (num_insns
== max_insns
&& (tb_cflags(tb
) & CF_LAST_IO
)) {
1099 decode(dc
, cpu_ldl_code(env
, dc
->pc
));
1101 } while (!dc
->is_jmp
1102 && !tcg_op_buf_full()
1103 && !cs
->singlestep_enabled
1105 && (dc
->pc
- page_start
< TARGET_PAGE_SIZE
)
1106 && num_insns
< max_insns
);
1109 if (unlikely(cs
->singlestep_enabled
)) {
1110 if (dc
->is_jmp
== DISAS_NEXT
) {
1111 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1113 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1115 switch (dc
->is_jmp
) {
1117 gen_goto_tb(dc
, 1, dc
->pc
);
1122 /* indicate that the hash table must be used
1123 to find the next TB */
1124 tcg_gen_exit_tb(NULL
, 0);
1127 /* nothing more to generate */
1132 gen_tb_end(tb
, num_insns
);
1134 tb
->size
= dc
->pc
- pc_start
;
1135 tb
->icount
= num_insns
;
1138 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
1139 && qemu_log_in_addr_range(pc_start
)) {
1140 FILE *logfile
= qemu_log_lock();
1142 log_target_disas(cs
, pc_start
, dc
->pc
- pc_start
);
1143 qemu_log_unlock(logfile
);
1148 void lm32_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
1150 LM32CPU
*cpu
= LM32_CPU(cs
);
1151 CPULM32State
*env
= &cpu
->env
;
1158 qemu_fprintf(f
, "IN: PC=%x %s\n",
1159 env
->pc
, lookup_symbol(env
->pc
));
1161 qemu_fprintf(f
, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
1163 (env
->ie
& IE_IE
) ? 1 : 0,
1164 (env
->ie
& IE_EIE
) ? 1 : 0,
1165 (env
->ie
& IE_BIE
) ? 1 : 0,
1166 lm32_pic_get_im(env
->pic_state
),
1167 lm32_pic_get_ip(env
->pic_state
));
1168 qemu_fprintf(f
, "eba=%8.8x deba=%8.8x\n",
1172 for (i
= 0; i
< 32; i
++) {
1173 qemu_fprintf(f
, "r%2.2d=%8.8x ", i
, env
->regs
[i
]);
1174 if ((i
+ 1) % 4 == 0) {
1175 qemu_fprintf(f
, "\n");
1178 qemu_fprintf(f
, "\n\n");
1181 void restore_state_to_opc(CPULM32State
*env
, TranslationBlock
*tb
,
1187 void lm32_translate_init(void)
1191 for (i
= 0; i
< ARRAY_SIZE(cpu_R
); i
++) {
1192 cpu_R
[i
] = tcg_global_mem_new(cpu_env
,
1193 offsetof(CPULM32State
, regs
[i
]),
1197 for (i
= 0; i
< ARRAY_SIZE(cpu_bp
); i
++) {
1198 cpu_bp
[i
] = tcg_global_mem_new(cpu_env
,
1199 offsetof(CPULM32State
, bp
[i
]),
1203 for (i
= 0; i
< ARRAY_SIZE(cpu_wp
); i
++) {
1204 cpu_wp
[i
] = tcg_global_mem_new(cpu_env
,
1205 offsetof(CPULM32State
, wp
[i
]),
1209 cpu_pc
= tcg_global_mem_new(cpu_env
,
1210 offsetof(CPULM32State
, pc
),
1212 cpu_ie
= tcg_global_mem_new(cpu_env
,
1213 offsetof(CPULM32State
, ie
),
1215 cpu_icc
= tcg_global_mem_new(cpu_env
,
1216 offsetof(CPULM32State
, icc
),
1218 cpu_dcc
= tcg_global_mem_new(cpu_env
,
1219 offsetof(CPULM32State
, dcc
),
1221 cpu_cc
= tcg_global_mem_new(cpu_env
,
1222 offsetof(CPULM32State
, cc
),
1224 cpu_cfg
= tcg_global_mem_new(cpu_env
,
1225 offsetof(CPULM32State
, cfg
),
1227 cpu_eba
= tcg_global_mem_new(cpu_env
,
1228 offsetof(CPULM32State
, eba
),
1230 cpu_dc
= tcg_global_mem_new(cpu_env
,
1231 offsetof(CPULM32State
, dc
),
1233 cpu_deba
= tcg_global_mem_new(cpu_env
,
1234 offsetof(CPULM32State
, deba
),