2 * LatticeMico32 main translation routines.
4 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
28 #include "hw/lm32/lm32_pic.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
38 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
40 # define LOG_DIS(...) do { } while (0)
43 #define EXTRACT_FIELD(src, start, end) \
44 (((src) >> start) & ((1 << (end - start + 1)) - 1))
48 static TCGv_env cpu_env
;
49 static TCGv cpu_R
[32];
59 static TCGv cpu_bp
[4];
60 static TCGv cpu_wp
[4];
62 #include "exec/gen-icount.h"
71 /* This is the state at translation time. */
72 typedef struct DisasContext
{
79 uint8_t r0
, r1
, r2
, csr
;
84 unsigned int delayed_branch
;
85 unsigned int tb_flags
, synced_flags
; /* tb dependent flags. */
88 struct TranslationBlock
*tb
;
89 int singlestep_enabled
;
92 uint8_t num_breakpoints
;
93 uint8_t num_watchpoints
;
96 static const char *regnames
[] = {
97 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
98 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
99 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
100 "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
101 "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
/*
 * Return the low 'width' bits of 'val' (zero extension by masking).
 * Fix: use an unsigned constant for the mask; "1 << width" with
 * width == 31 left-shifts into the sign bit, which is signed-overflow
 * undefined behavior in C.  Result is unchanged for all widths < 32.
 */
static inline int zero_extend(unsigned int val, int width)
{
    return val & ((1U << width) - 1);
}
110 static inline int sign_extend(unsigned int val
, int width
)
123 static inline void t_gen_raise_exception(DisasContext
*dc
, uint32_t index
)
125 TCGv_i32 tmp
= tcg_const_i32(index
);
127 gen_helper_raise_exception(cpu_env
, tmp
);
128 tcg_temp_free_i32(tmp
);
131 static inline void t_gen_illegal_insn(DisasContext
*dc
)
133 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
134 gen_helper_ill(cpu_env
);
137 static inline bool use_goto_tb(DisasContext
*dc
, target_ulong dest
)
139 if (unlikely(dc
->singlestep_enabled
)) {
143 #ifndef CONFIG_USER_ONLY
144 return (dc
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
150 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
152 if (use_goto_tb(dc
, dest
)) {
154 tcg_gen_movi_tl(cpu_pc
, dest
);
155 tcg_gen_exit_tb((uintptr_t)dc
->tb
+ n
);
157 tcg_gen_movi_tl(cpu_pc
, dest
);
158 if (dc
->singlestep_enabled
) {
159 t_gen_raise_exception(dc
, EXCP_DEBUG
);
165 static void dec_add(DisasContext
*dc
)
167 if (dc
->format
== OP_FMT_RI
) {
168 if (dc
->r0
== R_R0
) {
169 if (dc
->r1
== R_R0
&& dc
->imm16
== 0) {
172 LOG_DIS("mvi r%d, %d\n", dc
->r1
, sign_extend(dc
->imm16
, 16));
175 LOG_DIS("addi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
176 sign_extend(dc
->imm16
, 16));
179 LOG_DIS("add r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
182 if (dc
->format
== OP_FMT_RI
) {
183 tcg_gen_addi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
184 sign_extend(dc
->imm16
, 16));
186 tcg_gen_add_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
190 static void dec_and(DisasContext
*dc
)
192 if (dc
->format
== OP_FMT_RI
) {
193 LOG_DIS("andi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
194 zero_extend(dc
->imm16
, 16));
196 LOG_DIS("and r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
199 if (dc
->format
== OP_FMT_RI
) {
200 tcg_gen_andi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
201 zero_extend(dc
->imm16
, 16));
203 if (dc
->r0
== 0 && dc
->r1
== 0 && dc
->r2
== 0) {
204 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
205 gen_helper_hlt(cpu_env
);
207 tcg_gen_and_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
212 static void dec_andhi(DisasContext
*dc
)
214 LOG_DIS("andhi r%d, r%d, %d\n", dc
->r2
, dc
->r0
, dc
->imm16
);
216 tcg_gen_andi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], (dc
->imm16
<< 16));
219 static void dec_b(DisasContext
*dc
)
221 if (dc
->r0
== R_RA
) {
223 } else if (dc
->r0
== R_EA
) {
225 } else if (dc
->r0
== R_BA
) {
228 LOG_DIS("b r%d\n", dc
->r0
);
231 /* restore IE.IE in case of an eret */
232 if (dc
->r0
== R_EA
) {
233 TCGv t0
= tcg_temp_new();
234 TCGLabel
*l1
= gen_new_label();
235 tcg_gen_andi_tl(t0
, cpu_ie
, IE_EIE
);
236 tcg_gen_ori_tl(cpu_ie
, cpu_ie
, IE_IE
);
237 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, IE_EIE
, l1
);
238 tcg_gen_andi_tl(cpu_ie
, cpu_ie
, ~IE_IE
);
241 } else if (dc
->r0
== R_BA
) {
242 TCGv t0
= tcg_temp_new();
243 TCGLabel
*l1
= gen_new_label();
244 tcg_gen_andi_tl(t0
, cpu_ie
, IE_BIE
);
245 tcg_gen_ori_tl(cpu_ie
, cpu_ie
, IE_IE
);
246 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, IE_BIE
, l1
);
247 tcg_gen_andi_tl(cpu_ie
, cpu_ie
, ~IE_IE
);
251 tcg_gen_mov_tl(cpu_pc
, cpu_R
[dc
->r0
]);
253 dc
->is_jmp
= DISAS_JUMP
;
256 static void dec_bi(DisasContext
*dc
)
258 LOG_DIS("bi %d\n", sign_extend(dc
->imm26
<< 2, 26));
260 gen_goto_tb(dc
, 0, dc
->pc
+ (sign_extend(dc
->imm26
<< 2, 26)));
262 dc
->is_jmp
= DISAS_TB_JUMP
;
265 static inline void gen_cond_branch(DisasContext
*dc
, int cond
)
267 TCGLabel
*l1
= gen_new_label();
268 tcg_gen_brcond_tl(cond
, cpu_R
[dc
->r0
], cpu_R
[dc
->r1
], l1
);
269 gen_goto_tb(dc
, 0, dc
->pc
+ 4);
271 gen_goto_tb(dc
, 1, dc
->pc
+ (sign_extend(dc
->imm16
<< 2, 16)));
272 dc
->is_jmp
= DISAS_TB_JUMP
;
275 static void dec_be(DisasContext
*dc
)
277 LOG_DIS("be r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
278 sign_extend(dc
->imm16
, 16) * 4);
280 gen_cond_branch(dc
, TCG_COND_EQ
);
283 static void dec_bg(DisasContext
*dc
)
285 LOG_DIS("bg r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
286 sign_extend(dc
->imm16
, 16 * 4));
288 gen_cond_branch(dc
, TCG_COND_GT
);
291 static void dec_bge(DisasContext
*dc
)
293 LOG_DIS("bge r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
294 sign_extend(dc
->imm16
, 16) * 4);
296 gen_cond_branch(dc
, TCG_COND_GE
);
299 static void dec_bgeu(DisasContext
*dc
)
301 LOG_DIS("bgeu r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
302 sign_extend(dc
->imm16
, 16) * 4);
304 gen_cond_branch(dc
, TCG_COND_GEU
);
307 static void dec_bgu(DisasContext
*dc
)
309 LOG_DIS("bgu r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
310 sign_extend(dc
->imm16
, 16) * 4);
312 gen_cond_branch(dc
, TCG_COND_GTU
);
315 static void dec_bne(DisasContext
*dc
)
317 LOG_DIS("bne r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
318 sign_extend(dc
->imm16
, 16) * 4);
320 gen_cond_branch(dc
, TCG_COND_NE
);
323 static void dec_call(DisasContext
*dc
)
325 LOG_DIS("call r%d\n", dc
->r0
);
327 tcg_gen_movi_tl(cpu_R
[R_RA
], dc
->pc
+ 4);
328 tcg_gen_mov_tl(cpu_pc
, cpu_R
[dc
->r0
]);
330 dc
->is_jmp
= DISAS_JUMP
;
333 static void dec_calli(DisasContext
*dc
)
335 LOG_DIS("calli %d\n", sign_extend(dc
->imm26
, 26) * 4);
337 tcg_gen_movi_tl(cpu_R
[R_RA
], dc
->pc
+ 4);
338 gen_goto_tb(dc
, 0, dc
->pc
+ (sign_extend(dc
->imm26
<< 2, 26)));
340 dc
->is_jmp
= DISAS_TB_JUMP
;
343 static inline void gen_compare(DisasContext
*dc
, int cond
)
345 int rX
= (dc
->format
== OP_FMT_RR
) ? dc
->r2
: dc
->r1
;
346 int rY
= (dc
->format
== OP_FMT_RR
) ? dc
->r0
: dc
->r0
;
347 int rZ
= (dc
->format
== OP_FMT_RR
) ? dc
->r1
: -1;
350 if (dc
->format
== OP_FMT_RI
) {
354 i
= zero_extend(dc
->imm16
, 16);
357 i
= sign_extend(dc
->imm16
, 16);
361 tcg_gen_setcondi_tl(cond
, cpu_R
[rX
], cpu_R
[rY
], i
);
363 tcg_gen_setcond_tl(cond
, cpu_R
[rX
], cpu_R
[rY
], cpu_R
[rZ
]);
367 static void dec_cmpe(DisasContext
*dc
)
369 if (dc
->format
== OP_FMT_RI
) {
370 LOG_DIS("cmpei r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
371 sign_extend(dc
->imm16
, 16));
373 LOG_DIS("cmpe r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
376 gen_compare(dc
, TCG_COND_EQ
);
379 static void dec_cmpg(DisasContext
*dc
)
381 if (dc
->format
== OP_FMT_RI
) {
382 LOG_DIS("cmpgi r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
383 sign_extend(dc
->imm16
, 16));
385 LOG_DIS("cmpg r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
388 gen_compare(dc
, TCG_COND_GT
);
391 static void dec_cmpge(DisasContext
*dc
)
393 if (dc
->format
== OP_FMT_RI
) {
394 LOG_DIS("cmpgei r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
395 sign_extend(dc
->imm16
, 16));
397 LOG_DIS("cmpge r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
400 gen_compare(dc
, TCG_COND_GE
);
403 static void dec_cmpgeu(DisasContext
*dc
)
405 if (dc
->format
== OP_FMT_RI
) {
406 LOG_DIS("cmpgeui r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
407 zero_extend(dc
->imm16
, 16));
409 LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
412 gen_compare(dc
, TCG_COND_GEU
);
415 static void dec_cmpgu(DisasContext
*dc
)
417 if (dc
->format
== OP_FMT_RI
) {
418 LOG_DIS("cmpgui r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
419 zero_extend(dc
->imm16
, 16));
421 LOG_DIS("cmpgu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
424 gen_compare(dc
, TCG_COND_GTU
);
427 static void dec_cmpne(DisasContext
*dc
)
429 if (dc
->format
== OP_FMT_RI
) {
430 LOG_DIS("cmpnei r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
431 sign_extend(dc
->imm16
, 16));
433 LOG_DIS("cmpne r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
436 gen_compare(dc
, TCG_COND_NE
);
439 static void dec_divu(DisasContext
*dc
)
443 LOG_DIS("divu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
445 if (!(dc
->features
& LM32_FEATURE_DIVIDE
)) {
446 qemu_log_mask(LOG_GUEST_ERROR
, "hardware divider is not available\n");
447 t_gen_illegal_insn(dc
);
451 l1
= gen_new_label();
452 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_R
[dc
->r1
], 0, l1
);
453 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
454 t_gen_raise_exception(dc
, EXCP_DIVIDE_BY_ZERO
);
456 tcg_gen_divu_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
459 static void dec_lb(DisasContext
*dc
)
463 LOG_DIS("lb r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
466 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
467 tcg_gen_qemu_ld8s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
471 static void dec_lbu(DisasContext
*dc
)
475 LOG_DIS("lbu r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
478 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
479 tcg_gen_qemu_ld8u(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
483 static void dec_lh(DisasContext
*dc
)
487 LOG_DIS("lh r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
490 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
491 tcg_gen_qemu_ld16s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
495 static void dec_lhu(DisasContext
*dc
)
499 LOG_DIS("lhu r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
502 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
503 tcg_gen_qemu_ld16u(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
507 static void dec_lw(DisasContext
*dc
)
511 LOG_DIS("lw r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, sign_extend(dc
->imm16
, 16));
514 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
515 tcg_gen_qemu_ld32s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
519 static void dec_modu(DisasContext
*dc
)
523 LOG_DIS("modu r%d, r%d, %d\n", dc
->r2
, dc
->r0
, dc
->r1
);
525 if (!(dc
->features
& LM32_FEATURE_DIVIDE
)) {
526 qemu_log_mask(LOG_GUEST_ERROR
, "hardware divider is not available\n");
527 t_gen_illegal_insn(dc
);
531 l1
= gen_new_label();
532 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_R
[dc
->r1
], 0, l1
);
533 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
534 t_gen_raise_exception(dc
, EXCP_DIVIDE_BY_ZERO
);
536 tcg_gen_remu_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
539 static void dec_mul(DisasContext
*dc
)
541 if (dc
->format
== OP_FMT_RI
) {
542 LOG_DIS("muli r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
543 sign_extend(dc
->imm16
, 16));
545 LOG_DIS("mul r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
548 if (!(dc
->features
& LM32_FEATURE_MULTIPLY
)) {
549 qemu_log_mask(LOG_GUEST_ERROR
,
550 "hardware multiplier is not available\n");
551 t_gen_illegal_insn(dc
);
555 if (dc
->format
== OP_FMT_RI
) {
556 tcg_gen_muli_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
557 sign_extend(dc
->imm16
, 16));
559 tcg_gen_mul_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
563 static void dec_nor(DisasContext
*dc
)
565 if (dc
->format
== OP_FMT_RI
) {
566 LOG_DIS("nori r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
567 zero_extend(dc
->imm16
, 16));
569 LOG_DIS("nor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
572 if (dc
->format
== OP_FMT_RI
) {
573 TCGv t0
= tcg_temp_new();
574 tcg_gen_movi_tl(t0
, zero_extend(dc
->imm16
, 16));
575 tcg_gen_nor_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], t0
);
578 tcg_gen_nor_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
582 static void dec_or(DisasContext
*dc
)
584 if (dc
->format
== OP_FMT_RI
) {
585 LOG_DIS("ori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
586 zero_extend(dc
->imm16
, 16));
588 if (dc
->r1
== R_R0
) {
589 LOG_DIS("mv r%d, r%d\n", dc
->r2
, dc
->r0
);
591 LOG_DIS("or r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
595 if (dc
->format
== OP_FMT_RI
) {
596 tcg_gen_ori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
597 zero_extend(dc
->imm16
, 16));
599 tcg_gen_or_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
603 static void dec_orhi(DisasContext
*dc
)
605 if (dc
->r0
== R_R0
) {
606 LOG_DIS("mvhi r%d, %d\n", dc
->r1
, dc
->imm16
);
608 LOG_DIS("orhi r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm16
);
611 tcg_gen_ori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], (dc
->imm16
<< 16));
614 static void dec_scall(DisasContext
*dc
)
619 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
620 t_gen_raise_exception(dc
, EXCP_BREAKPOINT
);
624 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
625 t_gen_raise_exception(dc
, EXCP_SYSTEMCALL
);
628 qemu_log_mask(LOG_GUEST_ERROR
, "invalid opcode @0x%x", dc
->pc
);
629 t_gen_illegal_insn(dc
);
634 static void dec_rcsr(DisasContext
*dc
)
636 LOG_DIS("rcsr r%d, %d\n", dc
->r2
, dc
->csr
);
640 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_ie
);
643 gen_helper_rcsr_im(cpu_R
[dc
->r2
], cpu_env
);
646 gen_helper_rcsr_ip(cpu_R
[dc
->r2
], cpu_env
);
649 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_cc
);
652 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_cfg
);
655 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_eba
);
658 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_dc
);
661 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_deba
);
664 gen_helper_rcsr_jtx(cpu_R
[dc
->r2
], cpu_env
);
667 gen_helper_rcsr_jrx(cpu_R
[dc
->r2
], cpu_env
);
679 qemu_log_mask(LOG_GUEST_ERROR
, "invalid read access csr=%x\n", dc
->csr
);
682 qemu_log_mask(LOG_GUEST_ERROR
, "read_csr: unknown csr=%x\n", dc
->csr
);
687 static void dec_sb(DisasContext
*dc
)
691 LOG_DIS("sb (r%d+%d), r%d\n", dc
->r0
, dc
->imm16
, dc
->r1
);
694 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
695 tcg_gen_qemu_st8(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
699 static void dec_sextb(DisasContext
*dc
)
701 LOG_DIS("sextb r%d, r%d\n", dc
->r2
, dc
->r0
);
703 if (!(dc
->features
& LM32_FEATURE_SIGN_EXTEND
)) {
704 qemu_log_mask(LOG_GUEST_ERROR
,
705 "hardware sign extender is not available\n");
706 t_gen_illegal_insn(dc
);
710 tcg_gen_ext8s_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
]);
713 static void dec_sexth(DisasContext
*dc
)
715 LOG_DIS("sexth r%d, r%d\n", dc
->r2
, dc
->r0
);
717 if (!(dc
->features
& LM32_FEATURE_SIGN_EXTEND
)) {
718 qemu_log_mask(LOG_GUEST_ERROR
,
719 "hardware sign extender is not available\n");
720 t_gen_illegal_insn(dc
);
724 tcg_gen_ext16s_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
]);
727 static void dec_sh(DisasContext
*dc
)
731 LOG_DIS("sh (r%d+%d), r%d\n", dc
->r0
, dc
->imm16
, dc
->r1
);
734 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
735 tcg_gen_qemu_st16(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
739 static void dec_sl(DisasContext
*dc
)
741 if (dc
->format
== OP_FMT_RI
) {
742 LOG_DIS("sli r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
744 LOG_DIS("sl r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
747 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
748 qemu_log_mask(LOG_GUEST_ERROR
, "hardware shifter is not available\n");
749 t_gen_illegal_insn(dc
);
753 if (dc
->format
== OP_FMT_RI
) {
754 tcg_gen_shli_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
756 TCGv t0
= tcg_temp_new();
757 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
758 tcg_gen_shl_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
763 static void dec_sr(DisasContext
*dc
)
765 if (dc
->format
== OP_FMT_RI
) {
766 LOG_DIS("sri r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
768 LOG_DIS("sr r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
771 /* The real CPU (w/o hardware shifter) only supports right shift by exactly
773 if (dc
->format
== OP_FMT_RI
) {
774 if (!(dc
->features
& LM32_FEATURE_SHIFT
) && (dc
->imm5
!= 1)) {
775 qemu_log_mask(LOG_GUEST_ERROR
,
776 "hardware shifter is not available\n");
777 t_gen_illegal_insn(dc
);
780 tcg_gen_sari_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
782 TCGLabel
*l1
= gen_new_label();
783 TCGLabel
*l2
= gen_new_label();
784 TCGv t0
= tcg_temp_local_new();
785 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
787 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
788 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 1, l1
);
789 t_gen_illegal_insn(dc
);
794 tcg_gen_sar_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
801 static void dec_sru(DisasContext
*dc
)
803 if (dc
->format
== OP_FMT_RI
) {
804 LOG_DIS("srui r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
806 LOG_DIS("sru r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
809 if (dc
->format
== OP_FMT_RI
) {
810 if (!(dc
->features
& LM32_FEATURE_SHIFT
) && (dc
->imm5
!= 1)) {
811 qemu_log_mask(LOG_GUEST_ERROR
,
812 "hardware shifter is not available\n");
813 t_gen_illegal_insn(dc
);
816 tcg_gen_shri_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
818 TCGLabel
*l1
= gen_new_label();
819 TCGLabel
*l2
= gen_new_label();
820 TCGv t0
= tcg_temp_local_new();
821 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
823 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
824 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 1, l1
);
825 t_gen_illegal_insn(dc
);
830 tcg_gen_shr_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
837 static void dec_sub(DisasContext
*dc
)
839 LOG_DIS("sub r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
841 tcg_gen_sub_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
844 static void dec_sw(DisasContext
*dc
)
848 LOG_DIS("sw (r%d+%d), r%d\n", dc
->r0
, sign_extend(dc
->imm16
, 16), dc
->r1
);
851 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
852 tcg_gen_qemu_st32(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
856 static void dec_user(DisasContext
*dc
)
860 qemu_log_mask(LOG_GUEST_ERROR
, "user instruction undefined\n");
861 t_gen_illegal_insn(dc
);
864 static void dec_wcsr(DisasContext
*dc
)
868 LOG_DIS("wcsr r%d, %d\n", dc
->r1
, dc
->csr
);
872 tcg_gen_mov_tl(cpu_ie
, cpu_R
[dc
->r1
]);
873 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
874 dc
->is_jmp
= DISAS_UPDATE
;
877 /* mark as an io operation because it could cause an interrupt */
878 if (dc
->tb
->cflags
& CF_USE_ICOUNT
) {
881 gen_helper_wcsr_im(cpu_env
, cpu_R
[dc
->r1
]);
882 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
883 if (dc
->tb
->cflags
& CF_USE_ICOUNT
) {
886 dc
->is_jmp
= DISAS_UPDATE
;
889 /* mark as an io operation because it could cause an interrupt */
890 if (dc
->tb
->cflags
& CF_USE_ICOUNT
) {
893 gen_helper_wcsr_ip(cpu_env
, cpu_R
[dc
->r1
]);
894 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
895 if (dc
->tb
->cflags
& CF_USE_ICOUNT
) {
898 dc
->is_jmp
= DISAS_UPDATE
;
907 tcg_gen_mov_tl(cpu_eba
, cpu_R
[dc
->r1
]);
910 tcg_gen_mov_tl(cpu_deba
, cpu_R
[dc
->r1
]);
913 gen_helper_wcsr_jtx(cpu_env
, cpu_R
[dc
->r1
]);
916 gen_helper_wcsr_jrx(cpu_env
, cpu_R
[dc
->r1
]);
919 gen_helper_wcsr_dc(cpu_env
, cpu_R
[dc
->r1
]);
925 no
= dc
->csr
- CSR_BP0
;
926 if (dc
->num_breakpoints
<= no
) {
927 qemu_log_mask(LOG_GUEST_ERROR
,
928 "breakpoint #%i is not available\n", no
);
929 t_gen_illegal_insn(dc
);
932 gen_helper_wcsr_bp(cpu_env
, cpu_R
[dc
->r1
], tcg_const_i32(no
));
938 no
= dc
->csr
- CSR_WP0
;
939 if (dc
->num_watchpoints
<= no
) {
940 qemu_log_mask(LOG_GUEST_ERROR
,
941 "watchpoint #%i is not available\n", no
);
942 t_gen_illegal_insn(dc
);
945 gen_helper_wcsr_wp(cpu_env
, cpu_R
[dc
->r1
], tcg_const_i32(no
));
949 qemu_log_mask(LOG_GUEST_ERROR
, "invalid write access csr=%x\n",
953 qemu_log_mask(LOG_GUEST_ERROR
, "write_csr: unknown csr=%x\n",
959 static void dec_xnor(DisasContext
*dc
)
961 if (dc
->format
== OP_FMT_RI
) {
962 LOG_DIS("xnori r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
963 zero_extend(dc
->imm16
, 16));
965 if (dc
->r1
== R_R0
) {
966 LOG_DIS("not r%d, r%d\n", dc
->r2
, dc
->r0
);
968 LOG_DIS("xnor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
972 if (dc
->format
== OP_FMT_RI
) {
973 tcg_gen_xori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
974 zero_extend(dc
->imm16
, 16));
975 tcg_gen_not_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r1
]);
977 tcg_gen_eqv_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
981 static void dec_xor(DisasContext
*dc
)
983 if (dc
->format
== OP_FMT_RI
) {
984 LOG_DIS("xori r%d, r%d, %d\n", dc
->r0
, dc
->r1
,
985 zero_extend(dc
->imm16
, 16));
987 LOG_DIS("xor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
990 if (dc
->format
== OP_FMT_RI
) {
991 tcg_gen_xori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
992 zero_extend(dc
->imm16
, 16));
994 tcg_gen_xor_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
998 static void dec_ill(DisasContext
*dc
)
1000 qemu_log_mask(LOG_GUEST_ERROR
, "invalid opcode 0x%02x\n", dc
->opcode
);
1001 t_gen_illegal_insn(dc
);
1004 typedef void (*DecoderInfo
)(DisasContext
*dc
);
1005 static const DecoderInfo decinfo
[] = {
1006 dec_sru
, dec_nor
, dec_mul
, dec_sh
, dec_lb
, dec_sr
, dec_xor
, dec_lh
,
1007 dec_and
, dec_xnor
, dec_lw
, dec_lhu
, dec_sb
, dec_add
, dec_or
, dec_sl
,
1008 dec_lbu
, dec_be
, dec_bg
, dec_bge
, dec_bgeu
, dec_bgu
, dec_sw
, dec_bne
,
1009 dec_andhi
, dec_cmpe
, dec_cmpg
, dec_cmpge
, dec_cmpgeu
, dec_cmpgu
, dec_orhi
,
1011 dec_sru
, dec_nor
, dec_mul
, dec_divu
, dec_rcsr
, dec_sr
, dec_xor
, dec_ill
,
1012 dec_and
, dec_xnor
, dec_ill
, dec_scall
, dec_sextb
, dec_add
, dec_or
, dec_sl
,
1013 dec_b
, dec_modu
, dec_sub
, dec_user
, dec_wcsr
, dec_ill
, dec_call
, dec_sexth
,
1014 dec_bi
, dec_cmpe
, dec_cmpg
, dec_cmpge
, dec_cmpgeu
, dec_cmpgu
, dec_calli
,
1018 static inline void decode(DisasContext
*dc
, uint32_t ir
)
1021 LOG_DIS("%8.8x\t", dc
->ir
);
1023 dc
->opcode
= EXTRACT_FIELD(ir
, 26, 31);
1025 dc
->imm5
= EXTRACT_FIELD(ir
, 0, 4);
1026 dc
->imm16
= EXTRACT_FIELD(ir
, 0, 15);
1027 dc
->imm26
= EXTRACT_FIELD(ir
, 0, 25);
1029 dc
->csr
= EXTRACT_FIELD(ir
, 21, 25);
1030 dc
->r0
= EXTRACT_FIELD(ir
, 21, 25);
1031 dc
->r1
= EXTRACT_FIELD(ir
, 16, 20);
1032 dc
->r2
= EXTRACT_FIELD(ir
, 11, 15);
1034 /* bit 31 seems to indicate insn type. */
1035 if (ir
& (1 << 31)) {
1036 dc
->format
= OP_FMT_RR
;
1038 dc
->format
= OP_FMT_RI
;
1041 assert(ARRAY_SIZE(decinfo
) == 64);
1042 assert(dc
->opcode
< 64);
1044 decinfo
[dc
->opcode
](dc
);
1047 /* generate intermediate code for basic block 'tb'. */
1048 void gen_intermediate_code(CPULM32State
*env
, struct TranslationBlock
*tb
)
1050 LM32CPU
*cpu
= lm32_env_get_cpu(env
);
1051 CPUState
*cs
= CPU(cpu
);
1052 struct DisasContext ctx
, *dc
= &ctx
;
1054 uint32_t next_page_start
;
1059 dc
->features
= cpu
->features
;
1060 dc
->num_breakpoints
= cpu
->num_breakpoints
;
1061 dc
->num_watchpoints
= cpu
->num_watchpoints
;
1064 dc
->is_jmp
= DISAS_NEXT
;
1066 dc
->singlestep_enabled
= cs
->singlestep_enabled
;
1069 qemu_log_mask(LOG_GUEST_ERROR
,
1070 "unaligned PC=%x. Ignoring lowest bits.\n", pc_start
);
1074 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
1076 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1077 if (max_insns
== 0) {
1078 max_insns
= CF_COUNT_MASK
;
1080 if (max_insns
> TCG_MAX_INSNS
) {
1081 max_insns
= TCG_MAX_INSNS
;
1086 tcg_gen_insn_start(dc
->pc
);
1089 if (unlikely(cpu_breakpoint_test(cs
, dc
->pc
, BP_ANY
))) {
1090 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1091 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1092 dc
->is_jmp
= DISAS_UPDATE
;
1093 /* The address covered by the breakpoint must be included in
1094 [tb->pc, tb->pc + tb->size) in order to for it to be
1095 properly cleared -- thus we increment the PC here so that
1096 the logic setting tb->size below does the right thing. */
1102 LOG_DIS("%8.8x:\t", dc
->pc
);
1104 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
1108 decode(dc
, cpu_ldl_code(env
, dc
->pc
));
1110 } while (!dc
->is_jmp
1111 && !tcg_op_buf_full()
1112 && !cs
->singlestep_enabled
1114 && (dc
->pc
< next_page_start
)
1115 && num_insns
< max_insns
);
1117 if (tb
->cflags
& CF_LAST_IO
) {
1121 if (unlikely(cs
->singlestep_enabled
)) {
1122 if (dc
->is_jmp
== DISAS_NEXT
) {
1123 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1125 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1127 switch (dc
->is_jmp
) {
1129 gen_goto_tb(dc
, 1, dc
->pc
);
1134 /* indicate that the hash table must be used
1135 to find the next TB */
1139 /* nothing more to generate */
1144 gen_tb_end(tb
, num_insns
);
1146 tb
->size
= dc
->pc
- pc_start
;
1147 tb
->icount
= num_insns
;
1150 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
1151 && qemu_log_in_addr_range(pc_start
)) {
1153 log_target_disas(cs
, pc_start
, dc
->pc
- pc_start
, 0);
1154 qemu_log("\nisize=%d osize=%d\n",
1155 dc
->pc
- pc_start
, tcg_op_buf_count());
1160 void lm32_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
1163 LM32CPU
*cpu
= LM32_CPU(cs
);
1164 CPULM32State
*env
= &cpu
->env
;
1171 cpu_fprintf(f
, "IN: PC=%x %s\n",
1172 env
->pc
, lookup_symbol(env
->pc
));
1174 cpu_fprintf(f
, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
1176 (env
->ie
& IE_IE
) ? 1 : 0,
1177 (env
->ie
& IE_EIE
) ? 1 : 0,
1178 (env
->ie
& IE_BIE
) ? 1 : 0,
1179 lm32_pic_get_im(env
->pic_state
),
1180 lm32_pic_get_ip(env
->pic_state
));
1181 cpu_fprintf(f
, "eba=%8.8x deba=%8.8x\n",
1185 for (i
= 0; i
< 32; i
++) {
1186 cpu_fprintf(f
, "r%2.2d=%8.8x ", i
, env
->regs
[i
]);
1187 if ((i
+ 1) % 4 == 0) {
1188 cpu_fprintf(f
, "\n");
1191 cpu_fprintf(f
, "\n\n");
1194 void restore_state_to_opc(CPULM32State
*env
, TranslationBlock
*tb
,
1200 void lm32_translate_init(void)
1204 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
1205 tcg_ctx
.tcg_env
= cpu_env
;
1207 for (i
= 0; i
< ARRAY_SIZE(cpu_R
); i
++) {
1208 cpu_R
[i
] = tcg_global_mem_new(cpu_env
,
1209 offsetof(CPULM32State
, regs
[i
]),
1213 for (i
= 0; i
< ARRAY_SIZE(cpu_bp
); i
++) {
1214 cpu_bp
[i
] = tcg_global_mem_new(cpu_env
,
1215 offsetof(CPULM32State
, bp
[i
]),
1219 for (i
= 0; i
< ARRAY_SIZE(cpu_wp
); i
++) {
1220 cpu_wp
[i
] = tcg_global_mem_new(cpu_env
,
1221 offsetof(CPULM32State
, wp
[i
]),
1225 cpu_pc
= tcg_global_mem_new(cpu_env
,
1226 offsetof(CPULM32State
, pc
),
1228 cpu_ie
= tcg_global_mem_new(cpu_env
,
1229 offsetof(CPULM32State
, ie
),
1231 cpu_icc
= tcg_global_mem_new(cpu_env
,
1232 offsetof(CPULM32State
, icc
),
1234 cpu_dcc
= tcg_global_mem_new(cpu_env
,
1235 offsetof(CPULM32State
, dcc
),
1237 cpu_cc
= tcg_global_mem_new(cpu_env
,
1238 offsetof(CPULM32State
, cc
),
1240 cpu_cfg
= tcg_global_mem_new(cpu_env
,
1241 offsetof(CPULM32State
, cfg
),
1243 cpu_eba
= tcg_global_mem_new(cpu_env
,
1244 offsetof(CPULM32State
, eba
),
1246 cpu_dc
= tcg_global_mem_new(cpu_env
,
1247 offsetof(CPULM32State
, dc
),
1249 cpu_deba
= tcg_global_mem_new(cpu_env
,
1250 offsetof(CPULM32State
, deba
),