/*
 *  LatticeMico32 main translation routines.
 *
 *  Copyright (c) 2010 Michael Walle <michael@walle.cc>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"
#include "hw/lm32/lm32_pic.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define DISAS_LM32 1
#if DISAS_LM32
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

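/* Extract the bit field [start, end] (both inclusive, bit 0 = LSB) from src.
 * decode() below uses this to pull register numbers, CSR numbers and
 * immediates out of the 32-bit instruction word. */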
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_pc;
static TCGv cpu_ie;
static TCGv cpu_icc;
static TCGv cpu_dcc;
static TCGv cpu_cc;
static TCGv cpu_cfg;
static TCGv cpu_eba;
static TCGv cpu_dc;
static TCGv cpu_deba;
static TCGv cpu_bp[4];
static TCGv cpu_wp[4];

#include "exec/gen-icount.h"

enum {
    OP_FMT_RI,
    OP_FMT_RR,
};

#define MEM_INDEX 0

/* This is the state at translation time.  */
typedef struct DisasContext {
    target_ulong pc;

    /* Decoder.  */
    int format;
    uint32_t ir;
    uint8_t opcode;
    uint8_t r0, r1, r2, csr;
    uint16_t imm5;
    uint16_t imm16;
    uint32_t imm26;

    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    int is_jmp;

    struct TranslationBlock *tb;
    int singlestep_enabled;

    uint32_t features;
    uint8_t num_breakpoints;
    uint8_t num_watchpoints;
} DisasContext;

static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};

static inline int zero_extend(unsigned int val, int width)
{
    return val & ((1 << width) - 1);
}

static inline int sign_extend(unsigned int val, int width)
{
    /* Shift the value up so its top bit is bit 31, then shift it back down
       arithmetically so the sign bit is replicated. */
    return (int)(val << (32 - width)) >> (32 - width);
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void t_gen_illegal_insn(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_helper_ill(cpu_env);
}

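/* Emit a jump to 'dest'.  If the destination stays on the same guest page
 * and we are not single-stepping, chain directly to the next translation
 * block; otherwise just update the PC and exit to the main loop (raising a
 * debug exception first when single-stepping). */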
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
            likely(!dc->singlestep_enabled)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}

static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        if (dc->r0 == R_R0) {
            if (dc->r1 == R_R0 && dc->imm16 == 0) {
                LOG_DIS("nop\n");
            } else {
                LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
            }
        } else {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
                    sign_extend(dc->imm16, 16));
        }
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

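/* "and r0, r0, r0" has no architectural effect; QEMU uses that encoding as a
 * halt hint, so it is special-cased below to advance the PC and stop the CPU
 * via the hlt helper. */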
static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
            tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
            gen_helper_hlt(cpu_env);
        } else {
            tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        }
    }
}

static void dec_andhi(DisasContext *dc)
{
    LOG_DIS("andhi r%d, r%d, %d\n", dc->r2, dc->r0, dc->imm16);

    tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

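/* Branch to a register.  b ra/ea/ba are the ret/eret/bret idioms; for eret
 * and bret the saved interrupt enable bit (IE.EIE resp. IE.BIE) is copied
 * back into IE.IE, which is what the brcond/label sequences below emit. */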
static void dec_b(DisasContext *dc)
{
    if (dc->r0 == R_RA) {
        LOG_DIS("ret\n");
    } else if (dc->r0 == R_EA) {
        LOG_DIS("eret\n");
    } else if (dc->r0 == R_BA) {
        LOG_DIS("bret\n");
    } else {
        LOG_DIS("b r%d\n", dc->r0);
    }

    /* restore IE.IE in case of an eret */
    if (dc->r0 == R_EA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    } else if (dc->r0 == R_BA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    }

    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_bi(DisasContext *dc)
{
    LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));

    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

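/* Common helper for the conditional branches: fall through to pc + 4 when
 * the comparison fails, otherwise branch to pc plus the sign-extended,
 * word-aligned offset.  Both paths end the translation block. */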
static inline void gen_cond_branch(DisasContext *dc, int cond)
{
    int l1;

    l1 = gen_new_label();
    tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
    gen_goto_tb(dc, 0, dc->pc + 4);
    gen_set_label(l1);
    gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
    dc->is_jmp = DISAS_TB_JUMP;
}

static void dec_be(DisasContext *dc)
{
    LOG_DIS("be r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_EQ);
}

static void dec_bg(DisasContext *dc)
{
    LOG_DIS("bg r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GT);
}

static void dec_bge(DisasContext *dc)
{
    LOG_DIS("bge r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GE);
}

static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}

static void dec_bgu(DisasContext *dc)
{
    LOG_DIS("bgu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GTU);
}

static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}

static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

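/* Common helper for the compare instructions.  For the RR format the result
 * goes to r2 and the operands are r0/r1; for the RI format the result goes
 * to r1 and the second operand is the 16-bit immediate, zero-extended for
 * the unsigned conditions and sign-extended otherwise. */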
static inline void gen_compare(DisasContext *dc, int cond)
{
    int rX = (dc->format == OP_FMT_RR) ? dc->r2 : dc->r1;
    int rY = (dc->format == OP_FMT_RR) ? dc->r0 : dc->r0;
    int rZ = (dc->format == OP_FMT_RR) ? dc->r1 : -1;
    int i;

    if (dc->format == OP_FMT_RI) {
        switch (cond) {
        case TCG_COND_GEU:
        case TCG_COND_GTU:
            i = zero_extend(dc->imm16, 16);
            break;
        default:
            i = sign_extend(dc->imm16, 16);
            break;
        }

        tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY], i);
    } else {
        tcg_gen_setcond_tl(cond, cpu_R[rX], cpu_R[rY], cpu_R[rZ]);
    }
}

static void dec_cmpe(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_EQ);
}

static void dec_cmpg(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GT);
}

static void dec_cmpge(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GE);
}

static void dec_cmpgeu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GEU);
}

static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GTU);
}

static void dec_cmpne(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_NE);
}

static void dec_divu(DisasContext *dc)
{
    int l1;

    LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->features & LM32_FEATURE_DIVIDE)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_lb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lbu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lhu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_modu(DisasContext *dc)
{
    int l1;

    LOG_DIS("modu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->features & LM32_FEATURE_DIVIDE)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r0, dc->r1,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware multiplier is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_nor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("nori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_or(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_orhi(DisasContext *dc)
{
    if (dc->r0 == R_R0) {
        LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
    } else {
        LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
    }

    tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

static void dec_scall(DisasContext *dc)
{
    switch (dc->imm5) {
    case 2:
        LOG_DIS("break\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_BREAKPOINT);
        break;
    case 7:
        LOG_DIS("scall\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x", dc->pc);
        t_gen_illegal_insn(dc);
        break;
    }
}

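/* Read a CSR into a register.  CSRs with side effects or externally
 * maintained state (IM, IP, JTX, JRX) go through helpers; the rest are
 * plain moves from the corresponding TCG globals, and the write-only
 * debug CSRs reject reads. */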
static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}

static void dec_sb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sextb(DisasContext *dc)
{
    LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_SHIFT)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* The real CPU (w/o hardware shifter) only supports right shift by exactly
     * one bit */
    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        int l1 = gen_new_label();
        int l2 = gen_new_label();
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}

static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        int l1 = gen_new_label();
        int l2 = gen_new_label();
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}

static void dec_sub(DisasContext *dc)
{
    LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_sw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_user(DisasContext *dc)
{
    LOG_DIS("user");

    qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
    t_gen_illegal_insn(dc);
}

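/* Write a register to a CSR.  Writes to IE, IM and IP can unmask a pending
 * interrupt, so those cases update the PC and end the translation block
 * (DISAS_UPDATE); IM and IP are additionally bracketed as I/O when icount is
 * enabled.  Breakpoint and watchpoint CSRs are range-checked against the
 * number of units configured for this CPU. */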
static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr r%d, %d\n", dc->r1, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_ICC:
        /* TODO */
        break;
    case CSR_DCC:
        /* TODO */
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_DC:
        gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->num_breakpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "breakpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->num_watchpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "watchpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_CC:
    case CSR_CFG:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n",
                dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n",
                dc->csr);
        break;
    }
}

static void dec_xnor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xnori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
        tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
    } else {
        tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xori r%d, r%d, %d\n", dc->r0, dc->r1,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_ill(DisasContext *dc)
{
    qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode);
    t_gen_illegal_insn(dc);
}

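/* Opcode dispatch table, indexed by the 6-bit major opcode.  The first 32
 * entries are the register-immediate encodings, the second 32 the
 * register-register ones (bit 31 of the instruction selects the half). */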
typedef void (*DecoderInfo)(DisasContext *dc);
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};

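/* Decode one instruction word: extract the common fields, derive the
 * instruction format from bit 31 and dispatch to the per-opcode emitter. */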
static inline void decode(DisasContext *dc, uint32_t ir)
{
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);

    dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
    dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
    dc->imm26 = EXTRACT_FIELD(ir, 0, 25);

    dc->csr = EXTRACT_FIELD(ir, 21, 25);
    dc->r0 = EXTRACT_FIELD(ir, 21, 25);
    dc->r1 = EXTRACT_FIELD(ir, 16, 20);
    dc->r2 = EXTRACT_FIELD(ir, 11, 15);

    /* bit 31 seems to indicate insn type.  */
    if (ir & (1 << 31)) {
        dc->format = OP_FMT_RR;
    } else {
        dc->format = OP_FMT_RI;
    }

    assert(ARRAY_SIZE(decinfo) == 64);
    assert(dc->opcode < 64);

    decinfo[dc->opcode](dc);
}

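/* If a debugger breakpoint is set on the current PC, emit code that raises
 * EXCP_DEBUG instead of executing the guest instruction. */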
static void check_breakpoint(CPULM32State *env, DisasContext *dc)
{
    CPUState *cs = CPU(lm32_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

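/* The loop below translates instructions until the decoder ends the block
 * (dc->is_jmp), the TCG opcode buffer or the per-TB instruction budget is
 * exhausted, a page boundary is reached, or single-stepping is requested.
 * With search_pc the gen_opc_* side tables are also filled so a host PC can
 * later be mapped back to a guest PC. */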
/* generate intermediate code for basic block 'tb'.  */
static inline
void gen_intermediate_code_internal(LM32CPU *cpu,
        TranslationBlock *tb, bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPULM32State *env = &cpu->env;
    struct DisasContext ctx, *dc = &ctx;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->features = cpu->features;
    dc->num_breakpoints = cpu->num_breakpoints;
    dc->num_watchpoints = cpu->num_watchpoints;
    dc->tb = tb;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;

    if (pc_start & 3) {
        qemu_log_mask(LOG_GUEST_ERROR,
                "unaligned PC=%x. Ignoring lowest bits.\n", pc_start);
        pc_start &= ~3;
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);
    do {
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc, cpu_ldl_code(env, dc->pc));
        dc->pc += 4;
        num_insns++;

    } while (!dc->is_jmp
         && tcg_ctx.gen_opc_ptr < gen_opc_end
         && !cs->singlestep_enabled
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%td\n",
            dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
            tcg_ctx.gen_opc_buf);
    }
#endif
}

void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPULM32State *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(lm32_env_get_cpu(env), tb, true);
}

void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    LM32CPU *cpu = LM32_CPU(cs);
    CPULM32State *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->pc, lookup_symbol(env->pc));

    cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
             env->ie,
             (env->ie & IE_IE) ? 1 : 0,
             (env->ie & IE_EIE) ? 1 : 0,
             (env->ie & IE_BIE) ? 1 : 0,
             lm32_pic_get_im(env->pic_state),
             lm32_pic_get_ip(env->pic_state));
    cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
             env->eba,
             env->deba);

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}

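/* Register the TCG globals used by the code generators above, mapping each
 * one onto its field in CPULM32State. */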
void lm32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPULM32State, regs[i]),
                          regnames[i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
        cpu_bp[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPULM32State, bp[i]),
                          regnames[32 + i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
        cpu_wp[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPULM32State, wp[i]),
                          regnames[36 + i]);
    }

    cpu_pc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, pc),
                    "pc");
    cpu_ie = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, ie),
                    "ie");
    cpu_icc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, icc),
                    "icc");
    cpu_dcc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, dcc),
                    "dcc");
    cpu_cc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, cc),
                    "cc");
    cpu_cfg = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, cfg),
                    "cfg");
    cpu_eba = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, eba),
                    "eba");
    cpu_dc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, dc),
                    "dc");
    cpu_deba = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPULM32State, deba),
                    "deba");
}