trace: count number of enabled events
[qemu/ar7.git] / target-lm32 / translate.c
blob477d4285a5213be4ffd31fe15c4c8568c266ca21
1 /*
2 * LatticeMico32 main translation routines.
4 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "exec/helper-proto.h"
24 #include "tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "hw/lm32/lm32_pic.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
34 #define DISAS_LM32 1
35 #if DISAS_LM32
36 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
37 #else
38 # define LOG_DIS(...) do { } while (0)
39 #endif
/* Extract bits [start, end] (inclusive, LSB-numbered) of src.
 * Fix: 'start' and 'end' are now parenthesized so that expression
 * arguments (e.g. EXTRACT_FIELD(x, a - b, c)) expand correctly. */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
44 #define MEM_INDEX 0
46 static TCGv_ptr cpu_env;
47 static TCGv cpu_R[32];
48 static TCGv cpu_pc;
49 static TCGv cpu_ie;
50 static TCGv cpu_icc;
51 static TCGv cpu_dcc;
52 static TCGv cpu_cc;
53 static TCGv cpu_cfg;
54 static TCGv cpu_eba;
55 static TCGv cpu_dc;
56 static TCGv cpu_deba;
57 static TCGv cpu_bp[4];
58 static TCGv cpu_wp[4];
60 #include "exec/gen-icount.h"
/* Instruction encoding formats.  decode() only ever selects OP_FMT_RI or
 * OP_FMT_RR; the CR and I forms are not used by this decoder. */
enum {
    OP_FMT_RI,  /* register + 16-bit immediate */
    OP_FMT_RR,  /* register + register */
    OP_FMT_CR,  /* control/status register form (unused here) */
    OP_FMT_I    /* 26-bit immediate form (unused here) */
};
/* This is the state at translation time. */
typedef struct DisasContext {
    target_ulong pc;        /* address of the insn being translated */

    /* Decoder. */
    int format;             /* OP_FMT_* of the current insn */
    uint32_t ir;            /* raw instruction word */
    uint8_t opcode;         /* bits [31:26] of ir */
    uint8_t r0, r1, r2, csr;    /* register fields; csr aliases r0's bits */
    uint16_t imm5;          /* 5-bit immediate (shift counts, scall/break) */
    uint16_t imm16;         /* 16-bit immediate */
    uint32_t imm26;         /* 26-bit immediate (bi/calli) */

    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    int is_jmp;             /* DISAS_* disposition at end of insn */

    struct TranslationBlock *tb;
    int singlestep_enabled;

    uint32_t features;          /* LM32_FEATURE_* bits of this CPU model */
    uint8_t num_breakpoints;    /* h/w breakpoints implemented (0..4) */
    uint8_t num_watchpoints;    /* h/w watchpoints implemented (0..4) */
} DisasContext;
/* Names for the TCG globals: the 32 GPRs, then the breakpoint registers
 * (indices 32..35) and watchpoint registers (36..39) consumed by
 * lm32_translate_init(). */
static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};
static inline int zero_extend(unsigned int val, int width)
{
    /* Keep only the low 'width' bits of val. */
    unsigned int mask = (1 << width) - 1;

    return val & mask;
}
/* Sign-extend the low 'width' bits of val to a full int.
 *
 * Fix: the original shifted into and then arithmetically right-shifted a
 * signed int, which relies on implementation-defined behavior; this
 * version uses only unsigned arithmetic (xor/subtract trick). */
static inline int sign_extend(unsigned int val, int width)
{
    unsigned int sign_bit = 1u << (width - 1);

    /* Mask off everything above the field; for width == 32 the mask
     * computation wraps to all-ones, which is what we want. */
    val &= (sign_bit << 1) - 1;
    return (int)((val ^ sign_bit) - sign_bit);
}
121 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
123 TCGv_i32 tmp = tcg_const_i32(index);
125 gen_helper_raise_exception(cpu_env, tmp);
126 tcg_temp_free_i32(tmp);
129 static inline void t_gen_illegal_insn(DisasContext *dc)
131 tcg_gen_movi_tl(cpu_pc, dc->pc);
132 gen_helper_ill(cpu_env);
135 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
137 TranslationBlock *tb;
139 tb = dc->tb;
140 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
141 likely(!dc->singlestep_enabled)) {
142 tcg_gen_goto_tb(n);
143 tcg_gen_movi_tl(cpu_pc, dest);
144 tcg_gen_exit_tb((uintptr_t)tb + n);
145 } else {
146 tcg_gen_movi_tl(cpu_pc, dest);
147 if (dc->singlestep_enabled) {
148 t_gen_raise_exception(dc, EXCP_DEBUG);
150 tcg_gen_exit_tb(0);
154 static void dec_add(DisasContext *dc)
156 if (dc->format == OP_FMT_RI) {
157 if (dc->r0 == R_R0) {
158 if (dc->r1 == R_R0 && dc->imm16 == 0) {
159 LOG_DIS("nop\n");
160 } else {
161 LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
163 } else {
164 LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
165 sign_extend(dc->imm16, 16));
167 } else {
168 LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
171 if (dc->format == OP_FMT_RI) {
172 tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
173 sign_extend(dc->imm16, 16));
174 } else {
175 tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
179 static void dec_and(DisasContext *dc)
181 if (dc->format == OP_FMT_RI) {
182 LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
183 zero_extend(dc->imm16, 16));
184 } else {
185 LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
188 if (dc->format == OP_FMT_RI) {
189 tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
190 zero_extend(dc->imm16, 16));
191 } else {
192 if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
193 tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
194 gen_helper_hlt(cpu_env);
195 } else {
196 tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
201 static void dec_andhi(DisasContext *dc)
203 LOG_DIS("andhi r%d, r%d, %d\n", dc->r2, dc->r0, dc->imm16);
205 tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
208 static void dec_b(DisasContext *dc)
210 if (dc->r0 == R_RA) {
211 LOG_DIS("ret\n");
212 } else if (dc->r0 == R_EA) {
213 LOG_DIS("eret\n");
214 } else if (dc->r0 == R_BA) {
215 LOG_DIS("bret\n");
216 } else {
217 LOG_DIS("b r%d\n", dc->r0);
220 /* restore IE.IE in case of an eret */
221 if (dc->r0 == R_EA) {
222 TCGv t0 = tcg_temp_new();
223 TCGLabel *l1 = gen_new_label();
224 tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
225 tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
226 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
227 tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
228 gen_set_label(l1);
229 tcg_temp_free(t0);
230 } else if (dc->r0 == R_BA) {
231 TCGv t0 = tcg_temp_new();
232 TCGLabel *l1 = gen_new_label();
233 tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
234 tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
235 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
236 tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
237 gen_set_label(l1);
238 tcg_temp_free(t0);
240 tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);
242 dc->is_jmp = DISAS_JUMP;
245 static void dec_bi(DisasContext *dc)
247 LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));
249 gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));
251 dc->is_jmp = DISAS_TB_JUMP;
254 static inline void gen_cond_branch(DisasContext *dc, int cond)
256 TCGLabel *l1 = gen_new_label();
257 tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
258 gen_goto_tb(dc, 0, dc->pc + 4);
259 gen_set_label(l1);
260 gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
261 dc->is_jmp = DISAS_TB_JUMP;
264 static void dec_be(DisasContext *dc)
266 LOG_DIS("be r%d, r%d, %d\n", dc->r0, dc->r1,
267 sign_extend(dc->imm16, 16) * 4);
269 gen_cond_branch(dc, TCG_COND_EQ);
272 static void dec_bg(DisasContext *dc)
274 LOG_DIS("bg r%d, r%d, %d\n", dc->r0, dc->r1,
275 sign_extend(dc->imm16, 16 * 4));
277 gen_cond_branch(dc, TCG_COND_GT);
280 static void dec_bge(DisasContext *dc)
282 LOG_DIS("bge r%d, r%d, %d\n", dc->r0, dc->r1,
283 sign_extend(dc->imm16, 16) * 4);
285 gen_cond_branch(dc, TCG_COND_GE);
288 static void dec_bgeu(DisasContext *dc)
290 LOG_DIS("bgeu r%d, r%d, %d\n", dc->r0, dc->r1,
291 sign_extend(dc->imm16, 16) * 4);
293 gen_cond_branch(dc, TCG_COND_GEU);
296 static void dec_bgu(DisasContext *dc)
298 LOG_DIS("bgu r%d, r%d, %d\n", dc->r0, dc->r1,
299 sign_extend(dc->imm16, 16) * 4);
301 gen_cond_branch(dc, TCG_COND_GTU);
304 static void dec_bne(DisasContext *dc)
306 LOG_DIS("bne r%d, r%d, %d\n", dc->r0, dc->r1,
307 sign_extend(dc->imm16, 16) * 4);
309 gen_cond_branch(dc, TCG_COND_NE);
312 static void dec_call(DisasContext *dc)
314 LOG_DIS("call r%d\n", dc->r0);
316 tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
317 tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);
319 dc->is_jmp = DISAS_JUMP;
322 static void dec_calli(DisasContext *dc)
324 LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);
326 tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
327 gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));
329 dc->is_jmp = DISAS_TB_JUMP;
332 static inline void gen_compare(DisasContext *dc, int cond)
334 int rX = (dc->format == OP_FMT_RR) ? dc->r2 : dc->r1;
335 int rY = (dc->format == OP_FMT_RR) ? dc->r0 : dc->r0;
336 int rZ = (dc->format == OP_FMT_RR) ? dc->r1 : -1;
337 int i;
339 if (dc->format == OP_FMT_RI) {
340 switch (cond) {
341 case TCG_COND_GEU:
342 case TCG_COND_GTU:
343 i = zero_extend(dc->imm16, 16);
344 break;
345 default:
346 i = sign_extend(dc->imm16, 16);
347 break;
350 tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY], i);
351 } else {
352 tcg_gen_setcond_tl(cond, cpu_R[rX], cpu_R[rY], cpu_R[rZ]);
356 static void dec_cmpe(DisasContext *dc)
358 if (dc->format == OP_FMT_RI) {
359 LOG_DIS("cmpei r%d, r%d, %d\n", dc->r0, dc->r1,
360 sign_extend(dc->imm16, 16));
361 } else {
362 LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
365 gen_compare(dc, TCG_COND_EQ);
368 static void dec_cmpg(DisasContext *dc)
370 if (dc->format == OP_FMT_RI) {
371 LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r0, dc->r1,
372 sign_extend(dc->imm16, 16));
373 } else {
374 LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
377 gen_compare(dc, TCG_COND_GT);
380 static void dec_cmpge(DisasContext *dc)
382 if (dc->format == OP_FMT_RI) {
383 LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r0, dc->r1,
384 sign_extend(dc->imm16, 16));
385 } else {
386 LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
389 gen_compare(dc, TCG_COND_GE);
392 static void dec_cmpgeu(DisasContext *dc)
394 if (dc->format == OP_FMT_RI) {
395 LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r0, dc->r1,
396 zero_extend(dc->imm16, 16));
397 } else {
398 LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
401 gen_compare(dc, TCG_COND_GEU);
404 static void dec_cmpgu(DisasContext *dc)
406 if (dc->format == OP_FMT_RI) {
407 LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r0, dc->r1,
408 zero_extend(dc->imm16, 16));
409 } else {
410 LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
413 gen_compare(dc, TCG_COND_GTU);
416 static void dec_cmpne(DisasContext *dc)
418 if (dc->format == OP_FMT_RI) {
419 LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r0, dc->r1,
420 sign_extend(dc->imm16, 16));
421 } else {
422 LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
425 gen_compare(dc, TCG_COND_NE);
428 static void dec_divu(DisasContext *dc)
430 TCGLabel *l1;
432 LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
434 if (!(dc->features & LM32_FEATURE_DIVIDE)) {
435 qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
436 t_gen_illegal_insn(dc);
437 return;
440 l1 = gen_new_label();
441 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
442 tcg_gen_movi_tl(cpu_pc, dc->pc);
443 t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
444 gen_set_label(l1);
445 tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
448 static void dec_lb(DisasContext *dc)
450 TCGv t0;
452 LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
454 t0 = tcg_temp_new();
455 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
456 tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
457 tcg_temp_free(t0);
460 static void dec_lbu(DisasContext *dc)
462 TCGv t0;
464 LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
466 t0 = tcg_temp_new();
467 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
468 tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
469 tcg_temp_free(t0);
472 static void dec_lh(DisasContext *dc)
474 TCGv t0;
476 LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
478 t0 = tcg_temp_new();
479 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
480 tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
481 tcg_temp_free(t0);
484 static void dec_lhu(DisasContext *dc)
486 TCGv t0;
488 LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
490 t0 = tcg_temp_new();
491 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
492 tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
493 tcg_temp_free(t0);
496 static void dec_lw(DisasContext *dc)
498 TCGv t0;
500 LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));
502 t0 = tcg_temp_new();
503 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
504 tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
505 tcg_temp_free(t0);
508 static void dec_modu(DisasContext *dc)
510 TCGLabel *l1;
512 LOG_DIS("modu r%d, r%d, %d\n", dc->r2, dc->r0, dc->r1);
514 if (!(dc->features & LM32_FEATURE_DIVIDE)) {
515 qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
516 t_gen_illegal_insn(dc);
517 return;
520 l1 = gen_new_label();
521 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
522 tcg_gen_movi_tl(cpu_pc, dc->pc);
523 t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
524 gen_set_label(l1);
525 tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
528 static void dec_mul(DisasContext *dc)
530 if (dc->format == OP_FMT_RI) {
531 LOG_DIS("muli r%d, r%d, %d\n", dc->r0, dc->r1,
532 sign_extend(dc->imm16, 16));
533 } else {
534 LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
537 if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
538 qemu_log_mask(LOG_GUEST_ERROR,
539 "hardware multiplier is not available\n");
540 t_gen_illegal_insn(dc);
541 return;
544 if (dc->format == OP_FMT_RI) {
545 tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
546 sign_extend(dc->imm16, 16));
547 } else {
548 tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
552 static void dec_nor(DisasContext *dc)
554 if (dc->format == OP_FMT_RI) {
555 LOG_DIS("nori r%d, r%d, %d\n", dc->r0, dc->r1,
556 zero_extend(dc->imm16, 16));
557 } else {
558 LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
561 if (dc->format == OP_FMT_RI) {
562 TCGv t0 = tcg_temp_new();
563 tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
564 tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
565 tcg_temp_free(t0);
566 } else {
567 tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
571 static void dec_or(DisasContext *dc)
573 if (dc->format == OP_FMT_RI) {
574 LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
575 zero_extend(dc->imm16, 16));
576 } else {
577 if (dc->r1 == R_R0) {
578 LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
579 } else {
580 LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
584 if (dc->format == OP_FMT_RI) {
585 tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
586 zero_extend(dc->imm16, 16));
587 } else {
588 tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
592 static void dec_orhi(DisasContext *dc)
594 if (dc->r0 == R_R0) {
595 LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
596 } else {
597 LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
600 tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
603 static void dec_scall(DisasContext *dc)
605 switch (dc->imm5) {
606 case 2:
607 LOG_DIS("break\n");
608 tcg_gen_movi_tl(cpu_pc, dc->pc);
609 t_gen_raise_exception(dc, EXCP_BREAKPOINT);
610 break;
611 case 7:
612 LOG_DIS("scall\n");
613 tcg_gen_movi_tl(cpu_pc, dc->pc);
614 t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
615 break;
616 default:
617 qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x", dc->pc);
618 t_gen_illegal_insn(dc);
619 break;
/* Read a control/status register into r2.  Simple CSRs live in TCG
 * globals and are copied directly; the PIC/JTAG ones go through helpers.
 * Write-only CSRs log a guest error and leave r2 untouched. */
static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        /* Interrupt mask lives in the PIC state, not a TCG global. */
        gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        /* These CSRs are write-only. */
        qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}
676 static void dec_sb(DisasContext *dc)
678 TCGv t0;
680 LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);
682 t0 = tcg_temp_new();
683 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
684 tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
685 tcg_temp_free(t0);
688 static void dec_sextb(DisasContext *dc)
690 LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);
692 if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
693 qemu_log_mask(LOG_GUEST_ERROR,
694 "hardware sign extender is not available\n");
695 t_gen_illegal_insn(dc);
696 return;
699 tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
702 static void dec_sexth(DisasContext *dc)
704 LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);
706 if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
707 qemu_log_mask(LOG_GUEST_ERROR,
708 "hardware sign extender is not available\n");
709 t_gen_illegal_insn(dc);
710 return;
713 tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
716 static void dec_sh(DisasContext *dc)
718 TCGv t0;
720 LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);
722 t0 = tcg_temp_new();
723 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
724 tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
725 tcg_temp_free(t0);
728 static void dec_sl(DisasContext *dc)
730 if (dc->format == OP_FMT_RI) {
731 LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
732 } else {
733 LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
736 if (!(dc->features & LM32_FEATURE_SHIFT)) {
737 qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
738 t_gen_illegal_insn(dc);
739 return;
742 if (dc->format == OP_FMT_RI) {
743 tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
744 } else {
745 TCGv t0 = tcg_temp_new();
746 tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
747 tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
748 tcg_temp_free(t0);
/* Arithmetic shift right.  Without the hardware shifter feature only a
 * shift count of exactly 1 is legal; other counts raise an illegal-insn
 * exception (at translation time for the immediate form, at run time
 * for the register form). */
static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* The real CPU (w/o hardware shifter) only supports right shift by exactly
     * one bit */
    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        /* Local temp: the value must survive the conditional branch. */
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            /* Count != 1: raise illegal insn and skip the shift. */
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}
/* Logical shift right.  Same feature handling as dec_sr: without the
 * hardware shifter only a count of exactly 1 is legal. */
static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                    "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        /* Local temp: the value must survive the conditional branch. */
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            /* Count != 1: raise illegal insn and skip the shift. */
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}
826 static void dec_sub(DisasContext *dc)
828 LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
830 tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
833 static void dec_sw(DisasContext *dc)
835 TCGv t0;
837 LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);
839 t0 = tcg_temp_new();
840 tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
841 tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
842 tcg_temp_free(t0);
845 static void dec_user(DisasContext *dc)
847 LOG_DIS("user");
849 qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
850 t_gen_illegal_insn(dc);
/* Write r1 into a control/status register.  Writes that can change the
 * interrupt state (IE/IM/IP) end the TB with DISAS_UPDATE so the new
 * state is observed immediately; BP/WP writes validate the register
 * index against the CPU's implemented count. */
static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr r%d, %d\n", dc->r1, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (dc->tb->cflags & CF_USE_ICOUNT) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_ICC:
        /* TODO */
        break;
    case CSR_DCC:
        /* TODO */
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_DC:
        gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->num_breakpoints <= no) {
            /* Writing a breakpoint register this CPU doesn't have. */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "breakpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->num_watchpoints <= no) {
            /* Writing a watchpoint register this CPU doesn't have. */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "watchpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_CC:
    case CSR_CFG:
        /* These CSRs are read-only. */
        qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n",
                      dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n",
                      dc->csr);
        break;
    }
}
948 static void dec_xnor(DisasContext *dc)
950 if (dc->format == OP_FMT_RI) {
951 LOG_DIS("xnori r%d, r%d, %d\n", dc->r0, dc->r1,
952 zero_extend(dc->imm16, 16));
953 } else {
954 if (dc->r1 == R_R0) {
955 LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
956 } else {
957 LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
961 if (dc->format == OP_FMT_RI) {
962 tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
963 zero_extend(dc->imm16, 16));
964 tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
965 } else {
966 tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
970 static void dec_xor(DisasContext *dc)
972 if (dc->format == OP_FMT_RI) {
973 LOG_DIS("xori r%d, r%d, %d\n", dc->r0, dc->r1,
974 zero_extend(dc->imm16, 16));
975 } else {
976 LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
979 if (dc->format == OP_FMT_RI) {
980 tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
981 zero_extend(dc->imm16, 16));
982 } else {
983 tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
987 static void dec_ill(DisasContext *dc)
989 qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode);
990 t_gen_illegal_insn(dc);
typedef void (*DecoderInfo)(DisasContext *dc);
/* Dispatch table indexed by the 6-bit opcode (see decode()).  Opcode
 * bit 5 mirrors ir bit 31, so the first 32 entries are reached with
 * OP_FMT_RI and the second 32 with OP_FMT_RR. */
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};
1007 static inline void decode(DisasContext *dc, uint32_t ir)
1009 dc->ir = ir;
1010 LOG_DIS("%8.8x\t", dc->ir);
1012 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1014 dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
1015 dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
1016 dc->imm26 = EXTRACT_FIELD(ir, 0, 25);
1018 dc->csr = EXTRACT_FIELD(ir, 21, 25);
1019 dc->r0 = EXTRACT_FIELD(ir, 21, 25);
1020 dc->r1 = EXTRACT_FIELD(ir, 16, 20);
1021 dc->r2 = EXTRACT_FIELD(ir, 11, 15);
1023 /* bit 31 seems to indicate insn type. */
1024 if (ir & (1 << 31)) {
1025 dc->format = OP_FMT_RR;
1026 } else {
1027 dc->format = OP_FMT_RI;
1030 assert(ARRAY_SIZE(decinfo) == 64);
1031 assert(dc->opcode < 64);
1033 decinfo[dc->opcode](dc);
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
{
    LM32CPU *cpu = lm32_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->features = cpu->features;
    dc->num_breakpoints = cpu->num_breakpoints;
    dc->num_watchpoints = cpu->num_watchpoints;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;

    /* Instructions are word aligned; silently drop stray low bits. */
    if (pc_start & 3) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unaligned PC=%x. Ignoring lowest bits.\n", pc_start);
        pc_start &= ~3;
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 4;
            break;
        }

        /* Pretty disas. */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc, cpu_ldl_code(env, dc->pc));
        dc->pc += 4;
        /* Stop at jumps, op-buffer exhaustion, single-step, page
           boundaries and the icount budget. */
    } while (!dc->is_jmp
         && !tcg_op_buf_full()
         && !cs->singlestep_enabled
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }
    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%d\n",
                 dc->pc - pc_start, tcg_op_buf_count());
    }
#endif
}
1148 void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1149 int flags)
1151 LM32CPU *cpu = LM32_CPU(cs);
1152 CPULM32State *env = &cpu->env;
1153 int i;
1155 if (!env || !f) {
1156 return;
1159 cpu_fprintf(f, "IN: PC=%x %s\n",
1160 env->pc, lookup_symbol(env->pc));
1162 cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
1163 env->ie,
1164 (env->ie & IE_IE) ? 1 : 0,
1165 (env->ie & IE_EIE) ? 1 : 0,
1166 (env->ie & IE_BIE) ? 1 : 0,
1167 lm32_pic_get_im(env->pic_state),
1168 lm32_pic_get_ip(env->pic_state));
1169 cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
1170 env->eba,
1171 env->deba);
1173 for (i = 0; i < 32; i++) {
1174 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1175 if ((i + 1) % 4 == 0) {
1176 cpu_fprintf(f, "\n");
1179 cpu_fprintf(f, "\n\n");
1182 void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb,
1183 target_ulong *data)
1185 env->pc = data[0];
1188 void lm32_translate_init(void)
1190 int i;
1192 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1194 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1195 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
1196 offsetof(CPULM32State, regs[i]),
1197 regnames[i]);
1200 for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
1201 cpu_bp[i] = tcg_global_mem_new(TCG_AREG0,
1202 offsetof(CPULM32State, bp[i]),
1203 regnames[32+i]);
1206 for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
1207 cpu_wp[i] = tcg_global_mem_new(TCG_AREG0,
1208 offsetof(CPULM32State, wp[i]),
1209 regnames[36+i]);
1212 cpu_pc = tcg_global_mem_new(TCG_AREG0,
1213 offsetof(CPULM32State, pc),
1214 "pc");
1215 cpu_ie = tcg_global_mem_new(TCG_AREG0,
1216 offsetof(CPULM32State, ie),
1217 "ie");
1218 cpu_icc = tcg_global_mem_new(TCG_AREG0,
1219 offsetof(CPULM32State, icc),
1220 "icc");
1221 cpu_dcc = tcg_global_mem_new(TCG_AREG0,
1222 offsetof(CPULM32State, dcc),
1223 "dcc");
1224 cpu_cc = tcg_global_mem_new(TCG_AREG0,
1225 offsetof(CPULM32State, cc),
1226 "cc");
1227 cpu_cfg = tcg_global_mem_new(TCG_AREG0,
1228 offsetof(CPULM32State, cfg),
1229 "cfg");
1230 cpu_eba = tcg_global_mem_new(TCG_AREG0,
1231 offsetof(CPULM32State, eba),
1232 "eba");
1233 cpu_dc = tcg_global_mem_new(TCG_AREG0,
1234 offsetof(CPULM32State, dc),
1235 "dc");
1236 cpu_deba = tcg_global_mem_new(TCG_AREG0,
1237 offsetof(CPULM32State, deba),
1238 "deba");