/*
 * LatticeMico32 main translation routines.
 *
 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>

#include "cpu.h"
#include "disas.h"
#include "helper.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "hw/lm32_pic.h"

#define GEN_HELPER 1
#include "helper.h"

#define DISAS_LM32 1
#if DISAS_LM32
#  define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
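
/* Note: EXTRACT_FIELD selects the inclusive bit range [end:start] of a word.
 * For example, decode() below uses EXTRACT_FIELD(ir, 26, 31) to pull the
 * 6-bit opcode out of an instruction word: the word is shifted right by 26
 * and masked with (1 << 6) - 1 = 0x3f.
 */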

#define MEM_INDEX 0

static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_pc;
static TCGv cpu_ie;
static TCGv cpu_icc;
static TCGv cpu_dcc;
static TCGv cpu_cc;
static TCGv cpu_cfg;
static TCGv cpu_eba;
static TCGv cpu_dc;
static TCGv cpu_deba;
static TCGv cpu_bp[4];
static TCGv cpu_wp[4];

#include "gen-icount.h"

enum {
    OP_FMT_RI,
    OP_FMT_RR,
    OP_FMT_CR,
    OP_FMT_I
};

/* This is the state at translation time. */
typedef struct DisasContext {
    CPUState *env;
    target_ulong pc;

    /* Decoder. */
    int format;
    uint32_t ir;
    uint8_t opcode;
    uint8_t r0, r1, r2, csr;
    uint16_t imm5;
    uint16_t imm16;
    uint32_t imm26;

    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    int is_jmp;

    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};

static inline int zero_extend(unsigned int val, int width)
{
    return val & ((1 << width) - 1);
}

static inline int sign_extend(unsigned int val, int width)
{
    int sval;

    /* LSL. */
    val <<= 32 - width;
    sval = val;
    /* ASR. */
    sval >>= 32 - width;

    return sval;
}
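
/* Note: sign_extend() relies on the arithmetic right shift of a signed int,
 * so e.g. sign_extend(0xfffe, 16) evaluates to -2 while zero_extend(0xfffe,
 * 16) stays 0xfffe. The branch and load/store decoders below use it to widen
 * the 16- and 26-bit immediates.
 */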

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
            likely(!dc->singlestep_enabled)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}
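
/* Note: when the destination lies on the same guest page as the current TB
 * and single-stepping is off, gen_goto_tb() emits a goto_tb/exit_tb(tb + n)
 * pair so the two TBs can be chained directly; otherwise it only writes back
 * cpu_pc and exits with 0, forcing a lookup of the next TB (raising
 * EXCP_DEBUG first when single-stepping).
 */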

static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        if (dc->r0 == R_R0) {
            if (dc->r1 == R_R0 && dc->imm16 == 0) {
                LOG_DIS("nop\n");
            } else {
                LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
            }
        } else {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
                    sign_extend(dc->imm16, 16));
        }
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
            tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
            gen_helper_hlt();
        } else {
            tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        }
    }
}

static void dec_andhi(DisasContext *dc)
{
    LOG_DIS("andhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);

    tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

static void dec_b(DisasContext *dc)
{
    if (dc->r0 == R_RA) {
        LOG_DIS("ret\n");
    } else if (dc->r0 == R_EA) {
        LOG_DIS("eret\n");
    } else if (dc->r0 == R_BA) {
        LOG_DIS("bret\n");
    } else {
        LOG_DIS("b r%d\n", dc->r0);
    }

    /* restore IE.IE in case of an eret */
    if (dc->r0 == R_EA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    } else if (dc->r0 == R_BA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    }
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_bi(DisasContext *dc)
{
    LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));

    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

static inline void gen_cond_branch(DisasContext *dc, int cond)
{
    int l1;

    l1 = gen_new_label();
    tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
    gen_goto_tb(dc, 0, dc->pc + 4);
    gen_set_label(l1);
    gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
    dc->is_jmp = DISAS_TB_JUMP;
}
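
/* Note: gen_cond_branch() implements the conditional branches below by
 * jumping over the not-taken case: gen_goto_tb() slot 0 is the fall-through
 * path at pc + 4, slot 1 is the taken path at pc plus the sign-extended,
 * word-aligned 16-bit offset.
 */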

static void dec_be(DisasContext *dc)
{
    LOG_DIS("be r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_EQ);
}

static void dec_bg(DisasContext *dc)
{
    LOG_DIS("bg r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GT);
}

static void dec_bge(DisasContext *dc)
{
    LOG_DIS("bge r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GE);
}

static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}

static void dec_bgu(DisasContext *dc)
{
    LOG_DIS("bgu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GTU);
}

static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}

static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

static inline void gen_compare(DisasContext *dc, int cond)
{
    int rX = (dc->format == OP_FMT_RR) ? dc->r2 : dc->r1;
    int rY = (dc->format == OP_FMT_RR) ? dc->r0 : dc->r0;
    int rZ = (dc->format == OP_FMT_RR) ? dc->r1 : -1;

    if (dc->format == OP_FMT_RI) {
        tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_setcond_tl(cond, cpu_R[rX], cpu_R[rY], cpu_R[rZ]);
    }
}
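
/* Note: in the register/immediate (RI) case the compares below write their
 * result to r1 and compare r0 against the sign-extended imm16 (rZ is -1 and
 * unused), while the register/register (RR) forms write r2 from comparing r0
 * with r1.
 */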

static void dec_cmpe(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_EQ);
}

static void dec_cmpg(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GT);
}

static void dec_cmpge(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GE);
}

static void dec_cmpgeu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GEU);
}

static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GTU);
}

static void dec_cmpne(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_NE);
}

static void dec_divu(DisasContext *dc)
{
    int l1;

    LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
        cpu_abort(dc->env, "hardware divider is not available\n");
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_lb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lbu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lhu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_modu(DisasContext *dc)
{
    int l1;

    LOG_DIS("modu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
        cpu_abort(dc->env, "hardware divider is not available\n");
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_MULTIPLY)) {
        cpu_abort(dc->env, "hardware multiplier is not available\n");
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_nor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("nori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_or(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_orhi(DisasContext *dc)
{
    if (dc->r0 == R_R0) {
        LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
    } else {
        LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
    }

    tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

static void dec_scall(DisasContext *dc)
{
    if (dc->imm5 == 7) {
        LOG_DIS("scall\n");
    } else if (dc->imm5 == 2) {
        LOG_DIS("break\n");
    } else {
        cpu_abort(dc->env, "invalid opcode\n");
    }

    if (dc->imm5 == 7) {
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
    } else {
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_BREAKPOINT);
    }
}

static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2]);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2]);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2]);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2]);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        cpu_abort(dc->env, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        cpu_abort(dc->env, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}

static void dec_sb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sextb(DisasContext *dc)
{
    LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
        cpu_abort(dc->env, "hardware sign extender is not available\n");
    }

    tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
        cpu_abort(dc->env, "hardware sign extender is not available\n");
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        cpu_abort(dc->env, "hardware shifter is not available\n");
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        if (dc->format == OP_FMT_RI) {
            /* TODO: check r1 == 1 during runtime */
        } else {
            if (dc->imm5 != 1) {
                cpu_abort(dc->env, "hardware shifter is not available\n");
            }
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        if (dc->format == OP_FMT_RI) {
            /* TODO: check r1 == 1 during runtime */
        } else {
            if (dc->imm5 != 1) {
                cpu_abort(dc->env, "hardware shifter is not available\n");
            }
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sub(DisasContext *dc)
{
    LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_sw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_user(DisasContext *dc)
{
    LOG_DIS("user");

    cpu_abort(dc->env, "user insn undefined\n");
}

static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr r%d, %d\n", dc->r1, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_ICC:
        /* TODO */
        break;
    case CSR_DCC:
        /* TODO */
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_R[dc->r1]);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_dc, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->env->num_bps <= no) {
            cpu_abort(dc->env, "breakpoint #%i is not available\n", no);
        }
        tcg_gen_mov_tl(cpu_bp[no], cpu_R[dc->r1]);
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->env->num_wps <= no) {
            cpu_abort(dc->env, "watchpoint #%i is not available\n", no);
        }
        tcg_gen_mov_tl(cpu_wp[no], cpu_R[dc->r1]);
        break;
    case CSR_CC:
    case CSR_CFG:
        cpu_abort(dc->env, "invalid write access csr=%x\n", dc->csr);
        break;
    default:
        cpu_abort(dc->env, "write_csr unknown csr=%x\n", dc->csr);
        break;
    }
}

static void dec_xnor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xnori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
        tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
    } else {
        tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_ill(DisasContext *dc)
{
    cpu_abort(dc->env, "unknown opcode 0x%02x\n", dc->opcode);
}

typedef void (*DecoderInfo)(DisasContext *dc);
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};
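
/* Note: the table above has 64 entries, one per 6-bit opcode value (decode()
 * asserts both). The first 32 entries cover the immediate encodings, the
 * second 32 the register/CSR encodings; handlers that accept both formats,
 * such as dec_add and dec_and, appear in both halves and check dc->format
 * themselves.
 */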

static inline void decode(DisasContext *dc)
{
    uint32_t ir;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir = ldl_code(dc->pc);
    LOG_DIS("%8.8x\t", dc->ir);

    /* try guessing 'empty' instruction memory, although it may be a valid
     * instruction sequence (eg. srui r0, r0, 0) */
    if (dc->ir) {
        dc->nr_nops = 0;
    } else {
        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(dc->env, "fetching nop sequence\n");
        }
    }

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);

    dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
    dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
    dc->imm26 = EXTRACT_FIELD(ir, 0, 25);

    dc->csr = EXTRACT_FIELD(ir, 21, 25);
    dc->r0 = EXTRACT_FIELD(ir, 21, 25);
    dc->r1 = EXTRACT_FIELD(ir, 16, 20);
    dc->r2 = EXTRACT_FIELD(ir, 11, 15);
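
    /* Note on the fields extracted above: the opcode sits in bits 31:26,
     * r0/csr in bits 25:21, r1 in bits 20:16 and r2 in bits 15:11; the
     * immediate variants reuse the low bits as imm5, imm16 or imm26. */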

    /* bit 31 seems to indicate insn type. */
    if (ir & (1 << 31)) {
        dc->format = OP_FMT_RR;
    } else {
        dc->format = OP_FMT_RI;
    }

    assert(ARRAY_SIZE(decinfo) == 64);
    assert(dc->opcode < 64);

    decinfo[dc->opcode](dc);
}

static void check_breakpoint(CPUState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'. */
static void gen_intermediate_code_internal(CPUState *env,
        TranslationBlock *tb, int search_pc)
{
    struct DisasContext ctx, *dc = &ctx;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(env, "LM32: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("-----------------------------------------\n");
        log_cpu_state(env, 0);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        check_breakpoint(env, dc);

        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas. */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc);
        dc->pc += 4;
        num_insns++;
    } while (!dc->is_jmp
         && gen_opc_ptr < gen_opc_end
         && !env->singlestep_enabled
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(env->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
        log_target_disas(pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
#endif
}

void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->pc, lookup_symbol(env->pc));

    cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
                env->ie,
                (env->ie & IE_IE) ? 1 : 0,
                (env->ie & IE_EIE) ? 1 : 0,
                (env->ie & IE_BIE) ? 1 : 0,
                lm32_pic_get_im(env->pic_state),
                lm32_pic_get_ip(env->pic_state));
    cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
                env->eba,
                env->deba);

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}

void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}

void lm32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, regs[i]),
                          regnames[i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
        cpu_bp[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, bp[i]),
                          regnames[32 + i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
        cpu_wp[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUState, wp[i]),
                          regnames[36 + i]);
    }

    cpu_pc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, pc),
                    "pc");
    cpu_ie = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, ie),
                    "ie");
    cpu_icc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, icc),
                    "icc");
    cpu_dcc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, dcc),
                    "dcc");
    cpu_cc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, cc),
                    "cc");
    cpu_cfg = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, cfg),
                    "cfg");
    cpu_eba = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, eba),
                    "eba");
    cpu_dc = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, dc),
                    "dc");
    cpu_deba = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUState, deba),
                    "deba");
}