/*
 *  LatticeMico32 main translation routines.
 *
 *  Copyright (c) 2010 Michael Walle <michael@walle.cc>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "helper.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "hw/lm32_pic.h"

#define GEN_HELPER 1
#include "helper.h"

#define DISAS_LM32 1
#if DISAS_LM32
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))
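/*
 * Example: with the bit numbering used below, EXTRACT_FIELD(ir, 26, 31)
 * yields the 6-bit major opcode from instruction bits [31:26] and
 * EXTRACT_FIELD(ir, 0, 15) the 16-bit immediate, exactly as done in
 * decode().
 */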
#define MEM_INDEX 0

static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_pc;
static TCGv cpu_ie;
static TCGv cpu_icc;
static TCGv cpu_dcc;
static TCGv cpu_cc;
static TCGv cpu_cfg;
static TCGv cpu_eba;
static TCGv cpu_dc;
static TCGv cpu_deba;
static TCGv cpu_bp[4];
static TCGv cpu_wp[4];

#include "gen-icount.h"
enum {
    OP_FMT_RI,
    OP_FMT_RR,
    OP_FMT_CR,
    OP_FMT_I
};
/* This is the state at translation time.  */
typedef struct DisasContext {
    CPUState *env;
    target_ulong pc;

    /* Decoder.  */
    int format;
    uint32_t ir;
    uint8_t opcode;
    uint8_t r0, r1, r2, csr;
    uint16_t imm5;
    uint16_t imm16;
    uint32_t imm26;

    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    int is_jmp;

    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;
static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};
static inline int zero_extend(unsigned int val, int width)
{
    return val & ((1 << width) - 1);
}

static inline int sign_extend(unsigned int val, int width)
{
    int sval;

    /* LSL.  */
    val <<= 32 - width;
    sval = val;
    /* ASR.  */
    sval >>= 32 - width;

    return sval;
}
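/*
 * The shift pair above relies on the arithmetic right shift of a signed
 * int to replicate the sign bit, e.g. sign_extend(0xffff, 16) == -1 while
 * sign_extend(0x7fff, 16) == 0x7fff.
 */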
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(tmp);
    tcg_temp_free_i32(tmp);
}
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;

    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
            likely(!dc->singlestep_enabled)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(0);
    }
}
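/*
 * If the destination lies on the same guest page and single-stepping is
 * off, the current TB is chained directly to the target through
 * tcg_gen_goto_tb()/tcg_gen_exit_tb(tb + n); otherwise exit_tb(0) is used
 * so the execution loop looks up the next TB from cpu_pc.
 */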
static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        if (dc->r0 == R_R0) {
            if (dc->r1 == R_R0 && dc->imm16 == 0) {
                LOG_DIS("nop\n");
            } else {
                LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
            }
        } else {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
                    sign_extend(dc->imm16, 16));
        }
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        /* treat "and r0, r0, r0" as an idle hint and halt the CPU */
        if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
            tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
            gen_helper_hlt();
        } else {
            tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        }
    }
}
static void dec_andhi(DisasContext *dc)
{
    LOG_DIS("andhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);

    tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}
static void dec_b(DisasContext *dc)
{
    if (dc->r0 == R_RA) {
        LOG_DIS("ret\n");
    } else if (dc->r0 == R_EA) {
        LOG_DIS("eret\n");
    } else if (dc->r0 == R_BA) {
        LOG_DIS("bret\n");
    } else {
        LOG_DIS("b r%d\n", dc->r0);
    }

    /* restore IE.IE in case of an eret or bret */
    if (dc->r0 == R_EA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    } else if (dc->r0 == R_BA) {
        TCGv t0 = tcg_temp_new();
        int l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    }

    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}
static void dec_bi(DisasContext *dc)
{
    /* the branch offset is the sign-extended 26-bit immediate scaled by 4 */
    LOG_DIS("bi %d\n", sign_extend(dc->imm26, 26) * 4);

    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26, 26) * 4));

    dc->is_jmp = DISAS_TB_JUMP;
}
static inline void gen_cond_branch(DisasContext *dc, int cond)
{
    int l1;

    l1 = gen_new_label();
    tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
    gen_goto_tb(dc, 0, dc->pc + 4);
    gen_set_label(l1);
    /* the branch offset is the sign-extended 16-bit immediate scaled by 4 */
    gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16, 16) * 4));
    dc->is_jmp = DISAS_TB_JUMP;
}
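/*
 * The generated code branches to l1 when the condition holds: TB exit
 * slot 0 is the fall-through path (dc->pc + 4), slot 1 the taken path at
 * the scaled 16-bit offset.
 */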
static void dec_be(DisasContext *dc)
{
    LOG_DIS("be r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_EQ);
}

static void dec_bg(DisasContext *dc)
{
    LOG_DIS("bg r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GT);
}

static void dec_bge(DisasContext *dc)
{
    LOG_DIS("bge r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GE);
}

static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}

static void dec_bgu(DisasContext *dc)
{
    LOG_DIS("bgu r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GTU);
}

static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r0, dc->r1,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}
static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    /* the call offset is the sign-extended 26-bit immediate scaled by 4 */
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26, 26) * 4));

    dc->is_jmp = DISAS_TB_JUMP;
}
static inline void gen_compare(DisasContext *dc, int cond)
{
    int rX = (dc->format == OP_FMT_RR) ? dc->r2 : dc->r1;
    int rY = dc->r0; /* first source operand is r0 in both formats */
    int rZ = (dc->format == OP_FMT_RR) ? dc->r1 : -1;

    if (dc->format == OP_FMT_RI) {
        tcg_gen_setcondi_tl(cond, cpu_R[rX], cpu_R[rY],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_setcond_tl(cond, cpu_R[rX], cpu_R[rY], cpu_R[rZ]);
    }
}
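/*
 * In RI format the compare result is written to r1 and the second operand
 * is the sign-extended 16-bit immediate; in RR format the result goes to
 * r2 and the operands are r0 and r1.
 */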
static void dec_cmpe(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_EQ);
}

static void dec_cmpg(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GT);
}

static void dec_cmpge(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GE);
}

static void dec_cmpgeu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GEU);
}

static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GTU);
}

static void dec_cmpne(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_NE);
}
static void dec_divu(DisasContext *dc)
{
    int l1;

    LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
        cpu_abort(dc->env, "hardware divider is not available\n");
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}
static void dec_lb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lbu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lhu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
static void dec_modu(DisasContext *dc)
{
    int l1;

    LOG_DIS("modu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->env->features & LM32_FEATURE_DIVIDE)) {
        cpu_abort(dc->env, "hardware divider is not available\n");
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}
static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_MULTIPLY)) {
        cpu_abort(dc->env, "hardware multiplier is not available\n");
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
static void dec_nor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("nori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
static void dec_or(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
static void dec_orhi(DisasContext *dc)
{
    if (dc->r0 == R_R0) {
        LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
    } else {
        LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
    }

    tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}
static void dec_scall(DisasContext *dc)
{
    if (dc->imm5 == 7) {
        LOG_DIS("scall\n");
    } else if (dc->imm5 == 2) {
        LOG_DIS("break\n");
    } else {
        cpu_abort(dc->env, "invalid opcode\n");
    }

    if (dc->imm5 == 7) {
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
    } else {
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_BREAKPOINT);
    }
}
static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2]);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2]);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2]);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2]);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        cpu_abort(dc->env, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        cpu_abort(dc->env, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}
static void dec_sb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
static void dec_sextb(DisasContext *dc)
{
    LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
        cpu_abort(dc->env, "hardware sign extender is not available\n");
    }

    tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->env->features & LM32_FEATURE_SIGN_EXTEND)) {
        cpu_abort(dc->env, "hardware sign extender is not available\n");
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}
static void dec_sh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        cpu_abort(dc->env, "hardware shifter is not available\n");
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}
static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* without the barrel shifter only a shift amount of 1 is supported */
    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        if (dc->format == OP_FMT_RI) {
            if (dc->imm5 != 1) {
                cpu_abort(dc->env, "hardware shifter is not available\n");
            }
        } else {
            /* TODO: check at runtime that the shift amount in r1 is 1 */
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* without the barrel shifter only a shift amount of 1 is supported */
    if (!(dc->env->features & LM32_FEATURE_SHIFT)) {
        if (dc->format == OP_FMT_RI) {
            if (dc->imm5 != 1) {
                cpu_abort(dc->env, "hardware shifter is not available\n");
            }
        } else {
            /* TODO: check at runtime that the shift amount in r1 is 1 */
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}
static void dec_sub(DisasContext *dc)
{
    LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_sw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
static void dec_user(DisasContext *dc)
{
    LOG_DIS("user\n");

    cpu_abort(dc->env, "user insn undefined\n");
}
static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr r%d, %d\n", dc->r1, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (use_icount) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (use_icount) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_ICC:
        /* TODO */
        break;
    case CSR_DCC:
        /* TODO */
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_R[dc->r1]);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_dc, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->env->num_bps <= no) {
            cpu_abort(dc->env, "breakpoint #%i is not available\n", no);
        }
        tcg_gen_mov_tl(cpu_bp[no], cpu_R[dc->r1]);
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->env->num_wps <= no) {
            cpu_abort(dc->env, "watchpoint #%i is not available\n", no);
        }
        tcg_gen_mov_tl(cpu_wp[no], cpu_R[dc->r1]);
        break;
    case CSR_CC:
    case CSR_CFG:
        cpu_abort(dc->env, "invalid write access csr=%x\n", dc->csr);
        break;
    default:
        cpu_abort(dc->env, "write_csr unknown csr=%x\n", dc->csr);
        break;
    }
}
static void dec_xnor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xnori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
        tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
    } else {
        tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
static void dec_ill(DisasContext *dc)
{
    cpu_abort(dc->env, "unknown opcode 0x%02x\n", dc->opcode);
}
typedef void (*DecoderInfo)(DisasContext *dc);
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};
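/*
 * decinfo[] is indexed by the 6-bit major opcode: entries 0..31 are the
 * immediate (bit 31 clear) forms, entries 32..63 the register (bit 31 set)
 * forms, matching the format selection in decode().
 */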
static inline void decode(DisasContext *dc)
{
    uint32_t ir;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir = ldl_code(dc->pc);
    LOG_DIS("%8.8x\t", dc->ir);

    /* try guessing 'empty' instruction memory, although it may be a valid
     * instruction sequence (eg. srui r0, r0, 0) */
    if (dc->ir) {
        dc->nr_nops = 0;
    } else {
        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(dc->env, "fetching nop sequence\n");
        }
    }

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);

    dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
    dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
    dc->imm26 = EXTRACT_FIELD(ir, 0, 25);

    dc->csr = EXTRACT_FIELD(ir, 21, 25);
    dc->r0 = EXTRACT_FIELD(ir, 21, 25);
    dc->r1 = EXTRACT_FIELD(ir, 16, 20);
    dc->r2 = EXTRACT_FIELD(ir, 11, 15);

    /* bit 31 selects the instruction format: register (RR) vs. immediate (RI). */
    if (ir & (1 << 31)) {
        dc->format = OP_FMT_RR;
    } else {
        dc->format = OP_FMT_RI;
    }

    assert(ARRAY_SIZE(decinfo) == 64);
    assert(dc->opcode < 64);

    decinfo[dc->opcode](dc);
}
static void check_breakpoint(CPUState *env, DisasContext *dc)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                tcg_gen_movi_tl(cpu_pc, dc->pc);
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}
/* generate intermediate code for basic block 'tb'.  */
static void gen_intermediate_code_internal(CPUState *env,
        TranslationBlock *tb, int search_pc)
{
    struct DisasContext ctx, *dc = &ctx;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    qemu_log_try_set_file(stderr);

    pc_start = tb->pc;
    dc->env = env;
    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(env, "LM32: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("-----------------------------------------\n");
        log_cpu_state(env, 0);
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        check_breakpoint(env, dc);

        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc);
        dc->pc += 4;
        num_insns++;

    } while (!dc->is_jmp
         && gen_opc_ptr < gen_opc_end
         && !env->singlestep_enabled
         && !singlestep
         && (dc->pc < next_page_start)
         && num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(env->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
        log_target_disas(pc_start, dc->pc - pc_start, 0);
        qemu_log("\nisize=%d osize=%zd\n",
                dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
    }
#endif
}
void gen_intermediate_code(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
        int flags)
{
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
            env->pc, lookup_symbol(env->pc));

    cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
            env->ie,
            (env->ie & IE_IE) ? 1 : 0,
            (env->ie & IE_EIE) ? 1 : 0,
            (env->ie & IE_BIE) ? 1 : 0,
            lm32_pic_get_im(env->pic_state),
            lm32_pic_get_ip(env->pic_state));
    cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
            env->eba,
            env->deba);

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}
void lm32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                offsetof(CPUState, regs[i]),
                regnames[i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
        cpu_bp[i] = tcg_global_mem_new(TCG_AREG0,
                offsetof(CPUState, bp[i]),
                regnames[32 + i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
        cpu_wp[i] = tcg_global_mem_new(TCG_AREG0,
                offsetof(CPUState, wp[i]),
                regnames[36 + i]);
    }

    cpu_pc = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, pc),
            "pc");
    cpu_ie = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, ie),
            "ie");
    cpu_icc = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, icc),
            "icc");
    cpu_dcc = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, dcc),
            "dcc");
    cpu_cc = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, cc),
            "cc");
    cpu_cfg = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, cfg),
            "cfg");
    cpu_eba = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, eba),
            "eba");
    cpu_dc = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, dc),
            "dc");
    cpu_deba = tcg_global_mem_new(TCG_AREG0,
            offsetof(CPUState, deba),
            "deba");
}