ide/via: Implement and use native PCI IDE mode
[qemu/ar7.git] / target / lm32 / translate.c
blobb32feb75643a4c2d8a13f2921472756c4966cd43
1 /*
2 * LatticeMico32 main translation routines.
4 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/translator.h"
26 #include "tcg-op.h"
28 #include "exec/cpu_ldst.h"
29 #include "hw/lm32/lm32_pic.h"
31 #include "exec/helper-gen.h"
33 #include "trace-tcg.h"
34 #include "exec/log.h"
/* Set to 1 to enable the per-instruction disassembly trace below. */
#define DISAS_LM32 0

/* Emit a disassembly trace line under the CPU_LOG_TB_IN_ASM log mask;
 * compiles to nothing when DISAS_LM32 is 0. */
#define LOG_DIS(...) \
    do { \
        if (DISAS_LM32) { \
            qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
        } \
    } while (0)
/* Extract the inclusive bit field [start, end] (LSB = bit 0) from src.
 *
 * Bug fix: 'start' and 'end' were used unparenthesized, so passing an
 * expression (e.g. EXTRACT_FIELD(ir, 2 * 2, 7)) expanded with the wrong
 * operator precedence.  Every macro argument is now parenthesized.
 */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
/* MMU index for all generated loads/stores (single index on this target). */
#define MEM_INDEX 0

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
/* TCG globals mirroring the architectural state in CPULM32State;
 * all are created once in lm32_translate_init(). */
static TCGv cpu_R[32];   /* general purpose registers */
static TCGv cpu_pc;      /* program counter */
static TCGv cpu_ie;      /* interrupt enable CSR */
static TCGv cpu_icc;     /* instruction cache control CSR */
static TCGv cpu_dcc;     /* data cache control CSR */
static TCGv cpu_cc;      /* cycle counter CSR */
static TCGv cpu_cfg;     /* configuration CSR */
static TCGv cpu_eba;     /* exception base address CSR */
static TCGv cpu_dc;      /* debug control CSR */
static TCGv cpu_deba;    /* debug exception base address CSR */
static TCGv cpu_bp[4];   /* hardware breakpoint address CSRs */
static TCGv cpu_wp[4];   /* hardware watchpoint address CSRs */

#include "exec/gen-icount.h"
/* Operand formats recognized by the decoder (DisasContext.format). */
enum {
    OP_FMT_RI,   /* register + 16-bit immediate */
    OP_FMT_RR,   /* register + register */
    OP_FMT_CR,   /* control register */
    OP_FMT_I     /* immediate only */
};
/* This is the state at translation time. */
typedef struct DisasContext {
    target_ulong pc;     /* address of the insn currently being translated */

    /* Decoder. */
    int format;          /* OP_FMT_* operand format of the current insn */
    uint32_t ir;         /* raw instruction word */
    uint8_t opcode;      /* bits 31:26 of ir */
    uint8_t r0, r1, r2, csr;   /* register / CSR operand fields */
    uint16_t imm5;       /* 5-bit shift-amount immediate */
    uint16_t imm16;      /* 16-bit immediate */
    uint32_t imm26;      /* 26-bit branch immediate */

    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    int is_jmp;          /* DISAS_* disposition of the current TB */

    struct TranslationBlock *tb;
    int singlestep_enabled;

    uint32_t features;          /* LM32_FEATURE_* mask of the CPU model */
    uint8_t num_breakpoints;    /* hardware breakpoints implemented */
    uint8_t num_watchpoints;    /* hardware watchpoints implemented */
} DisasContext;
/* Names for the TCG globals: the 32 GPRs (with their ABI aliases), then
 * the four breakpoint and four watchpoint CSRs — indices 32..39 are used
 * by lm32_translate_init() for cpu_bp[]/cpu_wp[]. */
static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};
/*
 * Return the low 'width' bits of val, zero-extended (1 <= width <= 31).
 *
 * Bug fix: the mask was built with a signed "1 << width", which is
 * undefined behaviour for width == 31; use an unsigned constant.
 */
static inline int zero_extend(unsigned int val, int width)
{
    return val & ((1U << width) - 1);
}
/*
 * Sign-extend the low 'width' bits of val (1 <= width <= 31).
 *
 * Bug fix / portability: the original shifted a signed value right to
 * sign-extend, which is implementation-defined in C.  Use the portable
 * xor/subtract idiom instead: mask to 'width' bits, flip the sign bit,
 * then subtract the sign bit's weight.
 */
static inline int sign_extend(unsigned int val, int width)
{
    unsigned int sign = 1U << (width - 1);

    val &= (1U << width) - 1;
    return (int)(val ^ sign) - (int)sign;
}
/* Emit a call to the raise-exception helper for exception 'index'.
 * The helper does not return to generated code, so code emitted after
 * it on the same path is unreachable at run time. */
static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}
/* Report an illegal instruction: sync pc to the faulting insn and call
 * the 'ill' helper. */
static inline void t_gen_illegal_insn(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_helper_ill(cpu_env);
}
/* Return true when a direct TB-to-TB jump to 'dest' is permitted. */
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    /* In system mode only chain within the same guest page; the mapping
     * of other pages may change under us. */
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
/* Jump to 'dest': chain directly to the next TB through goto_tb slot 'n'
 * when allowed, otherwise set pc and exit to the main loop (raising
 * EXCP_DEBUG first when single-stepping). */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(NULL, 0);
    }
}
/* add/addi: integer addition.  The trace distinguishes the pseudo-ops
 * nop (addi r0, r0, 0) and mvi (addi rX, r0, imm); the generated code is
 * the same plain add in all cases. */
static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        if (dc->r0 == R_R0) {
            if (dc->r1 == R_R0 && dc->imm16 == 0) {
                LOG_DIS("nop\n");
            } else {
                LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
            }
        } else {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
                    sign_extend(dc->imm16, 16));
        }
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
/* and/andi: bitwise and (immediate form zero-extends).
 * "and r0, r0, r0" is special-cased to call the hlt helper with pc
 * advanced past the insn — presumably a halt hint encoding; verify
 * against the board/firmware that relies on it. */
static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        zero_extend(dc->imm16, 16));
    } else {
        if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
            tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
            gen_helper_hlt(cpu_env);
        } else {
            tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        }
    }
}
219 static void dec_andhi(DisasContext *dc)
221 LOG_DIS("andhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
223 tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
/* b: indirect branch through a register.  ret/eret/bret are the same
 * encoding with ra/ea/ba as the source register. */
static void dec_b(DisasContext *dc)
{
    if (dc->r0 == R_RA) {
        LOG_DIS("ret\n");
    } else if (dc->r0 == R_EA) {
        LOG_DIS("eret\n");
    } else if (dc->r0 == R_BA) {
        LOG_DIS("bret\n");
    } else {
        LOG_DIS("b r%d\n", dc->r0);
    }

    /* restore IE.IE in case of an eret */
    /* For eret/bret, copy IE.EIE/IE.BIE back into IE.IE: set IE.IE,
     * then clear it again unless the saved bit was set. */
    if (dc->r0 == R_EA) {
        TCGv t0 = tcg_temp_new();
        TCGLabel *l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    } else if (dc->r0 == R_BA) {
        TCGv t0 = tcg_temp_new();
        TCGLabel *l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    }

    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

/* bi: pc-relative unconditional branch with a 26-bit word offset. */
static void dec_bi(DisasContext *dc)
{
    LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));

    /* NOTE(review): sign_extend(imm26 << 2, 26) sign-extends the
     * already-shifted value at width 26, which discards the top two
     * bits of imm26; sign_extend(imm26, 26) * 4 looks intended —
     * confirm against the LM32 ISA before changing. */
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}
/* Common conditional-branch tail: fall through to pc+4 when the
 * condition on (r0, r1) fails, otherwise branch to pc + imm16*4. */
static inline void gen_cond_branch(DisasContext *dc, int cond)
{
    TCGLabel *l1 = gen_new_label();
    tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
    gen_goto_tb(dc, 0, dc->pc + 4);
    gen_set_label(l1);
    gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
    dc->is_jmp = DISAS_TB_JUMP;
}
/* be: branch if r0 == r1. */
static void dec_be(DisasContext *dc)
{
    LOG_DIS("be r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_EQ);
}
290 static void dec_bg(DisasContext *dc)
292 LOG_DIS("bg r%d, r%d, %d\n", dc->r1, dc->r0,
293 sign_extend(dc->imm16, 16 * 4));
295 gen_cond_branch(dc, TCG_COND_GT);
/* bge: branch if r0 >= r1 (signed). */
static void dec_bge(DisasContext *dc)
{
    LOG_DIS("bge r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GE);
}

/* bgeu: branch if r0 >= r1 (unsigned). */
static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}

/* bgu: branch if r0 > r1 (unsigned). */
static void dec_bgu(DisasContext *dc)
{
    LOG_DIS("bgu r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GTU);
}

/* bne: branch if r0 != r1. */
static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}
/* call: indirect call — save return address in ra, jump through r0. */
static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

/* calli: pc-relative call with a 26-bit word offset. */
static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    /* NOTE(review): same sign_extend(imm26 << 2, 26) pattern as dec_bi —
     * it drops the top two bits of imm26; confirm against the ISA. */
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}
/* Common compare tail: set the destination register to 0/1 from 'cond'.
 * Immediate forms zero-extend for the unsigned conditions and
 * sign-extend otherwise, matching the cmpXui/cmpXi encodings. */
static inline void gen_compare(DisasContext *dc, int cond)
{
    int i;

    if (dc->format == OP_FMT_RI) {
        switch (cond) {
        case TCG_COND_GEU:
        case TCG_COND_GTU:
            i = zero_extend(dc->imm16, 16);
            break;
        default:
            i = sign_extend(dc->imm16, 16);
            break;
        }

        tcg_gen_setcondi_tl(cond, cpu_R[dc->r1], cpu_R[dc->r0], i);
    } else {
        tcg_gen_setcond_tl(cond, cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
/* cmpe/cmpei: set dest to 1 if operands are equal. */
static void dec_cmpe(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_EQ);
}

/* cmpg/cmpgi: signed greater-than compare. */
static void dec_cmpg(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GT);
}

/* cmpge/cmpgei: signed greater-or-equal compare. */
static void dec_cmpge(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GE);
}

/* cmpgeu/cmpgeui: unsigned greater-or-equal compare. */
static void dec_cmpgeu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GEU);
}

/* cmpgu/cmpgui: unsigned greater-than compare. */
static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GTU);
}

/* cmpne/cmpnei: set dest to 1 if operands differ. */
static void dec_cmpne(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_NE);
}
/* divu: unsigned division.  Illegal insn when the CPU model lacks the
 * hardware divider; traps EXCP_DIVIDE_BY_ZERO on a zero divisor. */
static void dec_divu(DisasContext *dc)
{
    TCGLabel *l1;

    LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->features & LM32_FEATURE_DIVIDE)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    /* Skip the exception path when the divisor is non-zero. */
    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}
/* lb: load signed byte from r0 + sign-extended imm16. */
static void dec_lb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lbu: load unsigned byte. */
static void dec_lbu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lh: load signed half-word. */
static void dec_lh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lhu: load unsigned half-word. */
static void dec_lhu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* lw: load 32-bit word. */
static void dec_lw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
523 static void dec_modu(DisasContext *dc)
525 TCGLabel *l1;
527 LOG_DIS("modu r%d, r%d, %d\n", dc->r2, dc->r0, dc->r1);
529 if (!(dc->features & LM32_FEATURE_DIVIDE)) {
530 qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
531 t_gen_illegal_insn(dc);
532 return;
535 l1 = gen_new_label();
536 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
537 tcg_gen_movi_tl(cpu_pc, dc->pc);
538 t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
539 gen_set_label(l1);
540 tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
/* mul/muli: multiplication; illegal insn when the CPU model lacks the
 * hardware multiplier. */
static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "hardware multiplier is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
/* nor/nori: bitwise nor.  The immediate form has no tcg_gen_nori, so
 * the immediate is materialized in a temp first. */
static void dec_nor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("nori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

/* or/ori: bitwise or; "or rX, rY, r0" is traced as the mv pseudo-op. */
static void dec_or(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                       zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}
607 static void dec_orhi(DisasContext *dc)
609 if (dc->r0 == R_R0) {
610 LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
611 } else {
612 LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
615 tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
/* scall/break: imm5 selects the trap — 2 raises EXCP_BREAKPOINT,
 * 7 raises EXCP_SYSTEMCALL, anything else is an illegal insn. */
static void dec_scall(DisasContext *dc)
{
    switch (dc->imm5) {
    case 2:
        LOG_DIS("break\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_BREAKPOINT);
        break;
    case 7:
        LOG_DIS("scall\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x", dc->pc);
        t_gen_illegal_insn(dc);
        break;
    }
}
/* rcsr: read a control/status register into r2.  IM/IP and the JTAG
 * registers live outside CPULM32State and go through helpers; the
 * cache-control and bp/wp registers are write-only. */
static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        /* write-only registers */
        qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}
/* sb: store byte to r0 + sign-extended imm16. */
static void dec_sb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

/* sextb: sign-extend byte; illegal when the CPU model lacks the
 * hardware sign extender. */
static void dec_sextb(DisasContext *dc)
{
    LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

/* sexth: sign-extend half-word; same feature gate as sextb. */
static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

/* sh: store half-word. */
static void dec_sh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
/* sl/sli: shift left; illegal when the CPU model lacks the hardware
 * shifter.  The register form masks the count to 5 bits. */
static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_SHIFT)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

/* sr/sri: arithmetic shift right. */
static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* The real CPU (w/o hardware shifter) only supports right shift by exactly
     * one bit */
    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        /* t0 must survive the t_gen_illegal_insn() helper call, hence a
         * local temp. */
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        /* Without the shifter feature, only a runtime count of exactly 1
         * is legal; any other count raises an illegal-insn exception. */
        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}
/* sru/srui: logical shift right.  Same feature handling as dec_sr:
 * without the hardware shifter only a shift count of 1 is legal. */
static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        /* local temp: must survive the t_gen_illegal_insn() helper call */
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}
/* sub: integer subtraction (register form only). */
static void dec_sub(DisasContext *dc)
{
    LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

/* sw: store 32-bit word to r0 + sign-extended imm16. */
static void dec_sw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}
860 static void dec_user(DisasContext *dc)
862 LOG_DIS("user");
864 qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
865 t_gen_illegal_insn(dc);
/* wcsr: write r1 into a control/status register.  IE/IM/IP writes end
 * the TB (DISAS_UPDATE) because they can change the interrupt state;
 * bp/wp writes are validated against the CPU model's counts. */
static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr %d, r%d\n", dc->csr, dc->r1);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_ICC:
        /* TODO */
        break;
    case CSR_DCC:
        /* TODO */
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_DC:
        gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->num_breakpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "breakpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->num_watchpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "watchpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_CC:
    case CSR_CFG:
        /* read-only registers */
        qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n",
                      dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n",
                      dc->csr);
        break;
    }
}
/* xnor/xnori: bitwise xnor; "xnor rX, rY, r0" is traced as the not
 * pseudo-op.  The immediate form is emitted as xori followed by not. */
static void dec_xnor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xnori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        zero_extend(dc->imm16, 16));
        tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
    } else {
        tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

/* xor/xori: bitwise exclusive or. */
static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

/* Fallback for undefined opcodes: log and raise illegal insn. */
static void dec_ill(DisasContext *dc)
{
    qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode);
    t_gen_illegal_insn(dc);
}
/* Opcode dispatch table, indexed by instruction bits 31:26.  The first
 * 32 entries are the RI (immediate) forms, the second 32 the RR forms;
 * decode() asserts the table holds exactly 64 entries. */
typedef void (*DecoderInfo)(DisasContext *dc);
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};
1022 static inline void decode(DisasContext *dc, uint32_t ir)
1024 dc->ir = ir;
1025 LOG_DIS("%8.8x\t", dc->ir);
1027 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1029 dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
1030 dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
1031 dc->imm26 = EXTRACT_FIELD(ir, 0, 25);
1033 dc->csr = EXTRACT_FIELD(ir, 21, 25);
1034 dc->r0 = EXTRACT_FIELD(ir, 21, 25);
1035 dc->r1 = EXTRACT_FIELD(ir, 16, 20);
1036 dc->r2 = EXTRACT_FIELD(ir, 11, 15);
1038 /* bit 31 seems to indicate insn type. */
1039 if (ir & (1 << 31)) {
1040 dc->format = OP_FMT_RR;
1041 } else {
1042 dc->format = OP_FMT_RI;
1045 assert(ARRAY_SIZE(decinfo) == 64);
1046 assert(dc->opcode < 64);
1048 decinfo[dc->opcode](dc);
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
{
    CPULM32State *env = cs->env_ptr;
    LM32CPU *cpu = lm32_env_get_cpu(env);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t page_start;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->features = cpu->features;
    dc->num_breakpoints = cpu->num_breakpoints;
    dc->num_watchpoints = cpu->num_watchpoints;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;

    /* Instructions are word-aligned; drop stray low bits from the pc. */
    if (pc_start & 3) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unaligned PC=%x. Ignoring lowest bits.\n", pc_start);
        pc_start &= ~3;
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;
    max_insns = tb_cflags(tb) & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order to for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 4;
            break;
        }

        /* Pretty disas. */
        LOG_DIS("%8.8x:\t", dc->pc);

        /* The last insn of an icount TB may do I/O. */
        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc, cpu_ldl_code(env, dc->pc));
        dc->pc += 4;
    } while (!dc->is_jmp
         && !tcg_op_buf_full()
         && !cs->singlestep_enabled
         && !singlestep
         && (dc->pc - page_start < TARGET_PAGE_SIZE)
         && num_insns < max_insns);

    if (tb_cflags(tb) & CF_LAST_IO) {
        gen_io_end();
    }

    if (unlikely(cs->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
}
/* Dump pc, interrupt state, exception bases and all 32 GPRs to 'f'
 * (monitor "info registers" backend). */
void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    LM32CPU *cpu = LM32_CPU(cs);
    CPULM32State *env = &cpu->env;
    int i;

    if (!env || !f) {
        return;
    }

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->pc, lookup_symbol(env->pc));

    cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
                env->ie,
                (env->ie & IE_IE) ? 1 : 0,
                (env->ie & IE_EIE) ? 1 : 0,
                (env->ie & IE_BIE) ? 1 : 0,
                lm32_pic_get_im(env->pic_state),
                lm32_pic_get_ip(env->pic_state));
    cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
                env->eba,
                env->deba);

    /* Four registers per output line. */
    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n\n");
}
/* Restore cpu state from the insn_start data recorded by
 * tcg_gen_insn_start(); only the pc is recorded on this target. */
void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
1204 void lm32_translate_init(void)
1206 int i;
1208 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1209 cpu_R[i] = tcg_global_mem_new(cpu_env,
1210 offsetof(CPULM32State, regs[i]),
1211 regnames[i]);
1214 for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
1215 cpu_bp[i] = tcg_global_mem_new(cpu_env,
1216 offsetof(CPULM32State, bp[i]),
1217 regnames[32+i]);
1220 for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
1221 cpu_wp[i] = tcg_global_mem_new(cpu_env,
1222 offsetof(CPULM32State, wp[i]),
1223 regnames[36+i]);
1226 cpu_pc = tcg_global_mem_new(cpu_env,
1227 offsetof(CPULM32State, pc),
1228 "pc");
1229 cpu_ie = tcg_global_mem_new(cpu_env,
1230 offsetof(CPULM32State, ie),
1231 "ie");
1232 cpu_icc = tcg_global_mem_new(cpu_env,
1233 offsetof(CPULM32State, icc),
1234 "icc");
1235 cpu_dcc = tcg_global_mem_new(cpu_env,
1236 offsetof(CPULM32State, dcc),
1237 "dcc");
1238 cpu_cc = tcg_global_mem_new(cpu_env,
1239 offsetof(CPULM32State, cc),
1240 "cc");
1241 cpu_cfg = tcg_global_mem_new(cpu_env,
1242 offsetof(CPULM32State, cfg),
1243 "cfg");
1244 cpu_eba = tcg_global_mem_new(cpu_env,
1245 offsetof(CPULM32State, eba),
1246 "eba");
1247 cpu_dc = tcg_global_mem_new(cpu_env,
1248 offsetof(CPULM32State, dc),
1249 "dc");
1250 cpu_deba = tcg_global_mem_new(cpu_env,
1251 offsetof(CPULM32State, deba),
1252 "deba");