/*
 * Moxie emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009, 2013 Anthony Green
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
/* For information on the Moxie architecture, see
 *   http://moxielogic.org/wiki
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "disas/disas.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "qemu/qemu-print.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
/* This is the state at translation time. */
typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc, saved_pc;
    uint32_t opcode;
    /* Routine used to access memory */
    int memidx;
    int bstate;
    int singlestep_enabled;
} DisasContext;
enum {
    BS_NONE = 0,   /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP = 1,   /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3,   /* We reached an exception condition */
};
static TCGv cpu_pc;
static TCGv cpu_gregs[16];
static TCGv cc_a, cc_b;

#include "exec/gen-icount.h"

#define REG(x) (cpu_gregs[x])
/* Extract the signed 10-bit offset from a 16-bit branch
   instruction. */
static int extract_branch_offset(int opcode)
{
    return (((signed short)((opcode & ((1 << 10) - 1)) << 6)) >> 6) << 1;
}
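
/*
 * Worked example of the extraction above: for an opcode whose low ten bits
 * are all ones (0x3ff), shifting left by 6 places the field's sign bit at
 * bit 15 of the signed short (0xffc0 == -64), the arithmetic shift right
 * by 6 sign-extends it back to -1, and the final << 1 scales it to a byte
 * offset of -2, since branch targets are measured in 16-bit units.
 */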
void moxie_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    MoxieCPU *cpu = MOXIE_CPU(cs);
    CPUMoxieState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "pc=0x%08x\n", env->pc);
    qemu_fprintf(f, "$fp=0x%08x $sp=0x%08x $r0=0x%08x $r1=0x%08x\n",
                 env->gregs[0], env->gregs[1], env->gregs[2], env->gregs[3]);
    for (i = 4; i < 16; i += 4) {
        qemu_fprintf(f, "$r%d=0x%08x $r%d=0x%08x $r%d=0x%08x $r%d=0x%08x\n",
                     i - 2, env->gregs[i], i - 1, env->gregs[i + 1],
                     i, env->gregs[i + 2], i + 1, env->gregs[i + 3]);
    }
    for (i = 4; i < 16; i += 4) {
        qemu_fprintf(f, "sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x\n",
                     i - 2, env->sregs[i], i - 1, env->sregs[i + 1],
                     i, env->sregs[i + 2], i + 1, env->sregs[i + 3]);
    }
}
void moxie_translate_init(void)
{
    int i;
    static const char * const gregnames[16] = {
        "$fp", "$sp", "$r0", "$r1",
        "$r2", "$r3", "$r4", "$r5",
        "$r6", "$r7", "$r8", "$r9",
        "$r10", "$r11", "$r12", "$r13"
    };

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUMoxieState, pc), "$pc");
    for (i = 0; i < 16; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUMoxieState, gregs[i]),
                                              gregnames[i]);
    }

    cc_a = tcg_global_mem_new_i32(cpu_env,
                                  offsetof(CPUMoxieState, cc_a), "cc_a");
    cc_b = tcg_global_mem_new_i32(cpu_env,
                                  offsetof(CPUMoxieState, cc_b), "cc_b");
}
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
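
/*
 * When use_goto_tb() allows it, gen_goto_tb() emits a goto_tb/exit_tb pair
 * so the generated block can later be patched to chain directly into the
 * next TB; otherwise it falls back to loading the new PC and returning to
 * the main loop with tcg_gen_exit_tb(NULL, 0).  Direct chaining is only
 * safe when the destination stays on the same guest page and
 * single-stepping is off, which is exactly what use_goto_tb() checks.
 */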
static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx,
                               int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(NULL, 0);
    }
}
static int decode_opc(MoxieCPU *cpu, DisasContext *ctx)
{
    CPUMoxieState *env = &cpu->env;

    /* Local cache for the instruction opcode. */
    int opcode;

    /* Set the default instruction length. */
    int length = 2;

    /* Examine the 16-bit opcode. */
    opcode = ctx->opcode;
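
    /*
     * The top two opcode bits select the instruction form decoded below:
     * both set means Form 3 (a 4-bit opcode in bits 13..10 plus a 10-bit
     * branch offset), bit 15 alone means Form 2 (a 2-bit opcode in bits
     * 13..12, one register field and an 8-bit immediate), and a clear
     * bit 15 means Form 1 (an 8-bit opcode in the high byte and two 4-bit
     * register fields in the low byte).
     */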
    /* Decode instruction. */
    if (opcode & (1 << 15)) {
        if (opcode & (1 << 14)) {
            /* This is a Form 3 instruction. */
            int inst = (opcode >> 10 & 0xf);
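
/*
 * Each conditional branch compares the cc_a/cc_b values latched by a
 * preceding cmp instruction.  The BRANCH macro below emits a conditional
 * jump over the fall-through path: TB slot 1 chains to the fall-through
 * address (ctx->pc + 2), and TB slot 0 chains to the branch target, i.e.
 * the sign-extended 10-bit offset added to the fall-through address.
 */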
#define BRANCH(cond)                                                         \
    do {                                                                     \
        TCGLabel *l1 = gen_new_label();                                      \
        tcg_gen_brcond_i32(cond, cc_a, cc_b, l1);                            \
        gen_goto_tb(env, ctx, 1, ctx->pc+2);                                 \
        gen_set_label(l1);                                                   \
        gen_goto_tb(env, ctx, 0, extract_branch_offset(opcode) + ctx->pc+2); \
        ctx->bstate = BS_BRANCH;                                             \
    } while (0)

            switch (inst) {
            case 0x04: /* bltu */
                BRANCH(TCG_COND_LTU);
                break;
            case 0x05: /* bgtu */
                BRANCH(TCG_COND_GTU);
                break;
            case 0x08: /* bgeu */
                BRANCH(TCG_COND_GEU);
                break;
            case 0x09: /* bleu */
                BRANCH(TCG_COND_LEU);
                break;
            default:
                {
                    TCGv temp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(cpu_pc, ctx->pc);
                    tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
                    gen_helper_raise_exception(cpu_env, temp);
                    tcg_temp_free_i32(temp);
                }
                break;
            }
        } else {
            /* This is a Form 2 instruction. */
            int inst = (opcode >> 12 & 0x3);
            switch (inst) {
            case 0x00: /* inc */
                {
                    int a = (opcode >> 8) & 0xf;
                    unsigned int v = (opcode & 0xff);
                    tcg_gen_addi_i32(REG(a), REG(a), v);
                }
                break;
            case 0x01: /* dec */
                {
                    int a = (opcode >> 8) & 0xf;
                    unsigned int v = (opcode & 0xff);
                    tcg_gen_subi_i32(REG(a), REG(a), v);
                }
                break;
            case 0x02: /* gsr */
                {
                    int a = (opcode >> 8) & 0xf;
                    unsigned v = (opcode & 0xff);
                    tcg_gen_ld_i32(REG(a), cpu_env,
                                   offsetof(CPUMoxieState, sregs[v]));
                }
                break;
            case 0x03: /* ssr */
                {
                    int a = (opcode >> 8) & 0xf;
                    unsigned v = (opcode & 0xff);
                    tcg_gen_st_i32(REG(a), cpu_env,
                                   offsetof(CPUMoxieState, sregs[v]));
                }
                break;
            default:
                {
                    TCGv temp = tcg_temp_new_i32();
                    tcg_gen_movi_i32(cpu_pc, ctx->pc);
                    tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
                    gen_helper_raise_exception(cpu_env, temp);
                    tcg_temp_free_i32(temp);
                }
                break;
            }
        }
    } else {
        /* This is a Form 1 instruction. */
        int inst = opcode >> 8;
        switch (inst) {
        case 0x01: /* ldi.l (immediate) */
            {
                int reg = (opcode >> 4) & 0xf;
                int val = cpu_ldl_code(env, ctx->pc+2);
                tcg_gen_movi_i32(REG(reg), val);
                length = 6;
            }
            break;
        case 0x02: /* mov (register-to-register) */
            {
                int dest = (opcode >> 4) & 0xf;
                int src = opcode & 0xf;
                tcg_gen_mov_i32(REG(dest), REG(src));
            }
            break;
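
        /*
         * The subroutine-call instructions below (jsra and jsr) share the
         * same frame setup, with REG(1) as $sp and REG(0) as $fp: the
         * return address is written into a two-word slot (the second word
         * is reserved for the static chain), the caller's $fp is pushed,
         * and $fp is then pointed at the new top of stack.  ret undoes
         * the same layout in reverse.
         */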
        case 0x03: /* jsra */
            {
                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();

                tcg_gen_movi_i32(t1, ctx->pc + 6);

                /* Make space for the static chain and return address. */
                tcg_gen_subi_i32(t2, REG(1), 8);
                tcg_gen_mov_i32(REG(1), t2);
                tcg_gen_qemu_st32(t1, REG(1), ctx->memidx);

                /* Push the current frame pointer. */
                tcg_gen_subi_i32(t2, REG(1), 4);
                tcg_gen_mov_i32(REG(1), t2);
                tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx);

                /* Set the pc and $fp. */
                tcg_gen_mov_i32(REG(0), REG(1));

                gen_goto_tb(env, ctx, 0, cpu_ldl_code(env, ctx->pc+2));

                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);

                ctx->bstate = BS_BRANCH;
                length = 6;
            }
            break;
        case 0x04: /* ret */
            {
                TCGv t1 = tcg_temp_new_i32();

                /* The new $sp is the old $fp. */
                tcg_gen_mov_i32(REG(1), REG(0));

                /* Pop the frame pointer. */
                tcg_gen_qemu_ld32u(REG(0), REG(1), ctx->memidx);
                tcg_gen_addi_i32(t1, REG(1), 4);
                tcg_gen_mov_i32(REG(1), t1);

                /* Pop the return address and skip over the static chain
                   slot. */
                tcg_gen_qemu_ld32u(cpu_pc, REG(1), ctx->memidx);
                tcg_gen_addi_i32(t1, REG(1), 8);
                tcg_gen_mov_i32(REG(1), t1);

                tcg_temp_free_i32(t1);

                tcg_gen_exit_tb(NULL, 0);

                ctx->bstate = BS_BRANCH;
            }
            break;
        case 0x05: /* add.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_add_i32(REG(a), REG(a), REG(b));
            }
            break;
        case 0x06: /* push */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv t1 = tcg_temp_new_i32();
                tcg_gen_subi_i32(t1, REG(a), 4);
                tcg_gen_mov_i32(REG(a), t1);
                tcg_gen_qemu_st32(REG(b), REG(a), ctx->memidx);
                tcg_temp_free_i32(t1);
            }
            break;
        case 0x07: /* pop */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;
                TCGv t1 = tcg_temp_new_i32();

                tcg_gen_qemu_ld32u(REG(b), REG(a), ctx->memidx);
                tcg_gen_addi_i32(t1, REG(a), 4);
                tcg_gen_mov_i32(REG(a), t1);
                tcg_temp_free_i32(t1);
            }
            break;
        case 0x08: /* lda.l */
            {
                int reg = (opcode >> 4) & 0xf;

                TCGv ptr = tcg_temp_new_i32();
                tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_ld32u(REG(reg), ptr, ctx->memidx);
                tcg_temp_free_i32(ptr);

                length = 6;
            }
            break;
        case 0x09: /* sta.l */
            {
                int val = (opcode >> 4) & 0xf;

                TCGv ptr = tcg_temp_new_i32();
                tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_st32(REG(val), ptr, ctx->memidx);
                tcg_temp_free_i32(ptr);

                length = 6;
            }
            break;
        case 0x0a: /* ld.l (register indirect) */
            {
                int src = opcode & 0xf;
                int dest = (opcode >> 4) & 0xf;

                tcg_gen_qemu_ld32u(REG(dest), REG(src), ctx->memidx);
            }
            break;
        case 0x0b: /* st.l */
            {
                int dest = (opcode >> 4) & 0xf;
                int val = opcode & 0xf;

                tcg_gen_qemu_st32(REG(val), REG(dest), ctx->memidx);
            }
            break;
        case 0x0c: /* ldo.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();
                tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_ld32u(t2, t1, ctx->memidx);
                tcg_gen_mov_i32(REG(a), t2);

                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);

                length = 6;
            }
            break;
        case 0x0d: /* sto.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();
                tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_st32(REG(b), t1, ctx->memidx);

                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);

                length = 6;
            }
            break;
        case 0x0e: /* cmp */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_mov_i32(cc_a, REG(a));
                tcg_gen_mov_i32(cc_b, REG(b));
            }
            break;
        case 0x19: /* jsr */
            {
                int fnreg = (opcode >> 4) & 0xf;

                /* Load the stack pointer into T0. */
                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();

                tcg_gen_movi_i32(t1, ctx->pc+2);

                /* Make space for the static chain and return address. */
                tcg_gen_subi_i32(t2, REG(1), 8);
                tcg_gen_mov_i32(REG(1), t2);
                tcg_gen_qemu_st32(t1, REG(1), ctx->memidx);

                /* Push the current frame pointer. */
                tcg_gen_subi_i32(t2, REG(1), 4);
                tcg_gen_mov_i32(REG(1), t2);
                tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx);

                /* Set the pc and $fp. */
                tcg_gen_mov_i32(REG(0), REG(1));
                tcg_gen_mov_i32(cpu_pc, REG(fnreg));
                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);
                tcg_gen_exit_tb(NULL, 0);
                ctx->bstate = BS_BRANCH;
            }
            break;
        case 0x1a: /* jmpa */
            {
                tcg_gen_movi_i32(cpu_pc, cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_exit_tb(NULL, 0);
                ctx->bstate = BS_BRANCH;
                length = 6;
            }
            break;
        case 0x1b: /* ldi.b (immediate) */
            {
                int reg = (opcode >> 4) & 0xf;
                int val = cpu_ldl_code(env, ctx->pc+2);
                tcg_gen_movi_i32(REG(reg), val);
                length = 6;
            }
            break;
        case 0x1c: /* ld.b (register indirect) */
            {
                int src = opcode & 0xf;
                int dest = (opcode >> 4) & 0xf;

                tcg_gen_qemu_ld8u(REG(dest), REG(src), ctx->memidx);
            }
            break;
        case 0x1d: /* lda.b */
            {
                int reg = (opcode >> 4) & 0xf;

                TCGv ptr = tcg_temp_new_i32();
                tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_ld8u(REG(reg), ptr, ctx->memidx);
                tcg_temp_free_i32(ptr);

                length = 6;
            }
            break;
        case 0x1e: /* st.b */
            {
                int dest = (opcode >> 4) & 0xf;
                int val = opcode & 0xf;

                tcg_gen_qemu_st8(REG(val), REG(dest), ctx->memidx);
            }
            break;
        case 0x1f: /* sta.b */
            {
                int val = (opcode >> 4) & 0xf;

                TCGv ptr = tcg_temp_new_i32();
                tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_st8(REG(val), ptr, ctx->memidx);
                tcg_temp_free_i32(ptr);

                length = 6;
            }
            break;
        case 0x20: /* ldi.s (immediate) */
            {
                int reg = (opcode >> 4) & 0xf;
                int val = cpu_ldl_code(env, ctx->pc+2);
                tcg_gen_movi_i32(REG(reg), val);
                length = 6;
            }
            break;
        case 0x21: /* ld.s (register indirect) */
            {
                int src = opcode & 0xf;
                int dest = (opcode >> 4) & 0xf;

                tcg_gen_qemu_ld16u(REG(dest), REG(src), ctx->memidx);
            }
            break;
        case 0x22: /* lda.s */
            {
                int reg = (opcode >> 4) & 0xf;

                TCGv ptr = tcg_temp_new_i32();
                tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_ld16u(REG(reg), ptr, ctx->memidx);
                tcg_temp_free_i32(ptr);

                length = 6;
            }
            break;
        case 0x23: /* st.s */
            {
                int dest = (opcode >> 4) & 0xf;
                int val = opcode & 0xf;

                tcg_gen_qemu_st16(REG(val), REG(dest), ctx->memidx);
            }
            break;
        case 0x24: /* sta.s */
            {
                int val = (opcode >> 4) & 0xf;

                TCGv ptr = tcg_temp_new_i32();
                tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_st16(REG(val), ptr, ctx->memidx);
                tcg_temp_free_i32(ptr);

                length = 6;
            }
            break;
        case 0x25: /* jmp */
            {
                int reg = (opcode >> 4) & 0xf;
                tcg_gen_mov_i32(cpu_pc, REG(reg));
                tcg_gen_exit_tb(NULL, 0);
                ctx->bstate = BS_BRANCH;
            }
            break;
        case 0x26: /* and */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_and_i32(REG(a), REG(a), REG(b));
            }
            break;
        case 0x27: /* lshr */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv sv = tcg_temp_new_i32();
                tcg_gen_andi_i32(sv, REG(b), 0x1f);
                tcg_gen_shr_i32(REG(a), REG(a), sv);
                tcg_temp_free_i32(sv);
            }
            break;
        case 0x28: /* ashl */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv sv = tcg_temp_new_i32();
                tcg_gen_andi_i32(sv, REG(b), 0x1f);
                tcg_gen_shl_i32(REG(a), REG(a), sv);
                tcg_temp_free_i32(sv);
            }
            break;
        case 0x29: /* sub.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_sub_i32(REG(a), REG(a), REG(b));
            }
            break;
        case 0x2a: /* neg */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_neg_i32(REG(a), REG(b));
            }
            break;
        case 0x2b: /* or */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_or_i32(REG(a), REG(a), REG(b));
            }
            break;
        case 0x2c: /* not */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_not_i32(REG(a), REG(b));
            }
            break;
        case 0x2d: /* ashr */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv sv = tcg_temp_new_i32();
                tcg_gen_andi_i32(sv, REG(b), 0x1f);
                tcg_gen_sar_i32(REG(a), REG(a), sv);
                tcg_temp_free_i32(sv);
            }
            break;
        case 0x2e: /* xor */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_xor_i32(REG(a), REG(a), REG(b));
            }
            break;
        case 0x2f: /* mul.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                tcg_gen_mul_i32(REG(a), REG(a), REG(b));
            }
            break;
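
        /*
         * The next case stores the instruction's 32-bit immediate into
         * special register 3 before raising MOXIE_EX_SWI, presumably so
         * the exception handler can recover the requested software
         * interrupt number from there.
         */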
        case 0x30: /* swi */
            {
                int val = cpu_ldl_code(env, ctx->pc+2);

                TCGv temp = tcg_temp_new_i32();
                tcg_gen_movi_i32(temp, val);
                tcg_gen_st_i32(temp, cpu_env,
                               offsetof(CPUMoxieState, sregs[3]));
                tcg_gen_movi_i32(cpu_pc, ctx->pc);
                tcg_gen_movi_i32(temp, MOXIE_EX_SWI);
                gen_helper_raise_exception(cpu_env, temp);
                tcg_temp_free_i32(temp);

                length = 6;
            }
            break;
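
        /*
         * Signed and unsigned division go through helpers rather than
         * plain TCG ops; cpu_pc is written first so that, if the helper
         * raises an exception (for example on division by zero), the
         * guest PC it reports is the address of the faulting instruction.
         */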
        case 0x31: /* div.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;
                tcg_gen_movi_i32(cpu_pc, ctx->pc);
                gen_helper_div(REG(a), cpu_env, REG(a), REG(b));
            }
            break;
        case 0x32: /* udiv.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;
                tcg_gen_movi_i32(cpu_pc, ctx->pc);
                gen_helper_udiv(REG(a), cpu_env, REG(a), REG(b));
            }
            break;
        case 0x33: /* mod.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;
                tcg_gen_rem_i32(REG(a), REG(a), REG(b));
            }
            break;
        case 0x34: /* umod.l */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;
                tcg_gen_remu_i32(REG(a), REG(a), REG(b));
            }
            break;
        case 0x35: /* brk */
            {
                TCGv temp = tcg_temp_new_i32();
                tcg_gen_movi_i32(cpu_pc, ctx->pc);
                tcg_gen_movi_i32(temp, MOXIE_EX_BREAK);
                gen_helper_raise_exception(cpu_env, temp);
                tcg_temp_free_i32(temp);
            }
            break;
        case 0x36: /* ldo.b */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();
                tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_ld8u(t2, t1, ctx->memidx);
                tcg_gen_mov_i32(REG(a), t2);

                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);

                length = 6;
            }
            break;
        case 0x37: /* sto.b */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();
                tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_st8(REG(b), t1, ctx->memidx);

                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);

                length = 6;
            }
            break;
        case 0x38: /* ldo.s */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();
                tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_ld16u(t2, t1, ctx->memidx);
                tcg_gen_mov_i32(REG(a), t2);

                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);

                length = 6;
            }
            break;
        case 0x39: /* sto.s */
            {
                int a = (opcode >> 4) & 0xf;
                int b = opcode & 0xf;

                TCGv t1 = tcg_temp_new_i32();
                TCGv t2 = tcg_temp_new_i32();
                tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
                tcg_gen_qemu_st16(REG(b), t1, ctx->memidx);
                tcg_temp_free_i32(t1);
                tcg_temp_free_i32(t2);

                length = 6;
            }
            break;
        default:
            {
                TCGv temp = tcg_temp_new_i32();
                tcg_gen_movi_i32(cpu_pc, ctx->pc);
                tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
                gen_helper_raise_exception(cpu_env, temp);
                tcg_temp_free_i32(temp);
            }
            break;
        }
    }

    return length;
}
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPUMoxieState *env = cs->env_ptr;
    MoxieCPU *cpu = env_archcpu(env);
    DisasContext ctx;
    target_ulong pc_start;
    int num_insns;

    pc_start = tb->pc;
    ctx.pc = pc_start;
    ctx.tb = tb;
    ctx.memidx = 0;
    ctx.singlestep_enabled = 0;
    ctx.bstate = BS_NONE;
    num_insns = 0;

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(ctx.pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
            tcg_gen_movi_i32(cpu_pc, ctx.pc);
            gen_helper_debug(cpu_env);
            ctx.bstate = BS_EXCP;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            ctx.pc += 2;
            goto done_generating;
        }

        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        ctx.pc += decode_opc(cpu, &ctx);

        if (num_insns >= max_insns) {
            break;
        }
        if (cs->singlestep_enabled) {
            break;
        }
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) {
            break;
        }
    } while (ctx.bstate == BS_NONE && !tcg_op_buf_full());

    if (cs->singlestep_enabled) {
        tcg_gen_movi_tl(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
        case BS_NONE:
            gen_goto_tb(env, &ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            tcg_gen_exit_tb(NULL, 0);
            break;
        case BS_BRANCH:
        default:
            break;
        }
    }

 done_generating:
    gen_tb_end(tb, num_insns);

    tb->size = ctx.pc - pc_start;
    tb->icount = num_insns;
}
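
/*
 * The per-instruction data recorded by tcg_gen_insn_start(ctx.pc) above is
 * what gets handed back here when QEMU needs to recover the guest state for
 * an instruction that trapped mid-TB, so data[0] is simply the guest PC.
 */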
void restore_state_to_opc(CPUMoxieState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}