[qemu/ar7.git] / target-microblaze / translate.c
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "disas/disas.h"
23 #include "tcg-op.h"
24 #include "exec/helper-proto.h"
25 #include "microblaze-decode.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-gen.h"
29 #include "trace-tcg.h"
32 #define SIM_COMPAT 0
33 #define DISAS_GNU 1
34 #define DISAS_MB 1
35 #if DISAS_MB && !SIM_COMPAT
36 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
37 #else
38 # define LOG_DIS(...) do { } while (0)
39 #endif
41 #define D(x)
43 #define EXTRACT_FIELD(src, start, end) \
44 (((src) >> start) & ((1 << (end - start + 1)) - 1))
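/*
 * Example: EXTRACT_FIELD(ir, 26, 31) evaluates to (ir >> 26) & 0x3f, i.e.
 * instruction bits [31:26]; decode() below uses it exactly that way to pull
 * out the major opcode, and EXTRACT_FIELD(ir, 0, 15) yields the 16-bit
 * immediate field.
 */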
46 static TCGv env_debug;
47 static TCGv_ptr cpu_env;
48 static TCGv cpu_R[32];
49 static TCGv cpu_SR[18];
50 static TCGv env_imm;
51 static TCGv env_btaken;
52 static TCGv env_btarget;
53 static TCGv env_iflags;
54 static TCGv env_res_addr;
55 static TCGv env_res_val;
57 #include "exec/gen-icount.h"
59 /* This is the state at translation time. */
60 typedef struct DisasContext {
61 MicroBlazeCPU *cpu;
62 target_ulong pc;
64 /* Decoder. */
65 int type_b;
66 uint32_t ir;
67 uint8_t opcode;
68 uint8_t rd, ra, rb;
69 uint16_t imm;
71 unsigned int cpustate_changed;
72 unsigned int delayed_branch;
73 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
74 unsigned int clear_imm;
75 int is_jmp;
77 #define JMP_NOJMP 0
78 #define JMP_DIRECT 1
79 #define JMP_DIRECT_CC 2
80 #define JMP_INDIRECT 3
81 unsigned int jmp;
82 uint32_t jmp_pc;
84 int abort_at_next_insn;
85 int nr_nops;
86 struct TranslationBlock *tb;
87 int singlestep_enabled;
88 } DisasContext;
90 static const char *regnames[] =
92 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
93 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
94 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
95 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
98 static const char *special_regnames[] =
100 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
101 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
102 "sr16", "sr17", "sr18"
105 static inline void t_sync_flags(DisasContext *dc)
107 /* Synch the tb dependent flags between translator and runtime. */
108 if (dc->tb_flags != dc->synced_flags) {
109 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
110 dc->synced_flags = dc->tb_flags;
114 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
116 TCGv_i32 tmp = tcg_const_i32(index);
118 t_sync_flags(dc);
119 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
120 gen_helper_raise_exception(cpu_env, tmp);
121 tcg_temp_free_i32(tmp);
122 dc->is_jmp = DISAS_UPDATE;
125 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
127 TranslationBlock *tb;
128 tb = dc->tb;
129 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
130 tcg_gen_goto_tb(n);
131 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
132 tcg_gen_exit_tb((uintptr_t)tb + n);
133 } else {
134 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
135 tcg_gen_exit_tb(0);
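/*
 * Note: when the destination lies on the same guest page as the current TB,
 * the goto_tb/exit_tb((uintptr_t)tb + n) pair allows the generated code to be
 * patched for direct block chaining; otherwise only SR_PC is updated and
 * exit_tb(0) returns to the main loop, which looks up the next TB as usual.
 */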
139 static void read_carry(DisasContext *dc, TCGv d)
141 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
145 * write_carry sets the carry bits in MSR based on bit 0 of v.
146 * v[31:1] are ignored.
148 static void write_carry(DisasContext *dc, TCGv v)
150 TCGv t0 = tcg_temp_new();
151 tcg_gen_shli_tl(t0, v, 31);
152 tcg_gen_sari_tl(t0, t0, 31);
153 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
154 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
155 ~(MSR_C | MSR_CC));
156 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
157 tcg_temp_free(t0);
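/*
 * Example: write_carryi(dc, 1) below feeds v = 1 into write_carry(), which
 * sets both MSR[C] and the shadow copy MSR[CC]; v = 0 clears them.  As the
 * comment above notes, only bit 0 of v is examined.
 */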
160 static void write_carryi(DisasContext *dc, bool carry)
162 TCGv t0 = tcg_temp_new();
163 tcg_gen_movi_tl(t0, carry);
164 write_carry(dc, t0);
165 tcg_temp_free(t0);
168 /* True if ALU operand b is a small immediate that may deserve
169 faster treatment. */
170 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
172 /* Immediate insn without the imm prefix ? */
173 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
176 static inline TCGv *dec_alu_op_b(DisasContext *dc)
178 if (dc->type_b) {
179 if (dc->tb_flags & IMM_FLAG)
180 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
181 else
182 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
183 return &env_imm;
184 } else
185 return &cpu_R[dc->rb];
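/*
 * Example: a type-B insn that follows an "imm" prefix sees IMM_FLAG set, so
 * its 16-bit field is OR'ed into the high half already staged in env_imm by
 * dec_imm(); "imm 0x1234" followed by "addik r5, r0, 0x5678" therefore uses
 * the operand 0x12345678.  Without the prefix the 16-bit field is simply
 * sign-extended.
 */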
188 static void dec_add(DisasContext *dc)
190 unsigned int k, c;
191 TCGv cf;
193 k = dc->opcode & 4;
194 c = dc->opcode & 2;
196 LOG_DIS("add%s%s%s r%d r%d r%d\n",
197 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
198 dc->rd, dc->ra, dc->rb);
200 /* Take care of the easy cases first. */
201 if (k) {
202 /* k - keep carry, no need to update MSR. */
203 /* If rd == r0, it's a nop. */
204 if (dc->rd) {
205 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
207 if (c) {
208 /* c - Add carry into the result. */
209 cf = tcg_temp_new();
211 read_carry(dc, cf);
212 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
213 tcg_temp_free(cf);
216 return;
219 /* From now on, we can assume k is zero. So we need to update MSR. */
220 /* Extract carry. */
221 cf = tcg_temp_new();
222 if (c) {
223 read_carry(dc, cf);
224 } else {
225 tcg_gen_movi_tl(cf, 0);
228 if (dc->rd) {
229 TCGv ncf = tcg_temp_new();
230 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
231 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
232 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
233 write_carry(dc, ncf);
234 tcg_temp_free(ncf);
235 } else {
236 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
237 write_carry(dc, cf);
239 tcg_temp_free(cf);
242 static void dec_sub(DisasContext *dc)
244 unsigned int u, cmp, k, c;
245 TCGv cf, na;
247 u = dc->imm & 2;
248 k = dc->opcode & 4;
249 c = dc->opcode & 2;
250 cmp = (dc->imm & 1) && (!dc->type_b) && k;
252 if (cmp) {
253 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
254 if (dc->rd) {
255 if (u)
256 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
257 else
258 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
260 return;
263 LOG_DIS("sub%s%s r%d, r%d r%d\n",
264 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
266 /* Take care of the easy cases first. */
267 if (k) {
268 /* k - keep carry, no need to update MSR. */
269 /* If rd == r0, it's a nop. */
270 if (dc->rd) {
271 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
273 if (c) {
274 /* c - Add carry into the result. */
275 cf = tcg_temp_new();
277 read_carry(dc, cf);
278 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
279 tcg_temp_free(cf);
282 return;
285 /* From now on, we can assume k is zero. So we need to update MSR. */
286 /* Extract carry. And complement a into na. */
287 cf = tcg_temp_new();
288 na = tcg_temp_new();
289 if (c) {
290 read_carry(dc, cf);
291 } else {
292 tcg_gen_movi_tl(cf, 1);
295 /* d = b + ~a + c. carry defaults to 1. */
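    /* Two's complement identity: b - a == b + ~a + 1.  Presetting the
       carry-in to 1 (or to the incoming carry for the rsubc forms) lets the
       same add-with-carry path compute the subtraction, while
       gen_helper_carry derives the resulting MSR carry. */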
296 tcg_gen_not_tl(na, cpu_R[dc->ra]);
298 if (dc->rd) {
299 TCGv ncf = tcg_temp_new();
300 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
301 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
302 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
303 write_carry(dc, ncf);
304 tcg_temp_free(ncf);
305 } else {
306 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
307 write_carry(dc, cf);
309 tcg_temp_free(cf);
310 tcg_temp_free(na);
313 static void dec_pattern(DisasContext *dc)
315 unsigned int mode;
317 if ((dc->tb_flags & MSR_EE_FLAG)
318 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
319 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
320 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
321 t_gen_raise_exception(dc, EXCP_HW_EXCP);
324 mode = dc->opcode & 3;
325 switch (mode) {
326 case 0:
327 /* pcmpbf. */
328 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
329 if (dc->rd)
330 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
331 break;
332 case 2:
333 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
334 if (dc->rd) {
335 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
336 cpu_R[dc->ra], cpu_R[dc->rb]);
338 break;
339 case 3:
340 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
341 if (dc->rd) {
342 tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
343 cpu_R[dc->ra], cpu_R[dc->rb]);
345 break;
346 default:
347 cpu_abort(CPU(dc->cpu),
348 "unsupported pattern insn opcode=%x\n", dc->opcode);
349 break;
353 static void dec_and(DisasContext *dc)
355 unsigned int not;
357 if (!dc->type_b && (dc->imm & (1 << 10))) {
358 dec_pattern(dc);
359 return;
362 not = dc->opcode & (1 << 1);
363 LOG_DIS("and%s\n", not ? "n" : "");
365 if (!dc->rd)
366 return;
368 if (not) {
369 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
370 } else
371 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
374 static void dec_or(DisasContext *dc)
376 if (!dc->type_b && (dc->imm & (1 << 10))) {
377 dec_pattern(dc);
378 return;
381 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
382 if (dc->rd)
383 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
386 static void dec_xor(DisasContext *dc)
388 if (!dc->type_b && (dc->imm & (1 << 10))) {
389 dec_pattern(dc);
390 return;
393 LOG_DIS("xor r%d\n", dc->rd);
394 if (dc->rd)
395 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
398 static inline void msr_read(DisasContext *dc, TCGv d)
400 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
403 static inline void msr_write(DisasContext *dc, TCGv v)
405 TCGv t;
407 t = tcg_temp_new();
408 dc->cpustate_changed = 1;
409 /* PVR bit is not writable. */
410 tcg_gen_andi_tl(t, v, ~MSR_PVR);
411 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
412     tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
413 tcg_temp_free(t);
416 static void dec_msr(DisasContext *dc)
418 CPUState *cs = CPU(dc->cpu);
419 TCGv t0, t1;
420 unsigned int sr, to, rn;
421 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
423 sr = dc->imm & ((1 << 14) - 1);
424 to = dc->imm & (1 << 14);
425 dc->type_b = 1;
426 if (to)
427 dc->cpustate_changed = 1;
429 /* msrclr and msrset. */
430 if (!(dc->imm & (1 << 15))) {
431 unsigned int clr = dc->ir & (1 << 16);
433 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
434 dc->rd, dc->imm);
436 if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
437 /* nop??? */
438 return;
441 if ((dc->tb_flags & MSR_EE_FLAG)
442 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
443 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
444 t_gen_raise_exception(dc, EXCP_HW_EXCP);
445 return;
448 if (dc->rd)
449 msr_read(dc, cpu_R[dc->rd]);
451 t0 = tcg_temp_new();
452 t1 = tcg_temp_new();
453 msr_read(dc, t0);
454 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
456 if (clr) {
457 tcg_gen_not_tl(t1, t1);
458 tcg_gen_and_tl(t0, t0, t1);
459 } else
460 tcg_gen_or_tl(t0, t0, t1);
461 msr_write(dc, t0);
462 tcg_temp_free(t0);
463 tcg_temp_free(t1);
464 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
465 dc->is_jmp = DISAS_UPDATE;
466 return;
469 if (to) {
470 if ((dc->tb_flags & MSR_EE_FLAG)
471 && mem_index == MMU_USER_IDX) {
472 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
473 t_gen_raise_exception(dc, EXCP_HW_EXCP);
474 return;
478 #if !defined(CONFIG_USER_ONLY)
479 /* Catch read/writes to the mmu block. */
480 if ((sr & ~0xff) == 0x1000) {
481 sr &= 7;
482 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
483 if (to)
484 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
485 else
486 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
487 return;
489 #endif
491 if (to) {
492 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
493 switch (sr) {
494 case 0:
495 break;
496 case 1:
497 msr_write(dc, cpu_R[dc->ra]);
498 break;
499 case 0x3:
500 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
501 break;
502 case 0x5:
503 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
504 break;
505 case 0x7:
506 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
507 break;
508 case 0x800:
509 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
510 break;
511 case 0x802:
512 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
513 break;
514 default:
515 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
516 break;
518 } else {
519 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
521 switch (sr) {
522 case 0:
523 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
524 break;
525 case 1:
526 msr_read(dc, cpu_R[dc->rd]);
527 break;
528 case 0x3:
529 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
530 break;
531 case 0x5:
532 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
533 break;
534 case 0x7:
535 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
536 break;
537 case 0xb:
538 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
539 break;
540 case 0x800:
541 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
542 break;
543 case 0x802:
544 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
545 break;
546 case 0x2000:
547 case 0x2001:
548 case 0x2002:
549 case 0x2003:
550 case 0x2004:
551 case 0x2005:
552 case 0x2006:
553 case 0x2007:
554 case 0x2008:
555 case 0x2009:
556 case 0x200a:
557 case 0x200b:
558 case 0x200c:
559 rn = sr & 0xf;
560 tcg_gen_ld_tl(cpu_R[dc->rd],
561 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
562 break;
563 default:
564 cpu_abort(cs, "unknown mfs reg %x\n", sr);
565 break;
569 if (dc->rd == 0) {
570 tcg_gen_movi_tl(cpu_R[0], 0);
574 /* 64-bit signed mul, lower result in d and upper in d2. */
575 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
577 TCGv_i64 t0, t1;
579 t0 = tcg_temp_new_i64();
580 t1 = tcg_temp_new_i64();
582 tcg_gen_ext_i32_i64(t0, a);
583 tcg_gen_ext_i32_i64(t1, b);
584 tcg_gen_mul_i64(t0, t0, t1);
586 tcg_gen_extrl_i64_i32(d, t0);
587 tcg_gen_shri_i64(t0, t0, 32);
588 tcg_gen_extrl_i64_i32(d2, t0);
590 tcg_temp_free_i64(t0);
591 tcg_temp_free_i64(t1);
594 /* 64-bit unsigned muls, lower result in d and upper in d2. */
595 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
597 TCGv_i64 t0, t1;
599 t0 = tcg_temp_new_i64();
600 t1 = tcg_temp_new_i64();
602 tcg_gen_extu_i32_i64(t0, a);
603 tcg_gen_extu_i32_i64(t1, b);
604 tcg_gen_mul_i64(t0, t0, t1);
606 tcg_gen_extrl_i64_i32(d, t0);
607 tcg_gen_shri_i64(t0, t0, 32);
608 tcg_gen_extrl_i64_i32(d2, t0);
610 tcg_temp_free_i64(t0);
611 tcg_temp_free_i64(t1);
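/*
 * dec_mul() below uses these helpers so that e.g. "mulh rd, ra, rb" keeps
 * only the upper 32 bits of the 64-bit signed product,
 * rd = ((int64_t)ra * (int64_t)rb) >> 32, with the low half discarded into a
 * scratch temp.
 */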
614 /* Multiplier unit. */
615 static void dec_mul(DisasContext *dc)
617 TCGv d[2];
618 unsigned int subcode;
620 if ((dc->tb_flags & MSR_EE_FLAG)
621 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
622 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
623 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
624 t_gen_raise_exception(dc, EXCP_HW_EXCP);
625 return;
628 subcode = dc->imm & 3;
629 d[0] = tcg_temp_new();
630 d[1] = tcg_temp_new();
632 if (dc->type_b) {
633 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
634 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
635 goto done;
638 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
639 if (subcode >= 1 && subcode <= 3
640 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
641 /* nop??? */
644 switch (subcode) {
645 case 0:
646 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
647 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
648 break;
649 case 1:
650 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
651 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
652 break;
653 case 2:
654 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
655 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
656 break;
657 case 3:
658 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
659 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
660 break;
661 default:
662 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
663 break;
665 done:
666 tcg_temp_free(d[0]);
667 tcg_temp_free(d[1]);
670 /* Div unit. */
671 static void dec_div(DisasContext *dc)
673 unsigned int u;
675 u = dc->imm & 2;
676 LOG_DIS("div\n");
678 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
679 && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
680 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
681 t_gen_raise_exception(dc, EXCP_HW_EXCP);
684 if (u)
685 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
686 cpu_R[dc->ra]);
687 else
688 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
689 cpu_R[dc->ra]);
690 if (!dc->rd)
691 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
694 static void dec_barrel(DisasContext *dc)
696 TCGv t0;
697 unsigned int s, t;
699 if ((dc->tb_flags & MSR_EE_FLAG)
700 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
701 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
702 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
703 t_gen_raise_exception(dc, EXCP_HW_EXCP);
704 return;
707 s = dc->imm & (1 << 10);
708 t = dc->imm & (1 << 9);
710 LOG_DIS("bs%s%s r%d r%d r%d\n",
711 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
713 t0 = tcg_temp_new();
715 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
716 tcg_gen_andi_tl(t0, t0, 31);
718 if (s)
719 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
720 else {
721 if (t)
722 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
723 else
724 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
728 static void dec_bit(DisasContext *dc)
730 CPUState *cs = CPU(dc->cpu);
731 TCGv t0;
732 unsigned int op;
733 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
735 op = dc->ir & ((1 << 9) - 1);
736 switch (op) {
737 case 0x21:
738 /* src. */
739 t0 = tcg_temp_new();
741 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
742 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
743 write_carry(dc, cpu_R[dc->ra]);
744 if (dc->rd) {
745 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
746 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
748 tcg_temp_free(t0);
749 break;
751 case 0x1:
752 case 0x41:
753 /* srl. */
754 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
756 /* Update carry. Note that write carry only looks at the LSB. */
757 write_carry(dc, cpu_R[dc->ra]);
758 if (dc->rd) {
759 if (op == 0x41)
760 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
761 else
762 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
764 break;
765 case 0x60:
766 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
767 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
768 break;
769 case 0x61:
770 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
771 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
772 break;
773 case 0x64:
774 case 0x66:
775 case 0x74:
776 case 0x76:
777 /* wdc. */
778 LOG_DIS("wdc r%d\n", dc->ra);
779 if ((dc->tb_flags & MSR_EE_FLAG)
780 && mem_index == MMU_USER_IDX) {
781 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
782 t_gen_raise_exception(dc, EXCP_HW_EXCP);
783 return;
785 break;
786 case 0x68:
787 /* wic. */
788 LOG_DIS("wic r%d\n", dc->ra);
789 if ((dc->tb_flags & MSR_EE_FLAG)
790 && mem_index == MMU_USER_IDX) {
791 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
792 t_gen_raise_exception(dc, EXCP_HW_EXCP);
793 return;
795 break;
796 case 0xe0:
797 if ((dc->tb_flags & MSR_EE_FLAG)
798 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
799 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
800 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
801 t_gen_raise_exception(dc, EXCP_HW_EXCP);
803 if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
804 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
806 break;
807 case 0x1e0:
808 /* swapb */
809 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
810 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
811 break;
812 case 0x1e2:
813 /*swaph */
814 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
815 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
816 break;
817 default:
818 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
819 dc->pc, op, dc->rd, dc->ra, dc->rb);
820 break;
824 static inline void sync_jmpstate(DisasContext *dc)
826 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
827 if (dc->jmp == JMP_DIRECT) {
828 tcg_gen_movi_tl(env_btaken, 1);
830 dc->jmp = JMP_INDIRECT;
831 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
835 static void dec_imm(DisasContext *dc)
837 LOG_DIS("imm %x\n", dc->imm << 16);
838 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
839 dc->tb_flags |= IMM_FLAG;
840 dc->clear_imm = 0;
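/*
 * Example: "imm 0x8765" stages 0x87650000 in env_imm and sets IMM_FLAG.
 * clear_imm is suppressed here so the flag survives into the next insn,
 * where dec_alu_op_b() combines the two halves; the main translation loop
 * then drops IMM_FLAG again once that following insn has been decoded.
 */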
843 static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
845 unsigned int extimm = dc->tb_flags & IMM_FLAG;
846 /* Should be set to one if r1 is used by loadstores. */
847 int stackprot = 0;
849 /* All load/stores use ra. */
850 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
851 stackprot = 1;
854 /* Treat the common cases first. */
855 if (!dc->type_b) {
856 /* If any of the regs is r0, return a ptr to the other. */
857 if (dc->ra == 0) {
858 return &cpu_R[dc->rb];
859 } else if (dc->rb == 0) {
860 return &cpu_R[dc->ra];
863 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
864 stackprot = 1;
867 *t = tcg_temp_new();
868 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
870 if (stackprot) {
871 gen_helper_stackprot(cpu_env, *t);
873 return t;
875 /* Immediate. */
876 if (!extimm) {
877 if (dc->imm == 0) {
878 return &cpu_R[dc->ra];
880 *t = tcg_temp_new();
881 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
882 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
883 } else {
884 *t = tcg_temp_new();
885 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
888 if (stackprot) {
889 gen_helper_stackprot(cpu_env, *t);
891 return t;
894 static void dec_load(DisasContext *dc)
896 TCGv t, v, *addr;
897 unsigned int size, rev = 0, ex = 0;
898 TCGMemOp mop;
900 mop = dc->opcode & 3;
901 size = 1 << mop;
902 if (!dc->type_b) {
903 rev = (dc->ir >> 9) & 1;
904 ex = (dc->ir >> 10) & 1;
906 mop |= MO_TE;
907 if (rev) {
908 mop ^= MO_BSWAP;
911 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
912 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
913 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
914 t_gen_raise_exception(dc, EXCP_HW_EXCP);
915 return;
918 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
919 ex ? "x" : "");
921 t_sync_flags(dc);
922 addr = compute_ldst_addr(dc, &t);
925 * When doing reverse accesses we need to do two things.
927 * 1. Reverse the address wrt endianness.
928 * 2. Byteswap the data lanes on the way back into the CPU core.
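 *
 * Example: a reversed halfword access XORs the address with 2, so word
 * offsets 0 and 2 swap places; a reversed byte access maps the low two
 * address bits through 3 - (addr & 3), i.e. 0<->3 and 1<->2.  MO_BSWAP in
 * "mop" then swaps the data lanes themselves.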
930 if (rev && size != 4) {
931 /* Endian reverse the address. t is addr. */
932 switch (size) {
933 case 1:
935 /* 00 -> 11
936 01 -> 10
937 10 -> 01
938 11 -> 00 */
939 TCGv low = tcg_temp_new();
941 /* Force addr into the temp. */
942 if (addr != &t) {
943 t = tcg_temp_new();
944 tcg_gen_mov_tl(t, *addr);
945 addr = &t;
948 tcg_gen_andi_tl(low, t, 3);
949 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
950 tcg_gen_andi_tl(t, t, ~3);
951 tcg_gen_or_tl(t, t, low);
952 tcg_gen_mov_tl(env_imm, t);
953 tcg_temp_free(low);
954 break;
957 case 2:
958 /* 00 -> 10
959 10 -> 00. */
960 /* Force addr into the temp. */
961 if (addr != &t) {
962 t = tcg_temp_new();
963 tcg_gen_xori_tl(t, *addr, 2);
964 addr = &t;
965 } else {
966 tcg_gen_xori_tl(t, t, 2);
968 break;
969 default:
970 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
971 break;
975 /* lwx does not throw unaligned access errors, so force alignment */
976 if (ex) {
977 /* Force addr into the temp. */
978 if (addr != &t) {
979 t = tcg_temp_new();
980 tcg_gen_mov_tl(t, *addr);
981 addr = &t;
983 tcg_gen_andi_tl(t, t, ~3);
986 /* If we get a fault on a dslot, the jmpstate better be in sync. */
987 sync_jmpstate(dc);
989 /* Verify alignment if needed. */
991 * Microblaze gives MMU faults priority over faults due to
992 * unaligned addresses. That's why we speculatively do the load
993 * into v. If the load succeeds, we verify alignment of the
994 * address and if that succeeds we write into the destination reg.
996 v = tcg_temp_new();
997 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
999 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1000 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1001 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1002 tcg_const_tl(0), tcg_const_tl(size - 1));
1005 if (ex) {
1006 tcg_gen_mov_tl(env_res_addr, *addr);
1007 tcg_gen_mov_tl(env_res_val, v);
1009 if (dc->rd) {
1010 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1012 tcg_temp_free(v);
1014 if (ex) { /* lwx */
1015 /* no support for AXI exclusive so always clear C */
1016 write_carryi(dc, 0);
1019 if (addr == &t)
1020 tcg_temp_free(t);
1023 static void dec_store(DisasContext *dc)
1025 TCGv t, *addr, swx_addr;
1026 TCGLabel *swx_skip = NULL;
1027 unsigned int size, rev = 0, ex = 0;
1028 TCGMemOp mop;
1030 mop = dc->opcode & 3;
1031 size = 1 << mop;
1032 if (!dc->type_b) {
1033 rev = (dc->ir >> 9) & 1;
1034 ex = (dc->ir >> 10) & 1;
1036 mop |= MO_TE;
1037 if (rev) {
1038 mop ^= MO_BSWAP;
1041 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
1042 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1043 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1044 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1045 return;
1048 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1049 ex ? "x" : "");
1050 t_sync_flags(dc);
1051 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1052 sync_jmpstate(dc);
1053 addr = compute_ldst_addr(dc, &t);
1055 swx_addr = tcg_temp_local_new();
1056 if (ex) { /* swx */
1057 TCGv tval;
1059 /* Force addr into the swx_addr. */
1060 tcg_gen_mov_tl(swx_addr, *addr);
1061 addr = &swx_addr;
1062 /* swx does not throw unaligned access errors, so force alignment */
1063 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1065 write_carryi(dc, 1);
1066 swx_skip = gen_new_label();
1067 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1069 /* Compare the value loaded at lwx with current contents of
1070 the reserved location.
1071 FIXME: This only works for system emulation where we can expect
1072 this compare and the following write to be atomic. For user
1073 emulation we need to add atomicity between threads. */
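        /* Carry encodes the outcome of the conditional store: it was preset
           to 1 (failure) above and is only cleared to 0 (success) once both
           the reserved address and the reserved value still match; on a
           mismatch the branches skip the store below entirely. */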
1074 tval = tcg_temp_new();
1075 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
1076 MO_TEUL);
1077 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
1078 write_carryi(dc, 0);
1079 tcg_temp_free(tval);
1082 if (rev && size != 4) {
1083 /* Endian reverse the address. t is addr. */
1084 switch (size) {
1085 case 1:
1087 /* 00 -> 11
1088 01 -> 10
1089 10 -> 01
1090 11 -> 00 */
1091 TCGv low = tcg_temp_new();
1093 /* Force addr into the temp. */
1094 if (addr != &t) {
1095 t = tcg_temp_new();
1096 tcg_gen_mov_tl(t, *addr);
1097 addr = &t;
1100 tcg_gen_andi_tl(low, t, 3);
1101 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1102 tcg_gen_andi_tl(t, t, ~3);
1103 tcg_gen_or_tl(t, t, low);
1104 tcg_gen_mov_tl(env_imm, t);
1105 tcg_temp_free(low);
1106 break;
1109 case 2:
1110 /* 00 -> 10
1111 10 -> 00. */
1112 /* Force addr into the temp. */
1113 if (addr != &t) {
1114 t = tcg_temp_new();
1115 tcg_gen_xori_tl(t, *addr, 2);
1116 addr = &t;
1117 } else {
1118 tcg_gen_xori_tl(t, t, 2);
1120 break;
1121 default:
1122 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1123 break;
1126 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
1128 /* Verify alignment if needed. */
1129 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1130 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1131 /* FIXME: if the alignment is wrong, we should restore the value
1132 * in memory. One possible way to achieve this is to probe
1133 * the MMU prior to the memory access; that way we could put
1134 * the alignment checks in between the probe and the mem
1135 * access.
1137 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1138 tcg_const_tl(1), tcg_const_tl(size - 1));
1141 if (ex) {
1142 gen_set_label(swx_skip);
1144 tcg_temp_free(swx_addr);
1146 if (addr == &t)
1147 tcg_temp_free(t);
1150 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1151 TCGv d, TCGv a, TCGv b)
1153 switch (cc) {
1154 case CC_EQ:
1155 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1156 break;
1157 case CC_NE:
1158 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1159 break;
1160 case CC_LT:
1161 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1162 break;
1163 case CC_LE:
1164 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1165 break;
1166 case CC_GE:
1167 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1168 break;
1169 case CC_GT:
1170 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1171 break;
1172 default:
1173 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1174 break;
1178 static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1180 TCGLabel *l1 = gen_new_label();
1181 /* Conditional jmp. */
1182 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1183 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1184 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1185 gen_set_label(l1);
1188 static void dec_bcc(DisasContext *dc)
1190 unsigned int cc;
1191 unsigned int dslot;
1193 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1194 dslot = dc->ir & (1 << 25);
1195 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1197 dc->delayed_branch = 1;
1198 if (dslot) {
1199 dc->delayed_branch = 2;
1200 dc->tb_flags |= D_FLAG;
1201 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1202 cpu_env, offsetof(CPUMBState, bimm));
1205 if (dec_alu_op_b_is_small_imm(dc)) {
1206 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1208 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
1209 dc->jmp = JMP_DIRECT_CC;
1210 dc->jmp_pc = dc->pc + offset;
1211 } else {
1212 dc->jmp = JMP_INDIRECT;
1213 tcg_gen_movi_tl(env_btarget, dc->pc);
1214 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1216 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
1219 static void dec_br(DisasContext *dc)
1221 unsigned int dslot, link, abs, mbar;
1222 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1224 dslot = dc->ir & (1 << 20);
1225 abs = dc->ir & (1 << 19);
1226 link = dc->ir & (1 << 18);
1228 /* Memory barrier. */
1229 mbar = (dc->ir >> 16) & 31;
1230 if (mbar == 2 && dc->imm == 4) {
1231 /* mbar IMM & 16 decodes to sleep. */
1232 if (dc->rd & 16) {
1233 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1234 TCGv_i32 tmp_1 = tcg_const_i32(1);
1236 LOG_DIS("sleep\n");
1238 t_sync_flags(dc);
1239 tcg_gen_st_i32(tmp_1, cpu_env,
1240 -offsetof(MicroBlazeCPU, env)
1241 +offsetof(CPUState, halted));
1242 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1243 gen_helper_raise_exception(cpu_env, tmp_hlt);
1244 tcg_temp_free_i32(tmp_hlt);
1245 tcg_temp_free_i32(tmp_1);
1246 return;
1248 LOG_DIS("mbar %d\n", dc->rd);
1249 /* Break the TB. */
1250 dc->cpustate_changed = 1;
1251 return;
1254 LOG_DIS("br%s%s%s%s imm=%x\n",
1255 abs ? "a" : "", link ? "l" : "",
1256 dc->type_b ? "i" : "", dslot ? "d" : "",
1257 dc->imm);
1259 dc->delayed_branch = 1;
1260 if (dslot) {
1261 dc->delayed_branch = 2;
1262 dc->tb_flags |= D_FLAG;
1263 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1264 cpu_env, offsetof(CPUMBState, bimm));
1266 if (link && dc->rd)
1267 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1269 dc->jmp = JMP_INDIRECT;
1270 if (abs) {
1271 tcg_gen_movi_tl(env_btaken, 1);
1272 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1273 if (link && !dslot) {
1274 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1275 t_gen_raise_exception(dc, EXCP_BREAK);
1276 if (dc->imm == 0) {
1277 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1278 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1279 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1280 return;
1283 t_gen_raise_exception(dc, EXCP_DEBUG);
1286 } else {
1287 if (dec_alu_op_b_is_small_imm(dc)) {
1288 dc->jmp = JMP_DIRECT;
1289 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1290 } else {
1291 tcg_gen_movi_tl(env_btaken, 1);
1292 tcg_gen_movi_tl(env_btarget, dc->pc);
1293 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1298 static inline void do_rti(DisasContext *dc)
1300 TCGv t0, t1;
1301 t0 = tcg_temp_new();
1302 t1 = tcg_temp_new();
1303 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1304 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1305 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1307 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1308 tcg_gen_or_tl(t1, t1, t0);
1309 msr_write(dc, t1);
1310 tcg_temp_free(t1);
1311 tcg_temp_free(t0);
1312 dc->tb_flags &= ~DRTI_FLAG;
1315 static inline void do_rtb(DisasContext *dc)
1317 TCGv t0, t1;
1318 t0 = tcg_temp_new();
1319 t1 = tcg_temp_new();
1320 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1321 tcg_gen_shri_tl(t0, t1, 1);
1322 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1324 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1325 tcg_gen_or_tl(t1, t1, t0);
1326 msr_write(dc, t1);
1327 tcg_temp_free(t1);
1328 tcg_temp_free(t0);
1329 dc->tb_flags &= ~DRTB_FLAG;
1332 static inline void do_rte(DisasContext *dc)
1334 TCGv t0, t1;
1335 t0 = tcg_temp_new();
1336 t1 = tcg_temp_new();
1338 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1339 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1340 tcg_gen_shri_tl(t0, t1, 1);
1341 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1343 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1344 tcg_gen_or_tl(t1, t1, t0);
1345 msr_write(dc, t1);
1346 tcg_temp_free(t1);
1347 tcg_temp_free(t0);
1348 dc->tb_flags &= ~DRTE_FLAG;
1351 static void dec_rts(DisasContext *dc)
1353 unsigned int b_bit, i_bit, e_bit;
1354 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1356 i_bit = dc->ir & (1 << 21);
1357 b_bit = dc->ir & (1 << 22);
1358 e_bit = dc->ir & (1 << 23);
1360 dc->delayed_branch = 2;
1361 dc->tb_flags |= D_FLAG;
1362 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1363 cpu_env, offsetof(CPUMBState, bimm));
1365 if (i_bit) {
1366 LOG_DIS("rtid ir=%x\n", dc->ir);
1367 if ((dc->tb_flags & MSR_EE_FLAG)
1368 && mem_index == MMU_USER_IDX) {
1369 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1370 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1372 dc->tb_flags |= DRTI_FLAG;
1373 } else if (b_bit) {
1374 LOG_DIS("rtbd ir=%x\n", dc->ir);
1375 if ((dc->tb_flags & MSR_EE_FLAG)
1376 && mem_index == MMU_USER_IDX) {
1377 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1378 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1380 dc->tb_flags |= DRTB_FLAG;
1381 } else if (e_bit) {
1382 LOG_DIS("rted ir=%x\n", dc->ir);
1383 if ((dc->tb_flags & MSR_EE_FLAG)
1384 && mem_index == MMU_USER_IDX) {
1385 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1386 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1388 dc->tb_flags |= DRTE_FLAG;
1389 } else
1390 LOG_DIS("rts ir=%x\n", dc->ir);
1392 dc->jmp = JMP_INDIRECT;
1393 tcg_gen_movi_tl(env_btaken, 1);
1394 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1397 static int dec_check_fpuv2(DisasContext *dc)
1399 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1400 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1401 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1403 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
1406 static void dec_fpu(DisasContext *dc)
1408 unsigned int fpu_insn;
1410 if ((dc->tb_flags & MSR_EE_FLAG)
1411 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1412 && (dc->cpu->cfg.use_fpu != 1)) {
1413 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1414 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1415 return;
1418 fpu_insn = (dc->ir >> 7) & 7;
1420 switch (fpu_insn) {
1421 case 0:
1422 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1423 cpu_R[dc->rb]);
1424 break;
1426 case 1:
1427 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1428 cpu_R[dc->rb]);
1429 break;
1431 case 2:
1432 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1433 cpu_R[dc->rb]);
1434 break;
1436 case 3:
1437 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1438 cpu_R[dc->rb]);
1439 break;
1441 case 4:
1442 switch ((dc->ir >> 4) & 7) {
1443 case 0:
1444 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1445 cpu_R[dc->ra], cpu_R[dc->rb]);
1446 break;
1447 case 1:
1448 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1449 cpu_R[dc->ra], cpu_R[dc->rb]);
1450 break;
1451 case 2:
1452 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1453 cpu_R[dc->ra], cpu_R[dc->rb]);
1454 break;
1455 case 3:
1456 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1457 cpu_R[dc->ra], cpu_R[dc->rb]);
1458 break;
1459 case 4:
1460 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1461 cpu_R[dc->ra], cpu_R[dc->rb]);
1462 break;
1463 case 5:
1464 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1465 cpu_R[dc->ra], cpu_R[dc->rb]);
1466 break;
1467 case 6:
1468 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1469 cpu_R[dc->ra], cpu_R[dc->rb]);
1470 break;
1471 default:
1472 qemu_log_mask(LOG_UNIMP,
1473 "unimplemented fcmp fpu_insn=%x pc=%x"
1474 " opc=%x\n",
1475 fpu_insn, dc->pc, dc->opcode);
1476 dc->abort_at_next_insn = 1;
1477 break;
1479 break;
1481 case 5:
1482 if (!dec_check_fpuv2(dc)) {
1483 return;
1485 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1486 break;
1488 case 6:
1489 if (!dec_check_fpuv2(dc)) {
1490 return;
1492 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1493 break;
1495 case 7:
1496 if (!dec_check_fpuv2(dc)) {
1497 return;
1499 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1500 break;
1502 default:
1503 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1504 " opc=%x\n",
1505 fpu_insn, dc->pc, dc->opcode);
1506 dc->abort_at_next_insn = 1;
1507 break;
1511 static void dec_null(DisasContext *dc)
1513 if ((dc->tb_flags & MSR_EE_FLAG)
1514 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1515 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1516 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1517 return;
1519 qemu_log ("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1520 dc->abort_at_next_insn = 1;
1523 /* Insns connected to FSL or AXI stream attached devices. */
1524 static void dec_stream(DisasContext *dc)
1526 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1527 TCGv_i32 t_id, t_ctrl;
1528 int ctrl;
1530 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1531 dc->type_b ? "" : "d", dc->imm);
1533 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1534 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1535 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1536 return;
1539 t_id = tcg_temp_new();
1540 if (dc->type_b) {
1541 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1542 ctrl = dc->imm >> 10;
1543 } else {
1544 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1545 ctrl = dc->imm >> 5;
1548 t_ctrl = tcg_const_tl(ctrl);
1550 if (dc->rd == 0) {
1551 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1552 } else {
1553 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1555 tcg_temp_free(t_id);
1556 tcg_temp_free(t_ctrl);
1559 static struct decoder_info {
1560 struct {
1561 uint32_t bits;
1562 uint32_t mask;
1564 void (*dec)(DisasContext *dc);
1565 } decinfo[] = {
1566 {DEC_ADD, dec_add},
1567 {DEC_SUB, dec_sub},
1568 {DEC_AND, dec_and},
1569 {DEC_XOR, dec_xor},
1570 {DEC_OR, dec_or},
1571 {DEC_BIT, dec_bit},
1572 {DEC_BARREL, dec_barrel},
1573 {DEC_LD, dec_load},
1574 {DEC_ST, dec_store},
1575 {DEC_IMM, dec_imm},
1576 {DEC_BR, dec_br},
1577 {DEC_BCC, dec_bcc},
1578 {DEC_RTS, dec_rts},
1579 {DEC_FPU, dec_fpu},
1580 {DEC_MUL, dec_mul},
1581 {DEC_DIV, dec_div},
1582 {DEC_MSR, dec_msr},
1583 {DEC_STREAM, dec_stream},
1584 {{0, 0}, dec_null}
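/*
 * decode() walks this table and calls the first entry whose mask/bits pair
 * matches the major opcode; the terminating {0, 0} entry matches anything,
 * so unrecognized opcodes fall through to dec_null().
 */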
1587 static inline void decode(DisasContext *dc, uint32_t ir)
1589 int i;
1591 dc->ir = ir;
1592 LOG_DIS("%8.8x\t", dc->ir);
1594 if (dc->ir)
1595 dc->nr_nops = 0;
1596 else {
1597 if ((dc->tb_flags & MSR_EE_FLAG)
1598 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1599 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1600 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1601 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1602 return;
1605 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1606 dc->nr_nops++;
1607 if (dc->nr_nops > 4) {
1608 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
1611 /* Bit 2 (MicroBlaze's MSB-first numbering, i.e. bit 29 here) indicates the insn type. */
1612 dc->type_b = ir & (1 << 29);
1614 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1615 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1616 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1617 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1618 dc->imm = EXTRACT_FIELD(ir, 0, 15);
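    /*
     * Field layout used above (LSB-first bit numbering): opcode = ir[31:26],
     * rd = ir[25:21], ra = ir[20:16], rb = ir[15:11], imm = ir[15:0].
     * Type-B insns (bit 29 set) use ir[15:0] as an immediate operand in
     * place of rb.
     */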
1620 /* Dispatch over the decoder table. */
1621 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1622 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1623 decinfo[i].dec(dc);
1624 break;
1629 /* generate intermediate code for basic block 'tb'. */
1630 void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
1632 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1633 CPUState *cs = CPU(cpu);
1634 uint32_t pc_start;
1635 struct DisasContext ctx;
1636 struct DisasContext *dc = &ctx;
1637 uint32_t next_page_start, org_flags;
1638 target_ulong npc;
1639 int num_insns;
1640 int max_insns;
1642 pc_start = tb->pc;
1643 dc->cpu = cpu;
1644 dc->tb = tb;
1645 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1647 dc->is_jmp = DISAS_NEXT;
1648 dc->jmp = 0;
1649 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1650 if (dc->delayed_branch) {
1651 dc->jmp = JMP_INDIRECT;
1653 dc->pc = pc_start;
1654 dc->singlestep_enabled = cs->singlestep_enabled;
1655 dc->cpustate_changed = 0;
1656 dc->abort_at_next_insn = 0;
1657 dc->nr_nops = 0;
1659 if (pc_start & 3) {
1660 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1663 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1664 #if !SIM_COMPAT
1665 qemu_log("--------------\n");
1666 log_cpu_state(CPU(cpu), 0);
1667 #endif
1670 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1671 num_insns = 0;
1672 max_insns = tb->cflags & CF_COUNT_MASK;
1673 if (max_insns == 0) {
1674 max_insns = CF_COUNT_MASK;
1676 if (max_insns > TCG_MAX_INSNS) {
1677 max_insns = TCG_MAX_INSNS;
1680 gen_tb_start(tb);
1683 tcg_gen_insn_start(dc->pc);
1684 num_insns++;
1686 #if SIM_COMPAT
1687 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1688 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1689 gen_helper_debug();
1691 #endif
1693 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1694 t_gen_raise_exception(dc, EXCP_DEBUG);
1695 dc->is_jmp = DISAS_UPDATE;
1696 /* The address covered by the breakpoint must be included in
1697 [tb->pc, tb->pc + tb->size) in order for it to be
1698 properly cleared -- thus we increment the PC here so that
1699 the logic setting tb->size below does the right thing. */
1700 dc->pc += 4;
1701 break;
1704 /* Pretty disas. */
1705 LOG_DIS("%8.8x:\t", dc->pc);
1707 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1708 gen_io_start();
1711 dc->clear_imm = 1;
1712 decode(dc, cpu_ldl_code(env, dc->pc));
1713 if (dc->clear_imm)
1714 dc->tb_flags &= ~IMM_FLAG;
1715 dc->pc += 4;
1717 if (dc->delayed_branch) {
1718 dc->delayed_branch--;
1719 if (!dc->delayed_branch) {
1720 if (dc->tb_flags & DRTI_FLAG)
1721 do_rti(dc);
1722 if (dc->tb_flags & DRTB_FLAG)
1723 do_rtb(dc);
1724 if (dc->tb_flags & DRTE_FLAG)
1725 do_rte(dc);
1726 /* Clear the delay slot flag. */
1727 dc->tb_flags &= ~D_FLAG;
1728 /* If it is a direct jump, try direct chaining. */
1729 if (dc->jmp == JMP_INDIRECT) {
1730 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1731 dc->is_jmp = DISAS_JUMP;
1732 } else if (dc->jmp == JMP_DIRECT) {
1733 t_sync_flags(dc);
1734 gen_goto_tb(dc, 0, dc->jmp_pc);
1735 dc->is_jmp = DISAS_TB_JUMP;
1736 } else if (dc->jmp == JMP_DIRECT_CC) {
1737 TCGLabel *l1 = gen_new_label();
1738 t_sync_flags(dc);
1739 /* Conditional jmp. */
1740 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1741 gen_goto_tb(dc, 1, dc->pc);
1742 gen_set_label(l1);
1743 gen_goto_tb(dc, 0, dc->jmp_pc);
1745 dc->is_jmp = DISAS_TB_JUMP;
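                /* I.e. for a conditional direct branch both edges get
                   chained: slot 1 goes to the fall-through PC when
                   env_btaken is zero, slot 0 to the cached branch target
                   otherwise. */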
1747 break;
1750 if (cs->singlestep_enabled) {
1751 break;
1753 } while (!dc->is_jmp && !dc->cpustate_changed
1754 && !tcg_op_buf_full()
1755 && !singlestep
1756 && (dc->pc < next_page_start)
1757 && num_insns < max_insns);
1759 npc = dc->pc;
1760 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1761 if (dc->tb_flags & D_FLAG) {
1762 dc->is_jmp = DISAS_UPDATE;
1763 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1764 sync_jmpstate(dc);
1765 } else
1766 npc = dc->jmp_pc;
1769 if (tb->cflags & CF_LAST_IO)
1770 gen_io_end();
1771 /* Force an update if the per-tb cpu state has changed. */
1772 if (dc->is_jmp == DISAS_NEXT
1773 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1774 dc->is_jmp = DISAS_UPDATE;
1775 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1777 t_sync_flags(dc);
1779 if (unlikely(cs->singlestep_enabled)) {
1780 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1782 if (dc->is_jmp != DISAS_JUMP) {
1783 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1785 gen_helper_raise_exception(cpu_env, tmp);
1786 tcg_temp_free_i32(tmp);
1787 } else {
1788 switch(dc->is_jmp) {
1789 case DISAS_NEXT:
1790 gen_goto_tb(dc, 1, npc);
1791 break;
1792 default:
1793 case DISAS_JUMP:
1794 case DISAS_UPDATE:
1795 /* indicate that the hash table must be used
1796 to find the next TB */
1797 tcg_gen_exit_tb(0);
1798 break;
1799 case DISAS_TB_JUMP:
1800 /* nothing more to generate */
1801 break;
1804 gen_tb_end(tb, num_insns);
1806 tb->size = dc->pc - pc_start;
1807 tb->icount = num_insns;
1809 #ifdef DEBUG_DISAS
1810 #if !SIM_COMPAT
1811 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1812 qemu_log("\n");
1813 #if DISAS_GNU
1814 log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
1815 #endif
1816 qemu_log("\nisize=%d osize=%d\n",
1817 dc->pc - pc_start, tcg_op_buf_count());
1819 #endif
1820 #endif
1821 assert(!dc->abort_at_next_insn);
1824 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1825 int flags)
1827 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1828 CPUMBState *env = &cpu->env;
1829 int i;
1831 if (!env || !f)
1832 return;
1834 cpu_fprintf(f, "IN: PC=%x %s\n",
1835 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1836 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1837 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1838 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1839 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1840 env->btaken, env->btarget,
1841 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1842 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1843 (env->sregs[SR_MSR] & MSR_EIP),
1844 (env->sregs[SR_MSR] & MSR_IE));
1846 for (i = 0; i < 32; i++) {
1847 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1848 if ((i + 1) % 4 == 0)
1849 cpu_fprintf(f, "\n");
1851 cpu_fprintf(f, "\n\n");
1854 MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1856 MicroBlazeCPU *cpu;
1858 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1860 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1862 return cpu;
1865 void mb_tcg_init(void)
1867 int i;
1869 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1871 env_debug = tcg_global_mem_new(TCG_AREG0,
1872 offsetof(CPUMBState, debug),
1873 "debug0");
1874 env_iflags = tcg_global_mem_new(TCG_AREG0,
1875 offsetof(CPUMBState, iflags),
1876 "iflags");
1877 env_imm = tcg_global_mem_new(TCG_AREG0,
1878 offsetof(CPUMBState, imm),
1879 "imm");
1880 env_btarget = tcg_global_mem_new(TCG_AREG0,
1881 offsetof(CPUMBState, btarget),
1882 "btarget");
1883 env_btaken = tcg_global_mem_new(TCG_AREG0,
1884 offsetof(CPUMBState, btaken),
1885 "btaken");
1886 env_res_addr = tcg_global_mem_new(TCG_AREG0,
1887 offsetof(CPUMBState, res_addr),
1888 "res_addr");
1889 env_res_val = tcg_global_mem_new(TCG_AREG0,
1890 offsetof(CPUMBState, res_val),
1891 "res_val");
1892 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1893 cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
1894 offsetof(CPUMBState, regs[i]),
1895 regnames[i]);
1897 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1898 cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
1899 offsetof(CPUMBState, sregs[i]),
1900 special_regnames[i]);
1904 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1905 target_ulong *data)
1907 env->sregs[SR_PC] = data[0];