[qemu.git] / target-microblaze / translate.c
blob a7a8ac8f995fd107bb59101d644912c6a40be53d
1 /*
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/helper-proto.h"
26 #include "microblaze-decode.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
34 #define SIM_COMPAT 0
35 #define DISAS_GNU 1
36 #define DISAS_MB 1
37 #if DISAS_MB && !SIM_COMPAT
38 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 #else
40 # define LOG_DIS(...) do { } while (0)
41 #endif
43 #define D(x)
45 #define EXTRACT_FIELD(src, start, end) \
46 (((src) >> start) & ((1 << (end - start + 1)) - 1))
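/*
 * Illustrative example (not part of the original file): EXTRACT_FIELD pulls a
 * contiguous bit range out of a word.  The decoder below uses
 * EXTRACT_FIELD(ir, 26, 31), which expands to ((ir >> 26) & 0x3f) and yields
 * the 6-bit major opcode stored in dc->opcode.  The helper name here is
 * made up purely for exposition.
 */
static inline uint32_t extract_field_example(uint32_t ir)
{
    /* Same as EXTRACT_FIELD(ir, 26, 31): the 6-bit major opcode. */
    return (ir >> 26) & 0x3f;
}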
48 static TCGv env_debug;
49 static TCGv_env cpu_env;
50 static TCGv cpu_R[32];
51 static TCGv cpu_SR[18];
52 static TCGv env_imm;
53 static TCGv env_btaken;
54 static TCGv env_btarget;
55 static TCGv env_iflags;
56 static TCGv env_res_addr;
57 static TCGv env_res_val;
59 #include "exec/gen-icount.h"
61 /* This is the state at translation time. */
62 typedef struct DisasContext {
63 MicroBlazeCPU *cpu;
64 target_ulong pc;
66 /* Decoder. */
67 int type_b;
68 uint32_t ir;
69 uint8_t opcode;
70 uint8_t rd, ra, rb;
71 uint16_t imm;
73 unsigned int cpustate_changed;
74 unsigned int delayed_branch;
75 unsigned int tb_flags, synced_flags; /* tb dependent flags. */
76 unsigned int clear_imm;
77 int is_jmp;
79 #define JMP_NOJMP 0
80 #define JMP_DIRECT 1
81 #define JMP_DIRECT_CC 2
82 #define JMP_INDIRECT 3
83 unsigned int jmp;
84 uint32_t jmp_pc;
86 int abort_at_next_insn;
87 int nr_nops;
88 struct TranslationBlock *tb;
89 int singlestep_enabled;
90 } DisasContext;
92 static const char *regnames[] =
94 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
95 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
96 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
97 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
100 static const char *special_regnames[] =
102 "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
103 "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
104 "sr16", "sr17", "sr18"
107 static inline void t_sync_flags(DisasContext *dc)
109 /* Synch the tb dependent flags between translator and runtime. */
110 if (dc->tb_flags != dc->synced_flags) {
111 tcg_gen_movi_tl(env_iflags, dc->tb_flags);
112 dc->synced_flags = dc->tb_flags;
116 static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
118 TCGv_i32 tmp = tcg_const_i32(index);
120 t_sync_flags(dc);
121 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
122 gen_helper_raise_exception(cpu_env, tmp);
123 tcg_temp_free_i32(tmp);
124 dc->is_jmp = DISAS_UPDATE;
127 static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
129 #ifndef CONFIG_USER_ONLY
130 return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
131 #else
132 return true;
133 #endif
136 static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
138 if (use_goto_tb(dc, dest)) {
139 tcg_gen_goto_tb(n);
140 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
141 tcg_gen_exit_tb((uintptr_t)dc->tb + n);
142 } else {
143 tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
144 tcg_gen_exit_tb(0);
148 static void read_carry(DisasContext *dc, TCGv d)
150 tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
154 * write_carry sets the carry bits in MSR based on bit 0 of v.
155 * v[31:1] are ignored.
157 static void write_carry(DisasContext *dc, TCGv v)
159 TCGv t0 = tcg_temp_new();
160 tcg_gen_shli_tl(t0, v, 31);
161 tcg_gen_sari_tl(t0, t0, 31);
162 tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
163 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
164 ~(MSR_C | MSR_CC));
165 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
166 tcg_temp_free(t0);
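/*
 * Illustrative restatement (plain-C sketch, an assumption for exposition) of
 * what write_carry() emits: bit 0 of v is smeared into a full-width mask by
 * the shl-31 / sar-31 pair, so both carry bits in MSR (MSR_C and MSR_CC) are
 * set or cleared together from that single bit.
 */
static inline uint32_t write_carry_example(uint32_t msr, uint32_t v,
                                           uint32_t carry_bits /* MSR_C|MSR_CC */)
{
    uint32_t mask = (uint32_t)((int32_t)(v << 31) >> 31); /* 0 or 0xffffffff */
    return (msr & ~carry_bits) | (mask & carry_bits);
}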
169 static void write_carryi(DisasContext *dc, bool carry)
171 TCGv t0 = tcg_temp_new();
172 tcg_gen_movi_tl(t0, carry);
173 write_carry(dc, t0);
174 tcg_temp_free(t0);
177 /* True if ALU operand b is a small immediate that may deserve
178 faster treatment. */
179 static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
181 /* Immediate insn without the imm prefix ? */
182 return dc->type_b && !(dc->tb_flags & IMM_FLAG);
185 static inline TCGv *dec_alu_op_b(DisasContext *dc)
187 if (dc->type_b) {
188 if (dc->tb_flags & IMM_FLAG)
189 tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
190 else
191 tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
192 return &env_imm;
193 } else
194 return &cpu_R[dc->rb];
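/*
 * Minimal plain-C sketch (illustrative only, names invented for exposition) of
 * how a type-B operand is formed.  dec_imm() later in this file latches
 * imm << 16 into env_imm and sets IMM_FLAG; the next type-B insn then has its
 * own 16-bit immediate ORed into the low half here.  Without the prefix the
 * immediate is simply sign-extended.
 */
static inline uint32_t mb_operand_b_example(bool imm_prefix_seen,
                                            uint32_t latched_env_imm,
                                            uint16_t imm16)
{
    if (imm_prefix_seen) {
        /* e.g. 0x12340000 | 0x5678 == 0x12345678 */
        return latched_env_imm | imm16;
    }
    return (uint32_t)(int32_t)(int16_t)imm16; /* sign-extend 16 -> 32 bits */
}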
197 static void dec_add(DisasContext *dc)
199 unsigned int k, c;
200 TCGv cf;
202 k = dc->opcode & 4;
203 c = dc->opcode & 2;
205 LOG_DIS("add%s%s%s r%d r%d r%d\n",
206 dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
207 dc->rd, dc->ra, dc->rb);
209 /* Take care of the easy cases first. */
210 if (k) {
211 /* k - keep carry, no need to update MSR. */
212 /* If rd == r0, it's a nop. */
213 if (dc->rd) {
214 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
216 if (c) {
217 /* c - Add carry into the result. */
218 cf = tcg_temp_new();
220 read_carry(dc, cf);
221 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
222 tcg_temp_free(cf);
225 return;
228 /* From now on, we can assume k is zero. So we need to update MSR. */
229 /* Extract carry. */
230 cf = tcg_temp_new();
231 if (c) {
232 read_carry(dc, cf);
233 } else {
234 tcg_gen_movi_tl(cf, 0);
237 if (dc->rd) {
238 TCGv ncf = tcg_temp_new();
239 gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
240 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
241 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
242 write_carry(dc, ncf);
243 tcg_temp_free(ncf);
244 } else {
245 gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
246 write_carry(dc, cf);
248 tcg_temp_free(cf);
251 static void dec_sub(DisasContext *dc)
253 unsigned int u, cmp, k, c;
254 TCGv cf, na;
256 u = dc->imm & 2;
257 k = dc->opcode & 4;
258 c = dc->opcode & 2;
259 cmp = (dc->imm & 1) && (!dc->type_b) && k;
261 if (cmp) {
262 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
263 if (dc->rd) {
264 if (u)
265 gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
266 else
267 gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
269 return;
272 LOG_DIS("sub%s%s r%d, r%d r%d\n",
273 k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
275 /* Take care of the easy cases first. */
276 if (k) {
277 /* k - keep carry, no need to update MSR. */
278 /* If rd == r0, it's a nop. */
279 if (dc->rd) {
280 tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
282 if (c) {
283 /* c - Add carry into the result. */
284 cf = tcg_temp_new();
286 read_carry(dc, cf);
287 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
288 tcg_temp_free(cf);
291 return;
294 /* From now on, we can assume k is zero. So we need to update MSR. */
295 /* Extract carry. And complement a into na. */
296 cf = tcg_temp_new();
297 na = tcg_temp_new();
298 if (c) {
299 read_carry(dc, cf);
300 } else {
301 tcg_gen_movi_tl(cf, 1);
304 /* d = b + ~a + c. carry defaults to 1. */
305 tcg_gen_not_tl(na, cpu_R[dc->ra]);
307 if (dc->rd) {
308 TCGv ncf = tcg_temp_new();
309 gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
310 tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
311 tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
312 write_carry(dc, ncf);
313 tcg_temp_free(ncf);
314 } else {
315 gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
316 write_carry(dc, cf);
318 tcg_temp_free(cf);
319 tcg_temp_free(na);
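/*
 * Illustrative sketch (plain C, an assumption for exposition) of the
 * "d = b + ~a + c" trick used above.  With c defaulting to 1 this is ordinary
 * two's-complement subtraction, and the carry out of the 33-bit sum is set
 * exactly when b >= a (no borrow), which is what gen_helper_carry records in
 * the new carry value.
 */
static inline uint32_t mb_rsub_example(uint32_t a, uint32_t b, uint32_t c,
                                       uint32_t *carry_out)
{
    uint64_t sum = (uint64_t)b + (uint32_t)~a + c; /* 33-bit intermediate */
    *carry_out = (uint32_t)(sum >> 32);            /* 1 iff no borrow when c == 1 */
    return (uint32_t)sum;                          /* equals b - a when c == 1 */
}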
322 static void dec_pattern(DisasContext *dc)
324 unsigned int mode;
326 if ((dc->tb_flags & MSR_EE_FLAG)
327 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
328 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
329 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
330 t_gen_raise_exception(dc, EXCP_HW_EXCP);
333 mode = dc->opcode & 3;
334 switch (mode) {
335 case 0:
336 /* pcmpbf. */
337 LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
338 if (dc->rd)
339 gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
340 break;
341 case 2:
342 LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
343 if (dc->rd) {
344 tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
345 cpu_R[dc->ra], cpu_R[dc->rb]);
347 break;
348 case 3:
349 LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
350 if (dc->rd) {
351 tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
352 cpu_R[dc->ra], cpu_R[dc->rb]);
354 break;
355 default:
356 cpu_abort(CPU(dc->cpu),
357 "unsupported pattern insn opcode=%x\n", dc->opcode);
358 break;
362 static void dec_and(DisasContext *dc)
364 unsigned int not;
366 if (!dc->type_b && (dc->imm & (1 << 10))) {
367 dec_pattern(dc);
368 return;
371 not = dc->opcode & (1 << 1);
372 LOG_DIS("and%s\n", not ? "n" : "");
374 if (!dc->rd)
375 return;
377 if (not) {
378 tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
379 } else
380 tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
383 static void dec_or(DisasContext *dc)
385 if (!dc->type_b && (dc->imm & (1 << 10))) {
386 dec_pattern(dc);
387 return;
390 LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
391 if (dc->rd)
392 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
395 static void dec_xor(DisasContext *dc)
397 if (!dc->type_b && (dc->imm & (1 << 10))) {
398 dec_pattern(dc);
399 return;
402 LOG_DIS("xor r%d\n", dc->rd);
403 if (dc->rd)
404 tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
407 static inline void msr_read(DisasContext *dc, TCGv d)
409 tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
412 static inline void msr_write(DisasContext *dc, TCGv v)
414 TCGv t;
416 t = tcg_temp_new();
417 dc->cpustate_changed = 1;
418 /* PVR bit is not writable. */
419 tcg_gen_andi_tl(t, v, ~MSR_PVR);
420 tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
421 tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], v);
422 tcg_temp_free(t);
425 static void dec_msr(DisasContext *dc)
427 CPUState *cs = CPU(dc->cpu);
428 TCGv t0, t1;
429 unsigned int sr, to, rn;
430 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
432 sr = dc->imm & ((1 << 14) - 1);
433 to = dc->imm & (1 << 14);
434 dc->type_b = 1;
435 if (to)
436 dc->cpustate_changed = 1;
438 /* msrclr and msrset. */
439 if (!(dc->imm & (1 << 15))) {
440 unsigned int clr = dc->ir & (1 << 16);
442 LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
443 dc->rd, dc->imm);
445 if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
446 /* nop??? */
447 return;
450 if ((dc->tb_flags & MSR_EE_FLAG)
451 && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
452 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
453 t_gen_raise_exception(dc, EXCP_HW_EXCP);
454 return;
457 if (dc->rd)
458 msr_read(dc, cpu_R[dc->rd]);
460 t0 = tcg_temp_new();
461 t1 = tcg_temp_new();
462 msr_read(dc, t0);
463 tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
465 if (clr) {
466 tcg_gen_not_tl(t1, t1);
467 tcg_gen_and_tl(t0, t0, t1);
468 } else
469 tcg_gen_or_tl(t0, t0, t1);
470 msr_write(dc, t0);
471 tcg_temp_free(t0);
472 tcg_temp_free(t1);
473 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
474 dc->is_jmp = DISAS_UPDATE;
475 return;
478 if (to) {
479 if ((dc->tb_flags & MSR_EE_FLAG)
480 && mem_index == MMU_USER_IDX) {
481 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
482 t_gen_raise_exception(dc, EXCP_HW_EXCP);
483 return;
487 #if !defined(CONFIG_USER_ONLY)
488 /* Catch read/writes to the mmu block. */
489 if ((sr & ~0xff) == 0x1000) {
490 sr &= 7;
491 LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
492 if (to)
493 gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
494 else
495 gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
496 return;
498 #endif
500 if (to) {
501 LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
502 switch (sr) {
503 case 0:
504 break;
505 case 1:
506 msr_write(dc, cpu_R[dc->ra]);
507 break;
508 case 0x3:
509 tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
510 break;
511 case 0x5:
512 tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
513 break;
514 case 0x7:
515 tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
516 break;
517 case 0x800:
518 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
519 break;
520 case 0x802:
521 tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
522 break;
523 default:
524 cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
525 break;
527 } else {
528 LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
530 switch (sr) {
531 case 0:
532 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
533 break;
534 case 1:
535 msr_read(dc, cpu_R[dc->rd]);
536 break;
537 case 0x3:
538 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
539 break;
540 case 0x5:
541 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
542 break;
543 case 0x7:
544 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
545 break;
546 case 0xb:
547 tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
548 break;
549 case 0x800:
550 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
551 break;
552 case 0x802:
553 tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
554 break;
555 case 0x2000:
556 case 0x2001:
557 case 0x2002:
558 case 0x2003:
559 case 0x2004:
560 case 0x2005:
561 case 0x2006:
562 case 0x2007:
563 case 0x2008:
564 case 0x2009:
565 case 0x200a:
566 case 0x200b:
567 case 0x200c:
568 rn = sr & 0xf;
569 tcg_gen_ld_tl(cpu_R[dc->rd],
570 cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
571 break;
572 default:
573 cpu_abort(cs, "unknown mfs reg %x\n", sr);
574 break;
578 if (dc->rd == 0) {
579 tcg_gen_movi_tl(cpu_R[0], 0);
583 /* 64-bit signed mul, lower result in d and upper in d2. */
584 static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
586 TCGv_i64 t0, t1;
588 t0 = tcg_temp_new_i64();
589 t1 = tcg_temp_new_i64();
591 tcg_gen_ext_i32_i64(t0, a);
592 tcg_gen_ext_i32_i64(t1, b);
593 tcg_gen_mul_i64(t0, t0, t1);
595 tcg_gen_extrl_i64_i32(d, t0);
596 tcg_gen_shri_i64(t0, t0, 32);
597 tcg_gen_extrl_i64_i32(d2, t0);
599 tcg_temp_free_i64(t0);
600 tcg_temp_free_i64(t1);
603 /* 64-bit unsigned muls, lower result in d and upper in d2. */
604 static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
606 TCGv_i64 t0, t1;
608 t0 = tcg_temp_new_i64();
609 t1 = tcg_temp_new_i64();
611 tcg_gen_extu_i32_i64(t0, a);
612 tcg_gen_extu_i32_i64(t1, b);
613 tcg_gen_mul_i64(t0, t0, t1);
615 tcg_gen_extrl_i64_i32(d, t0);
616 tcg_gen_shri_i64(t0, t0, 32);
617 tcg_gen_extrl_i64_i32(d2, t0);
619 tcg_temp_free_i64(t0);
620 tcg_temp_free_i64(t1);
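/*
 * Minimal sketch (plain C, for illustration only) of what t_gen_muls and
 * t_gen_mulu emit: widen both operands, multiply, then split the 64-bit
 * product into a low word (d) and a high word (d2).
 */
static inline void mulu_example(uint32_t a, uint32_t b,
                                uint32_t *lo, uint32_t *hi)
{
    uint64_t prod = (uint64_t)a * b;  /* unsigned widening multiply */
    *lo = (uint32_t)prod;             /* what lands in d  */
    *hi = (uint32_t)(prod >> 32);     /* what lands in d2 */
}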
623 /* Multiplier unit. */
624 static void dec_mul(DisasContext *dc)
626 TCGv d[2];
627 unsigned int subcode;
629 if ((dc->tb_flags & MSR_EE_FLAG)
630 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
631 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
632 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
633 t_gen_raise_exception(dc, EXCP_HW_EXCP);
634 return;
637 subcode = dc->imm & 3;
638 d[0] = tcg_temp_new();
639 d[1] = tcg_temp_new();
641 if (dc->type_b) {
642 LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
643 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
644 goto done;
647 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
648 if (subcode >= 1 && subcode <= 3
649 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
650 /* nop??? */
653 switch (subcode) {
654 case 0:
655 LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
656 t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
657 break;
658 case 1:
659 LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
660 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
661 break;
662 case 2:
663 LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
664 t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
665 break;
666 case 3:
667 LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
668 t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
669 break;
670 default:
671 cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
672 break;
674 done:
675 tcg_temp_free(d[0]);
676 tcg_temp_free(d[1]);
679 /* Div unit. */
680 static void dec_div(DisasContext *dc)
682 unsigned int u;
684 u = dc->imm & 2;
685 LOG_DIS("div\n");
687 if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
688 && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
689 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
690 t_gen_raise_exception(dc, EXCP_HW_EXCP);
693 if (u)
694 gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
695 cpu_R[dc->ra]);
696 else
697 gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
698 cpu_R[dc->ra]);
699 if (!dc->rd)
700 tcg_gen_movi_tl(cpu_R[dc->rd], 0);
703 static void dec_barrel(DisasContext *dc)
705 TCGv t0;
706 unsigned int s, t;
708 if ((dc->tb_flags & MSR_EE_FLAG)
709 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
710 && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
711 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
712 t_gen_raise_exception(dc, EXCP_HW_EXCP);
713 return;
716 s = dc->imm & (1 << 10);
717 t = dc->imm & (1 << 9);
719 LOG_DIS("bs%s%s r%d r%d r%d\n",
720 s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
722 t0 = tcg_temp_new();
724 tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
725 tcg_gen_andi_tl(t0, t0, 31);
727 if (s)
728 tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
729 else {
730 if (t)
731 tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
732 else
733 tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
737 static void dec_bit(DisasContext *dc)
739 CPUState *cs = CPU(dc->cpu);
740 TCGv t0;
741 unsigned int op;
742 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
744 op = dc->ir & ((1 << 9) - 1);
745 switch (op) {
746 case 0x21:
747 /* src. */
748 t0 = tcg_temp_new();
750 LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
751 tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
752 write_carry(dc, cpu_R[dc->ra]);
753 if (dc->rd) {
754 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
755 tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
757 tcg_temp_free(t0);
758 break;
760 case 0x1:
761 case 0x41:
762 /* srl. */
763 LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);
765 /* Update carry. Note that write carry only looks at the LSB. */
766 write_carry(dc, cpu_R[dc->ra]);
767 if (dc->rd) {
768 if (op == 0x41)
769 tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
770 else
771 tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
773 break;
774 case 0x60:
775 LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
776 tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
777 break;
778 case 0x61:
779 LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
780 tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
781 break;
782 case 0x64:
783 case 0x66:
784 case 0x74:
785 case 0x76:
786 /* wdc. */
787 LOG_DIS("wdc r%d\n", dc->ra);
788 if ((dc->tb_flags & MSR_EE_FLAG)
789 && mem_index == MMU_USER_IDX) {
790 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
791 t_gen_raise_exception(dc, EXCP_HW_EXCP);
792 return;
794 break;
795 case 0x68:
796 /* wic. */
797 LOG_DIS("wic r%d\n", dc->ra);
798 if ((dc->tb_flags & MSR_EE_FLAG)
799 && mem_index == MMU_USER_IDX) {
800 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
801 t_gen_raise_exception(dc, EXCP_HW_EXCP);
802 return;
804 break;
805 case 0xe0:
806 if ((dc->tb_flags & MSR_EE_FLAG)
807 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
808 && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
809 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
810 t_gen_raise_exception(dc, EXCP_HW_EXCP);
812 if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
813 gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
815 break;
816 case 0x1e0:
817 /* swapb */
818 LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
819 tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
820 break;
821 case 0x1e2:
822 /*swaph */
823 LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
824 tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
825 break;
826 default:
827 cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
828 dc->pc, op, dc->rd, dc->ra, dc->rb);
829 break;
833 static inline void sync_jmpstate(DisasContext *dc)
835 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
836 if (dc->jmp == JMP_DIRECT) {
837 tcg_gen_movi_tl(env_btaken, 1);
839 dc->jmp = JMP_INDIRECT;
840 tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
844 static void dec_imm(DisasContext *dc)
846 LOG_DIS("imm %x\n", dc->imm << 16);
847 tcg_gen_movi_tl(env_imm, (dc->imm << 16));
848 dc->tb_flags |= IMM_FLAG;
849 dc->clear_imm = 0;
852 static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
854 unsigned int extimm = dc->tb_flags & IMM_FLAG;
855 /* Should be set to one if r1 is used by loadstores. */
856 int stackprot = 0;
858 /* All load/stores use ra. */
859 if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
860 stackprot = 1;
863 /* Treat the common cases first. */
864 if (!dc->type_b) {
865 /* If any of the regs is r0, return a ptr to the other. */
866 if (dc->ra == 0) {
867 return &cpu_R[dc->rb];
868 } else if (dc->rb == 0) {
869 return &cpu_R[dc->ra];
872 if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
873 stackprot = 1;
876 *t = tcg_temp_new();
877 tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
879 if (stackprot) {
880 gen_helper_stackprot(cpu_env, *t);
882 return t;
884 /* Immediate. */
885 if (!extimm) {
886 if (dc->imm == 0) {
887 return &cpu_R[dc->ra];
889 *t = tcg_temp_new();
890 tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
891 tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
892 } else {
893 *t = tcg_temp_new();
894 tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
897 if (stackprot) {
898 gen_helper_stackprot(cpu_env, *t);
900 return t;
903 static void dec_load(DisasContext *dc)
905 TCGv t, v, *addr;
906 unsigned int size, rev = 0, ex = 0;
907 TCGMemOp mop;
909 mop = dc->opcode & 3;
910 size = 1 << mop;
911 if (!dc->type_b) {
912 rev = (dc->ir >> 9) & 1;
913 ex = (dc->ir >> 10) & 1;
915 mop |= MO_TE;
916 if (rev) {
917 mop ^= MO_BSWAP;
920 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
921 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
922 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
923 t_gen_raise_exception(dc, EXCP_HW_EXCP);
924 return;
927 LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
928 ex ? "x" : "");
930 t_sync_flags(dc);
931 addr = compute_ldst_addr(dc, &t);
934 * When doing reverse accesses we need to do two things.
936 * 1. Reverse the address wrt endianness.
937 * 2. Byteswap the data lanes on the way back into the CPU core.
939 if (rev && size != 4) {
940 /* Endian reverse the address. t is addr. */
941 switch (size) {
942 case 1:
944 /* 00 -> 11
945 01 -> 10
946 10 -> 01
947 11 -> 00 */
948 TCGv low = tcg_temp_new();
950 /* Force addr into the temp. */
951 if (addr != &t) {
952 t = tcg_temp_new();
953 tcg_gen_mov_tl(t, *addr);
954 addr = &t;
957 tcg_gen_andi_tl(low, t, 3);
958 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
959 tcg_gen_andi_tl(t, t, ~3);
960 tcg_gen_or_tl(t, t, low);
961 tcg_gen_mov_tl(env_imm, t);
962 tcg_temp_free(low);
963 break;
966 case 2:
967 /* 00 -> 10
968 10 -> 00. */
969 /* Force addr into the temp. */
970 if (addr != &t) {
971 t = tcg_temp_new();
972 tcg_gen_xori_tl(t, *addr, 2);
973 addr = &t;
974 } else {
975 tcg_gen_xori_tl(t, t, 2);
977 break;
978 default:
979 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
980 break;
984 /* lwx does not throw unaligned access errors, so force alignment */
985 if (ex) {
986 /* Force addr into the temp. */
987 if (addr != &t) {
988 t = tcg_temp_new();
989 tcg_gen_mov_tl(t, *addr);
990 addr = &t;
992 tcg_gen_andi_tl(t, t, ~3);
995 /* If we get a fault on a dslot, the jmpstate better be in sync. */
996 sync_jmpstate(dc);
998 /* Verify alignment if needed. */
1000 * Microblaze gives MMU faults priority over faults due to
1001 * unaligned addresses. That's why we speculatively do the load
1002 * into v. If the load succeeds, we verify alignment of the
1003 * address and if that succeeds we write into the destination reg.
1005 v = tcg_temp_new();
1006 tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
1008 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1009 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1010 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1011 tcg_const_tl(0), tcg_const_tl(size - 1));
1014 if (ex) {
1015 tcg_gen_mov_tl(env_res_addr, *addr);
1016 tcg_gen_mov_tl(env_res_val, v);
1018 if (dc->rd) {
1019 tcg_gen_mov_tl(cpu_R[dc->rd], v);
1021 tcg_temp_free(v);
1023 if (ex) { /* lwx */
1024 /* no support for AXI exclusive so always clear C */
1025 write_carryi(dc, 0);
1028 if (addr == &t)
1029 tcg_temp_free(t);
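/*
 * Illustrative helper (an assumption, not part of the original file) restating
 * the address transform used for reversed sub-word accesses above: within a
 * 32-bit word, byte lanes are mirrored (offset 0<->3, 1<->2) and halfword
 * lanes are swapped (offset 0<->2), matching the "3 - (addr & 3)" and
 * "addr ^ 2" sequences emitted in dec_load() and dec_store().
 */
static inline uint32_t reverse_subword_addr_example(uint32_t addr, int size)
{
    if (size == 1) {
        return (addr & ~3u) | (3u - (addr & 3u)); /* mirror byte lane   */
    }
    return addr ^ 2u;                             /* swap halfword lane */
}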
1032 static void dec_store(DisasContext *dc)
1034 TCGv t, *addr, swx_addr;
1035 TCGLabel *swx_skip = NULL;
1036 unsigned int size, rev = 0, ex = 0;
1037 TCGMemOp mop;
1039 mop = dc->opcode & 3;
1040 size = 1 << mop;
1041 if (!dc->type_b) {
1042 rev = (dc->ir >> 9) & 1;
1043 ex = (dc->ir >> 10) & 1;
1045 mop |= MO_TE;
1046 if (rev) {
1047 mop ^= MO_BSWAP;
1050 if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
1051 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1052 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1053 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1054 return;
1057 LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
1058 ex ? "x" : "");
1059 t_sync_flags(dc);
1060 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1061 sync_jmpstate(dc);
1062 addr = compute_ldst_addr(dc, &t);
1064 swx_addr = tcg_temp_local_new();
1065 if (ex) { /* swx */
1066 TCGv tval;
1068 /* Force addr into the swx_addr. */
1069 tcg_gen_mov_tl(swx_addr, *addr);
1070 addr = &swx_addr;
1071 /* swx does not throw unaligned access errors, so force alignment */
1072 tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
1074 write_carryi(dc, 1);
1075 swx_skip = gen_new_label();
1076 tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
1078 /* Compare the value loaded at lwx with current contents of
1079 the reserved location.
1080 FIXME: This only works for system emulation where we can expect
1081 this compare and the following write to be atomic. For user
1082 emulation we need to add atomicity between threads. */
1083 tval = tcg_temp_new();
1084 tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
1085 MO_TEUL);
1086 tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
1087 write_carryi(dc, 0);
1088 tcg_temp_free(tval);
1091 if (rev && size != 4) {
1092 /* Endian reverse the address. t is addr. */
1093 switch (size) {
1094 case 1:
1096 /* 00 -> 11
1097 01 -> 10
1098 10 -> 01
1099 11 -> 00 */
1100 TCGv low = tcg_temp_new();
1102 /* Force addr into the temp. */
1103 if (addr != &t) {
1104 t = tcg_temp_new();
1105 tcg_gen_mov_tl(t, *addr);
1106 addr = &t;
1109 tcg_gen_andi_tl(low, t, 3);
1110 tcg_gen_sub_tl(low, tcg_const_tl(3), low);
1111 tcg_gen_andi_tl(t, t, ~3);
1112 tcg_gen_or_tl(t, t, low);
1113 tcg_gen_mov_tl(env_imm, t);
1114 tcg_temp_free(low);
1115 break;
1118 case 2:
1119 /* 00 -> 10
1120 10 -> 00. */
1121 /* Force addr into the temp. */
1122 if (addr != &t) {
1123 t = tcg_temp_new();
1124 tcg_gen_xori_tl(t, *addr, 2);
1125 addr = &t;
1126 } else {
1127 tcg_gen_xori_tl(t, t, 2);
1129 break;
1130 default:
1131 cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
1132 break;
1135 tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
1137 /* Verify alignment if needed. */
1138 if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
1139 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1140 /* FIXME: if the alignment is wrong, we should restore the value
1141 * in memory. One possible way to achieve this is to probe
1142 * the MMU prior to the memaccess, that way we could put
1143 * the alignment checks in between the probe and the mem
1144 * access.
1146 gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
1147 tcg_const_tl(1), tcg_const_tl(size - 1));
1150 if (ex) {
1151 gen_set_label(swx_skip);
1153 tcg_temp_free(swx_addr);
1155 if (addr == &t)
1156 tcg_temp_free(t);
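/*
 * Minimal sketch (plain C, illustrative; the real code emits TCG ops) of the
 * lwx/swx reservation protocol implemented above: lwx records the address and
 * the value it loaded; swx stores only if the reservation address matches and
 * the word in memory still holds the recorded value, and reports the outcome
 * in the carry flag (0 = store performed, 1 = store skipped).
 */
struct mb_resv_example { uint32_t addr; uint32_t val; };

static inline int swx_example(struct mb_resv_example *resv,
                              uint32_t addr, uint32_t *mem_word,
                              uint32_t new_val)
{
    if (resv->addr == (addr & ~3u) && *mem_word == resv->val) {
        *mem_word = new_val;
        return 0;   /* carry cleared: store succeeded */
    }
    return 1;       /* carry set: reservation lost, store skipped */
}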
1159 static inline void eval_cc(DisasContext *dc, unsigned int cc,
1160 TCGv d, TCGv a, TCGv b)
1162 switch (cc) {
1163 case CC_EQ:
1164 tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
1165 break;
1166 case CC_NE:
1167 tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
1168 break;
1169 case CC_LT:
1170 tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
1171 break;
1172 case CC_LE:
1173 tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
1174 break;
1175 case CC_GE:
1176 tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
1177 break;
1178 case CC_GT:
1179 tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
1180 break;
1181 default:
1182 cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
1183 break;
1187 static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
1189 TCGLabel *l1 = gen_new_label();
1190 /* Conditional jmp. */
1191 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
1192 tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
1193 tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
1194 gen_set_label(l1);
1197 static void dec_bcc(DisasContext *dc)
1199 unsigned int cc;
1200 unsigned int dslot;
1202 cc = EXTRACT_FIELD(dc->ir, 21, 23);
1203 dslot = dc->ir & (1 << 25);
1204 LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
1206 dc->delayed_branch = 1;
1207 if (dslot) {
1208 dc->delayed_branch = 2;
1209 dc->tb_flags |= D_FLAG;
1210 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1211 cpu_env, offsetof(CPUMBState, bimm));
1214 if (dec_alu_op_b_is_small_imm(dc)) {
1215 int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
1217 tcg_gen_movi_tl(env_btarget, dc->pc + offset);
1218 dc->jmp = JMP_DIRECT_CC;
1219 dc->jmp_pc = dc->pc + offset;
1220 } else {
1221 dc->jmp = JMP_INDIRECT;
1222 tcg_gen_movi_tl(env_btarget, dc->pc);
1223 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1225 eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
1228 static void dec_br(DisasContext *dc)
1230 unsigned int dslot, link, abs, mbar;
1231 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1233 dslot = dc->ir & (1 << 20);
1234 abs = dc->ir & (1 << 19);
1235 link = dc->ir & (1 << 18);
1237 /* Memory barrier. */
1238 mbar = (dc->ir >> 16) & 31;
1239 if (mbar == 2 && dc->imm == 4) {
1240 /* mbar IMM & 16 decodes to sleep. */
1241 if (dc->rd & 16) {
1242 TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
1243 TCGv_i32 tmp_1 = tcg_const_i32(1);
1245 LOG_DIS("sleep\n");
1247 t_sync_flags(dc);
1248 tcg_gen_st_i32(tmp_1, cpu_env,
1249 -offsetof(MicroBlazeCPU, env)
1250 +offsetof(CPUState, halted));
1251 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
1252 gen_helper_raise_exception(cpu_env, tmp_hlt);
1253 tcg_temp_free_i32(tmp_hlt);
1254 tcg_temp_free_i32(tmp_1);
1255 return;
1257 LOG_DIS("mbar %d\n", dc->rd);
1258 /* Break the TB. */
1259 dc->cpustate_changed = 1;
1260 return;
1263 LOG_DIS("br%s%s%s%s imm=%x\n",
1264 abs ? "a" : "", link ? "l" : "",
1265 dc->type_b ? "i" : "", dslot ? "d" : "",
1266 dc->imm);
1268 dc->delayed_branch = 1;
1269 if (dslot) {
1270 dc->delayed_branch = 2;
1271 dc->tb_flags |= D_FLAG;
1272 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1273 cpu_env, offsetof(CPUMBState, bimm));
1275 if (link && dc->rd)
1276 tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
1278 dc->jmp = JMP_INDIRECT;
1279 if (abs) {
1280 tcg_gen_movi_tl(env_btaken, 1);
1281 tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
1282 if (link && !dslot) {
1283 if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
1284 t_gen_raise_exception(dc, EXCP_BREAK);
1285 if (dc->imm == 0) {
1286 if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
1287 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1288 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1289 return;
1292 t_gen_raise_exception(dc, EXCP_DEBUG);
1295 } else {
1296 if (dec_alu_op_b_is_small_imm(dc)) {
1297 dc->jmp = JMP_DIRECT;
1298 dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
1299 } else {
1300 tcg_gen_movi_tl(env_btaken, 1);
1301 tcg_gen_movi_tl(env_btarget, dc->pc);
1302 tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
1307 static inline void do_rti(DisasContext *dc)
1309 TCGv t0, t1;
1310 t0 = tcg_temp_new();
1311 t1 = tcg_temp_new();
1312 tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
1313 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
1314 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1316 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1317 tcg_gen_or_tl(t1, t1, t0);
1318 msr_write(dc, t1);
1319 tcg_temp_free(t1);
1320 tcg_temp_free(t0);
1321 dc->tb_flags &= ~DRTI_FLAG;
1324 static inline void do_rtb(DisasContext *dc)
1326 TCGv t0, t1;
1327 t0 = tcg_temp_new();
1328 t1 = tcg_temp_new();
1329 tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
1330 tcg_gen_shri_tl(t0, t1, 1);
1331 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1333 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1334 tcg_gen_or_tl(t1, t1, t0);
1335 msr_write(dc, t1);
1336 tcg_temp_free(t1);
1337 tcg_temp_free(t0);
1338 dc->tb_flags &= ~DRTB_FLAG;
1341 static inline void do_rte(DisasContext *dc)
1343 TCGv t0, t1;
1344 t0 = tcg_temp_new();
1345 t1 = tcg_temp_new();
1347 tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
1348 tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
1349 tcg_gen_shri_tl(t0, t1, 1);
1350 tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
1352 tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
1353 tcg_gen_or_tl(t1, t1, t0);
1354 msr_write(dc, t1);
1355 tcg_temp_free(t1);
1356 tcg_temp_free(t0);
1357 dc->tb_flags &= ~DRTE_FLAG;
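/*
 * Sketch (plain C, illustrative) of the MSR rewrite shared by do_rti, do_rtb
 * and do_rte above: the saved user/VM mode bits sit one position above the
 * live MSR_UM/MSR_VM bits, so shifting MSR right by one and masking with
 * (MSR_VM | MSR_UM) restores the pre-exception mode.
 */
static inline uint32_t rtx_restore_mode_example(uint32_t msr)
{
    uint32_t saved = (msr >> 1) & (MSR_VM | MSR_UM); /* UMS/VMS -> UM/VM */
    return (msr & ~(MSR_VM | MSR_UM)) | saved;
}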
1360 static void dec_rts(DisasContext *dc)
1362 unsigned int b_bit, i_bit, e_bit;
1363 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1365 i_bit = dc->ir & (1 << 21);
1366 b_bit = dc->ir & (1 << 22);
1367 e_bit = dc->ir & (1 << 23);
1369 dc->delayed_branch = 2;
1370 dc->tb_flags |= D_FLAG;
1371 tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
1372 cpu_env, offsetof(CPUMBState, bimm));
1374 if (i_bit) {
1375 LOG_DIS("rtid ir=%x\n", dc->ir);
1376 if ((dc->tb_flags & MSR_EE_FLAG)
1377 && mem_index == MMU_USER_IDX) {
1378 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1379 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1381 dc->tb_flags |= DRTI_FLAG;
1382 } else if (b_bit) {
1383 LOG_DIS("rtbd ir=%x\n", dc->ir);
1384 if ((dc->tb_flags & MSR_EE_FLAG)
1385 && mem_index == MMU_USER_IDX) {
1386 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1387 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1389 dc->tb_flags |= DRTB_FLAG;
1390 } else if (e_bit) {
1391 LOG_DIS("rted ir=%x\n", dc->ir);
1392 if ((dc->tb_flags & MSR_EE_FLAG)
1393 && mem_index == MMU_USER_IDX) {
1394 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1395 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1397 dc->tb_flags |= DRTE_FLAG;
1398 } else
1399 LOG_DIS("rts ir=%x\n", dc->ir);
1401 dc->jmp = JMP_INDIRECT;
1402 tcg_gen_movi_tl(env_btaken, 1);
1403 tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
1406 static int dec_check_fpuv2(DisasContext *dc)
1408 if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
1409 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
1410 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1412 return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
1415 static void dec_fpu(DisasContext *dc)
1417 unsigned int fpu_insn;
1419 if ((dc->tb_flags & MSR_EE_FLAG)
1420 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1421 && (dc->cpu->cfg.use_fpu != 1)) {
1422 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1423 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1424 return;
1427 fpu_insn = (dc->ir >> 7) & 7;
1429 switch (fpu_insn) {
1430 case 0:
1431 gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1432 cpu_R[dc->rb]);
1433 break;
1435 case 1:
1436 gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1437 cpu_R[dc->rb]);
1438 break;
1440 case 2:
1441 gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1442 cpu_R[dc->rb]);
1443 break;
1445 case 3:
1446 gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
1447 cpu_R[dc->rb]);
1448 break;
1450 case 4:
1451 switch ((dc->ir >> 4) & 7) {
1452 case 0:
1453 gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
1454 cpu_R[dc->ra], cpu_R[dc->rb]);
1455 break;
1456 case 1:
1457 gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
1458 cpu_R[dc->ra], cpu_R[dc->rb]);
1459 break;
1460 case 2:
1461 gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
1462 cpu_R[dc->ra], cpu_R[dc->rb]);
1463 break;
1464 case 3:
1465 gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
1466 cpu_R[dc->ra], cpu_R[dc->rb]);
1467 break;
1468 case 4:
1469 gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
1470 cpu_R[dc->ra], cpu_R[dc->rb]);
1471 break;
1472 case 5:
1473 gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
1474 cpu_R[dc->ra], cpu_R[dc->rb]);
1475 break;
1476 case 6:
1477 gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
1478 cpu_R[dc->ra], cpu_R[dc->rb]);
1479 break;
1480 default:
1481 qemu_log_mask(LOG_UNIMP,
1482 "unimplemented fcmp fpu_insn=%x pc=%x"
1483 " opc=%x\n",
1484 fpu_insn, dc->pc, dc->opcode);
1485 dc->abort_at_next_insn = 1;
1486 break;
1488 break;
1490 case 5:
1491 if (!dec_check_fpuv2(dc)) {
1492 return;
1494 gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1495 break;
1497 case 6:
1498 if (!dec_check_fpuv2(dc)) {
1499 return;
1501 gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1502 break;
1504 case 7:
1505 if (!dec_check_fpuv2(dc)) {
1506 return;
1508 gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
1509 break;
1511 default:
1512 qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
1513 " opc=%x\n",
1514 fpu_insn, dc->pc, dc->opcode);
1515 dc->abort_at_next_insn = 1;
1516 break;
1520 static void dec_null(DisasContext *dc)
1522 if ((dc->tb_flags & MSR_EE_FLAG)
1523 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
1524 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1525 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1526 return;
1528 qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
1529 dc->abort_at_next_insn = 1;
1532 /* Insns connected to FSL or AXI stream attached devices. */
1533 static void dec_stream(DisasContext *dc)
1535 int mem_index = cpu_mmu_index(&dc->cpu->env, false);
1536 TCGv_i32 t_id, t_ctrl;
1537 int ctrl;
1539 LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
1540 dc->type_b ? "" : "d", dc->imm);
1542 if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
1543 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
1544 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1545 return;
1548 t_id = tcg_temp_new();
1549 if (dc->type_b) {
1550 tcg_gen_movi_tl(t_id, dc->imm & 0xf);
1551 ctrl = dc->imm >> 10;
1552 } else {
1553 tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
1554 ctrl = dc->imm >> 5;
1557 t_ctrl = tcg_const_tl(ctrl);
1559 if (dc->rd == 0) {
1560 gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
1561 } else {
1562 gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
1564 tcg_temp_free(t_id);
1565 tcg_temp_free(t_ctrl);
1568 static struct decoder_info {
1569 struct {
1570 uint32_t bits;
1571 uint32_t mask;
1573 void (*dec)(DisasContext *dc);
1574 } decinfo[] = {
1575 {DEC_ADD, dec_add},
1576 {DEC_SUB, dec_sub},
1577 {DEC_AND, dec_and},
1578 {DEC_XOR, dec_xor},
1579 {DEC_OR, dec_or},
1580 {DEC_BIT, dec_bit},
1581 {DEC_BARREL, dec_barrel},
1582 {DEC_LD, dec_load},
1583 {DEC_ST, dec_store},
1584 {DEC_IMM, dec_imm},
1585 {DEC_BR, dec_br},
1586 {DEC_BCC, dec_bcc},
1587 {DEC_RTS, dec_rts},
1588 {DEC_FPU, dec_fpu},
1589 {DEC_MUL, dec_mul},
1590 {DEC_DIV, dec_div},
1591 {DEC_MSR, dec_msr},
1592 {DEC_STREAM, dec_stream},
1593 {{0, 0}, dec_null}
1596 static inline void decode(DisasContext *dc, uint32_t ir)
1598 int i;
1600 dc->ir = ir;
1601 LOG_DIS("%8.8x\t", dc->ir);
1603 if (dc->ir)
1604 dc->nr_nops = 0;
1605 else {
1606 if ((dc->tb_flags & MSR_EE_FLAG)
1607 && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
1608 && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
1609 tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
1610 t_gen_raise_exception(dc, EXCP_HW_EXCP);
1611 return;
1614 LOG_DIS("nr_nops=%d\t", dc->nr_nops);
1615 dc->nr_nops++;
1616 if (dc->nr_nops > 4) {
1617 cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
1620 /* bit 2 seems to indicate insn type. */
1621 dc->type_b = ir & (1 << 29);
1623 dc->opcode = EXTRACT_FIELD(ir, 26, 31);
1624 dc->rd = EXTRACT_FIELD(ir, 21, 25);
1625 dc->ra = EXTRACT_FIELD(ir, 16, 20);
1626 dc->rb = EXTRACT_FIELD(ir, 11, 15);
1627 dc->imm = EXTRACT_FIELD(ir, 0, 15);
1629 /* Large switch for all insns. */
1630 for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
1631 if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
1632 decinfo[i].dec(dc);
1633 break;
1638 /* generate intermediate code for basic block 'tb'. */
1639 void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
1641 MicroBlazeCPU *cpu = mb_env_get_cpu(env);
1642 CPUState *cs = CPU(cpu);
1643 uint32_t pc_start;
1644 struct DisasContext ctx;
1645 struct DisasContext *dc = &ctx;
1646 uint32_t next_page_start, org_flags;
1647 target_ulong npc;
1648 int num_insns;
1649 int max_insns;
1651 pc_start = tb->pc;
1652 dc->cpu = cpu;
1653 dc->tb = tb;
1654 org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
1656 dc->is_jmp = DISAS_NEXT;
1657 dc->jmp = 0;
1658 dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
1659 if (dc->delayed_branch) {
1660 dc->jmp = JMP_INDIRECT;
1662 dc->pc = pc_start;
1663 dc->singlestep_enabled = cs->singlestep_enabled;
1664 dc->cpustate_changed = 0;
1665 dc->abort_at_next_insn = 0;
1666 dc->nr_nops = 0;
1668 if (pc_start & 3) {
1669 cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
1672 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1673 #if !SIM_COMPAT
1674 qemu_log("--------------\n");
1675 log_cpu_state(CPU(cpu), 0);
1676 #endif
1679 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
1680 num_insns = 0;
1681 max_insns = tb->cflags & CF_COUNT_MASK;
1682 if (max_insns == 0) {
1683 max_insns = CF_COUNT_MASK;
1685 if (max_insns > TCG_MAX_INSNS) {
1686 max_insns = TCG_MAX_INSNS;
1689 gen_tb_start(tb);
1692 tcg_gen_insn_start(dc->pc);
1693 num_insns++;
1695 #if SIM_COMPAT
1696 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1697 tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
1698 gen_helper_debug();
1700 #endif
1702 if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
1703 t_gen_raise_exception(dc, EXCP_DEBUG);
1704 dc->is_jmp = DISAS_UPDATE;
1705 /* The address covered by the breakpoint must be included in
1706 [tb->pc, tb->pc + tb->size) in order for it to be
1707 properly cleared -- thus we increment the PC here so that
1708 the logic setting tb->size below does the right thing. */
1709 dc->pc += 4;
1710 break;
1713 /* Pretty disas. */
1714 LOG_DIS("%8.8x:\t", dc->pc);
1716 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
1717 gen_io_start();
1720 dc->clear_imm = 1;
1721 decode(dc, cpu_ldl_code(env, dc->pc));
1722 if (dc->clear_imm)
1723 dc->tb_flags &= ~IMM_FLAG;
1724 dc->pc += 4;
1726 if (dc->delayed_branch) {
1727 dc->delayed_branch--;
1728 if (!dc->delayed_branch) {
1729 if (dc->tb_flags & DRTI_FLAG)
1730 do_rti(dc);
1731 if (dc->tb_flags & DRTB_FLAG)
1732 do_rtb(dc);
1733 if (dc->tb_flags & DRTE_FLAG)
1734 do_rte(dc);
1735 /* Clear the delay slot flag. */
1736 dc->tb_flags &= ~D_FLAG;
1737 /* If it is a direct jump, try direct chaining. */
1738 if (dc->jmp == JMP_INDIRECT) {
1739 eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
1740 dc->is_jmp = DISAS_JUMP;
1741 } else if (dc->jmp == JMP_DIRECT) {
1742 t_sync_flags(dc);
1743 gen_goto_tb(dc, 0, dc->jmp_pc);
1744 dc->is_jmp = DISAS_TB_JUMP;
1745 } else if (dc->jmp == JMP_DIRECT_CC) {
1746 TCGLabel *l1 = gen_new_label();
1747 t_sync_flags(dc);
1748 /* Conditional jmp. */
1749 tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
1750 gen_goto_tb(dc, 1, dc->pc);
1751 gen_set_label(l1);
1752 gen_goto_tb(dc, 0, dc->jmp_pc);
1754 dc->is_jmp = DISAS_TB_JUMP;
1756 break;
1759 if (cs->singlestep_enabled) {
1760 break;
1762 } while (!dc->is_jmp && !dc->cpustate_changed
1763 && !tcg_op_buf_full()
1764 && !singlestep
1765 && (dc->pc < next_page_start)
1766 && num_insns < max_insns);
1768 npc = dc->pc;
1769 if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
1770 if (dc->tb_flags & D_FLAG) {
1771 dc->is_jmp = DISAS_UPDATE;
1772 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1773 sync_jmpstate(dc);
1774 } else
1775 npc = dc->jmp_pc;
1778 if (tb->cflags & CF_LAST_IO)
1779 gen_io_end();
1780 /* Force an update if the per-tb cpu state has changed. */
1781 if (dc->is_jmp == DISAS_NEXT
1782 && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
1783 dc->is_jmp = DISAS_UPDATE;
1784 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1786 t_sync_flags(dc);
1788 if (unlikely(cs->singlestep_enabled)) {
1789 TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
1791 if (dc->is_jmp != DISAS_JUMP) {
1792 tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
1794 gen_helper_raise_exception(cpu_env, tmp);
1795 tcg_temp_free_i32(tmp);
1796 } else {
1797 switch(dc->is_jmp) {
1798 case DISAS_NEXT:
1799 gen_goto_tb(dc, 1, npc);
1800 break;
1801 default:
1802 case DISAS_JUMP:
1803 case DISAS_UPDATE:
1804 /* indicate that the hash table must be used
1805 to find the next TB */
1806 tcg_gen_exit_tb(0);
1807 break;
1808 case DISAS_TB_JUMP:
1809 /* nothing more to generate */
1810 break;
1813 gen_tb_end(tb, num_insns);
1815 tb->size = dc->pc - pc_start;
1816 tb->icount = num_insns;
1818 #ifdef DEBUG_DISAS
1819 #if !SIM_COMPAT
1820 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1821 qemu_log("\n");
1822 #if DISAS_GNU
1823 log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
1824 #endif
1825 qemu_log("\nisize=%d osize=%d\n",
1826 dc->pc - pc_start, tcg_op_buf_count());
1828 #endif
1829 #endif
1830 assert(!dc->abort_at_next_insn);
1833 void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
1834 int flags)
1836 MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
1837 CPUMBState *env = &cpu->env;
1838 int i;
1840 if (!env || !f)
1841 return;
1843 cpu_fprintf(f, "IN: PC=%x %s\n",
1844 env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
1845 cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1846 env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
1847 env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
1848 cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1849 env->btaken, env->btarget,
1850 (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
1851 (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
1852 (env->sregs[SR_MSR] & MSR_EIP),
1853 (env->sregs[SR_MSR] & MSR_IE));
1855 for (i = 0; i < 32; i++) {
1856 cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
1857 if ((i + 1) % 4 == 0)
1858 cpu_fprintf(f, "\n");
1860 cpu_fprintf(f, "\n\n");
1863 MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
1865 MicroBlazeCPU *cpu;
1867 cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
1869 object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
1871 return cpu;
1874 void mb_tcg_init(void)
1876 int i;
1878 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
1880 env_debug = tcg_global_mem_new(cpu_env,
1881 offsetof(CPUMBState, debug),
1882 "debug0");
1883 env_iflags = tcg_global_mem_new(cpu_env,
1884 offsetof(CPUMBState, iflags),
1885 "iflags");
1886 env_imm = tcg_global_mem_new(cpu_env,
1887 offsetof(CPUMBState, imm),
1888 "imm");
1889 env_btarget = tcg_global_mem_new(cpu_env,
1890 offsetof(CPUMBState, btarget),
1891 "btarget");
1892 env_btaken = tcg_global_mem_new(cpu_env,
1893 offsetof(CPUMBState, btaken),
1894 "btaken");
1895 env_res_addr = tcg_global_mem_new(cpu_env,
1896 offsetof(CPUMBState, res_addr),
1897 "res_addr");
1898 env_res_val = tcg_global_mem_new(cpu_env,
1899 offsetof(CPUMBState, res_val),
1900 "res_val");
1901 for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
1902 cpu_R[i] = tcg_global_mem_new(cpu_env,
1903 offsetof(CPUMBState, regs[i]),
1904 regnames[i]);
1906 for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
1907 cpu_SR[i] = tcg_global_mem_new(cpu_env,
1908 offsetof(CPUMBState, sregs[i]),
1909 special_regnames[i]);
1913 void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
1914 target_ulong *data)
1916 env->sregs[SR_PC] = data[0];