ACPI: Add definitions for the SPCR table
[qemu/ar7.git] / target-s390x / translate.c
blob9b877148c6030954942f9260f071beed7ab25d9b
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
/*
 * Per-instruction translation state, threaded through all of the
 * decode/emit helpers below.
 * NOTE(review): this rendering of the file has dropped blank and
 * brace-only lines; the struct's closing brace is not visible here.
 */
53 struct DisasContext {
54 struct TranslationBlock *tb;        /* TB currently being translated */
55 const DisasInsn *insn;              /* decode-table entry for the insn */
56 DisasFields *fields;                /* decoded operand fields */
57 uint64_t pc, next_pc;               /* guest PC of this and the next insn */
58 enum cc_op cc_op;                   /* how the condition code is computed */
59 bool singlestep_enabled;            /* disables goto_tb chaining */
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;                     /* TCG comparison to apply to a/b */
65 bool is_64;                         /* selects the s64 vs s32 union arm */
66 bool g1;                            /* a is a global; do not free it */
67 bool g2;                            /* b is a global; do not free it */
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
74 #define DISAS_EXCP 4
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
88 return pc;
/*
 * Dump the architectural CPU state (PSW, GPRs, FPRs, VRs and, in system
 * mode, control registers) to F via CPU_FPRINTF, four per output line.
 * A cc_op value > 3 means the cc is still symbolic; print its name.
 */
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
/* General registers, four per line.  */
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
/* Floating-point registers (high halves of the vector registers).  */
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
/* Full 128-bit vector registers, two per line.  */
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? " " : "\n");
130 #ifndef CONFIG_USER_ONLY
/* Control registers exist only in system emulation.  */
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
133 if ((i % 4) == 3) {
134 cpu_fprintf(f, "\n");
135 } else {
136 cpu_fprintf(f, " ");
139 #endif
141 #ifdef DEBUG_INLINE_BRANCHES
/* Debug statistics: how often each cc op was branched on inline.  */
142 for (i = 0; i < CC_OP_MAX; i++) {
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
146 #endif
148 cpu_fprintf(f, "\n");
/* TCG globals mapped onto CPUS390XState fields by s390x_translate_init.  */
151 static TCGv_i64 psw_addr;           /* PSW instruction address */
152 static TCGv_i64 psw_mask;           /* PSW mask (flags, mode bits) */
154 static TCGv_i32 cc_op;              /* current cc computation selector */
155 static TCGv_i64 cc_src;             /* cc operand 1 */
156 static TCGv_i64 cc_dst;             /* cc operand 2 */
157 static TCGv_i64 cc_vr;              /* cc result value */
159 static char cpu_reg_names[32][4];   /* backing store for "r0".."f15" names */
160 static TCGv_i64 regs[16];           /* general registers */
161 static TCGv_i64 fregs[16];          /* floating-point registers */
163 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; /* cc_op per generated op, for restore */
/*
 * One-time initialization of the TCG globals above, binding each to its
 * offset within CPUS390XState.  Called once at CPU creation.
 */
165 void s390x_translate_init(void)
167 int i;
169 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
170 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
171 offsetof(CPUS390XState, psw.addr),
172 "psw_addr");
173 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
174 offsetof(CPUS390XState, psw.mask),
175 "psw_mask");
177 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
178 "cc_op");
179 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
180 "cc_src");
181 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
182 "cc_dst");
183 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
184 "cc_vr");
/* General registers r0..r15.  */
186 for (i = 0; i < 16; i++) {
187 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
188 regs[i] = tcg_global_mem_new(TCG_AREG0,
189 offsetof(CPUS390XState, regs[i]),
190 cpu_reg_names[i]);
/* FP registers f0..f15 live in the first doubleword of each vreg.  */
193 for (i = 0; i < 16; i++) {
194 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
195 fregs[i] = tcg_global_mem_new(TCG_AREG0,
196 offsetof(CPUS390XState, vregs[i][0].d),
197 cpu_reg_names[i + 16]);
201 static TCGv_i64 load_reg(int reg)
203 TCGv_i64 r = tcg_temp_new_i64();
204 tcg_gen_mov_i64(r, regs[reg]);
205 return r;
208 static TCGv_i64 load_freg32_i64(int reg)
210 TCGv_i64 r = tcg_temp_new_i64();
211 tcg_gen_shri_i64(r, fregs[reg], 32);
212 return r;
215 static void store_reg(int reg, TCGv_i64 v)
217 tcg_gen_mov_i64(regs[reg], v);
220 static void store_freg(int reg, TCGv_i64 v)
222 tcg_gen_mov_i64(fregs[reg], v);
225 static void store_reg32_i64(int reg, TCGv_i64 v)
227 /* 32 bit register writes keep the upper half */
228 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
231 static void store_reg32h_i64(int reg, TCGv_i64 v)
233 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
236 static void store_freg32_i64(int reg, TCGv_i64 v)
238 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
241 static void return_low128(TCGv_i64 dest)
243 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
246 static void update_psw_addr(DisasContext *s)
248 /* psw.addr */
249 tcg_gen_movi_i64(psw_addr, s->pc);
252 static void update_cc_op(DisasContext *s)
254 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
255 tcg_gen_movi_i32(cc_op, s->cc_op);
259 static void potential_page_fault(DisasContext *s)
261 update_psw_addr(s);
262 update_cc_op(s);
265 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
267 return (uint64_t)cpu_lduw_code(env, pc);
270 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
272 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
275 static int get_mem_index(DisasContext *s)
277 switch (s->tb->flags & FLAG_MASK_ASC) {
278 case PSW_ASC_PRIMARY >> 32:
279 return 0;
280 case PSW_ASC_SECONDARY >> 32:
281 return 1;
282 case PSW_ASC_HOME >> 32:
283 return 2;
284 default:
285 tcg_abort();
286 break;
290 static void gen_exception(int excp)
292 TCGv_i32 tmp = tcg_const_i32(excp);
293 gen_helper_exception(cpu_env, tmp);
294 tcg_temp_free_i32(tmp);
297 static void gen_program_exception(DisasContext *s, int code)
299 TCGv_i32 tmp;
301 /* Remember what pgm exeption this was. */
302 tmp = tcg_const_i32(code);
303 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
304 tcg_temp_free_i32(tmp);
306 tmp = tcg_const_i32(s->next_pc - s->pc);
307 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
308 tcg_temp_free_i32(tmp);
310 /* Advance past instruction. */
311 s->pc = s->next_pc;
312 update_psw_addr(s);
314 /* Save off cc. */
315 update_cc_op(s);
317 /* Trigger exception. */
318 gen_exception(EXCP_PGM);
321 static inline void gen_illegal_opcode(DisasContext *s)
323 gen_program_exception(s, PGM_OPERATION);
326 static inline void gen_trap(DisasContext *s)
328 TCGv_i32 t;
330 /* Set DXC to 0xff. */
331 t = tcg_temp_new_i32();
332 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
333 tcg_gen_ori_i32(t, t, 0xff00);
334 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
335 tcg_temp_free_i32(t);
337 gen_program_exception(s, PGM_DATA);
340 #ifndef CONFIG_USER_ONLY
341 static void check_privileged(DisasContext *s)
343 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
344 gen_program_exception(s, PGM_PRIVILEGED);
347 #endif
349 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
351 TCGv_i64 tmp = tcg_temp_new_i64();
352 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
354 /* Note that d2 is limited to 20 bits, signed. If we crop negative
355 displacements early we create larger immedate addends. */
357 /* Note that addi optimizes the imm==0 case. */
358 if (b2 && x2) {
359 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
360 tcg_gen_addi_i64(tmp, tmp, d2);
361 } else if (b2) {
362 tcg_gen_addi_i64(tmp, regs[b2], d2);
363 } else if (x2) {
364 tcg_gen_addi_i64(tmp, regs[x2], d2);
365 } else {
366 if (need_31) {
367 d2 &= 0x7fffffff;
368 need_31 = false;
370 tcg_gen_movi_i64(tmp, d2);
372 if (need_31) {
373 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
376 return tmp;
379 static inline bool live_cc_data(DisasContext *s)
381 return (s->cc_op != CC_OP_DYNAMIC
382 && s->cc_op != CC_OP_STATIC
383 && s->cc_op > 3);
386 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
388 if (live_cc_data(s)) {
389 tcg_gen_discard_i64(cc_src);
390 tcg_gen_discard_i64(cc_dst);
391 tcg_gen_discard_i64(cc_vr);
393 s->cc_op = CC_OP_CONST0 + val;
396 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
398 if (live_cc_data(s)) {
399 tcg_gen_discard_i64(cc_src);
400 tcg_gen_discard_i64(cc_vr);
402 tcg_gen_mov_i64(cc_dst, dst);
403 s->cc_op = op;
406 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
407 TCGv_i64 dst)
409 if (live_cc_data(s)) {
410 tcg_gen_discard_i64(cc_vr);
412 tcg_gen_mov_i64(cc_src, src);
413 tcg_gen_mov_i64(cc_dst, dst);
414 s->cc_op = op;
417 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
418 TCGv_i64 dst, TCGv_i64 vr)
420 tcg_gen_mov_i64(cc_src, src);
421 tcg_gen_mov_i64(cc_dst, dst);
422 tcg_gen_mov_i64(cc_vr, vr);
423 s->cc_op = op;
426 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
428 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
431 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
433 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
436 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
438 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
441 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
443 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
446 /* CC value is in env->cc_op */
447 static void set_cc_static(DisasContext *s)
449 if (live_cc_data(s)) {
450 tcg_gen_discard_i64(cc_src);
451 tcg_gen_discard_i64(cc_dst);
452 tcg_gen_discard_i64(cc_vr);
454 s->cc_op = CC_OP_STATIC;
457 /* Materialize the condition code: compute the current symbolic cc_op
    into the env-resident cc_op global, then mark it CC_OP_STATIC.
    The first switch decides how many operands the helper needs; the
    second emits the helper call (or a constant move).  */
458 static void gen_op_calc_cc(DisasContext *s)
460 TCGv_i32 local_cc_op;
461 TCGv_i64 dummy;
463 TCGV_UNUSED_I32(local_cc_op);
464 TCGV_UNUSED_I64(dummy);
465 switch (s->cc_op) {
466 default:
467 dummy = tcg_const_i64(0);
468 /* FALLTHRU */
469 case CC_OP_ADD_64:
470 case CC_OP_ADDU_64:
471 case CC_OP_ADDC_64:
472 case CC_OP_SUB_64:
473 case CC_OP_SUBU_64:
474 case CC_OP_SUBB_64:
475 case CC_OP_ADD_32:
476 case CC_OP_ADDU_32:
477 case CC_OP_ADDC_32:
478 case CC_OP_SUB_32:
479 case CC_OP_SUBU_32:
480 case CC_OP_SUBB_32:
481 local_cc_op = tcg_const_i32(s->cc_op);
482 break;
483 case CC_OP_CONST0:
484 case CC_OP_CONST1:
485 case CC_OP_CONST2:
486 case CC_OP_CONST3:
487 case CC_OP_STATIC:
488 case CC_OP_DYNAMIC:
489 break;
492 switch (s->cc_op) {
493 case CC_OP_CONST0:
494 case CC_OP_CONST1:
495 case CC_OP_CONST2:
496 case CC_OP_CONST3:
497 /* s->cc_op is the cc value */
498 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
499 break;
500 case CC_OP_STATIC:
501 /* env->cc_op already is the cc value */
502 break;
503 case CC_OP_NZ:
504 case CC_OP_ABS_64:
505 case CC_OP_NABS_64:
506 case CC_OP_ABS_32:
507 case CC_OP_NABS_32:
508 case CC_OP_LTGT0_32:
509 case CC_OP_LTGT0_64:
510 case CC_OP_COMP_32:
511 case CC_OP_COMP_64:
512 case CC_OP_NZ_F32:
513 case CC_OP_NZ_F64:
514 case CC_OP_FLOGR:
515 /* 1 argument */
516 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
517 break;
518 case CC_OP_ICM:
519 case CC_OP_LTGT_32:
520 case CC_OP_LTGT_64:
521 case CC_OP_LTUGTU_32:
522 case CC_OP_LTUGTU_64:
523 case CC_OP_TM_32:
524 case CC_OP_TM_64:
525 case CC_OP_SLA_32:
526 case CC_OP_SLA_64:
527 case CC_OP_NZ_F128:
528 /* 2 arguments */
529 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
530 break;
531 case CC_OP_ADD_64:
532 case CC_OP_ADDU_64:
533 case CC_OP_ADDC_64:
534 case CC_OP_SUB_64:
535 case CC_OP_SUBU_64:
536 case CC_OP_SUBB_64:
537 case CC_OP_ADD_32:
538 case CC_OP_ADDU_32:
539 case CC_OP_ADDC_32:
540 case CC_OP_SUB_32:
541 case CC_OP_SUBU_32:
542 case CC_OP_SUBB_32:
543 /* 3 arguments */
544 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
545 break;
546 case CC_OP_DYNAMIC:
547 /* unknown operation - assume 3 arguments and cc_op in env */
548 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
549 break;
550 default:
551 tcg_abort();
554 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
555 tcg_temp_free_i32(local_cc_op);
557 if (!TCGV_IS_UNUSED_I64(dummy)) {
558 tcg_temp_free_i64(dummy);
561 /* We now have cc in cc_op as constant */
562 set_cc_static(s);
565 static int use_goto_tb(DisasContext *s, uint64_t dest)
567 /* NOTE: we handle the case where the TB spans two pages here */
568 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
569 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
570 && !s->singlestep_enabled
571 && !(s->tb->cflags & CF_LAST_IO));
574 static void account_noninline_branch(DisasContext *s, int cc_op)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss[cc_op]++;
578 #endif
581 static void account_inline_branch(DisasContext *s, int cc_op)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit[cc_op]++;
585 #endif
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
590 static const TCGCond ltgt_cond[16] = {
591 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
592 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
593 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
594 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
595 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
596 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
597 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
598 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
603 static const TCGCond nz_cond[16] = {
604 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
605 TCG_COND_NEVER, TCG_COND_NEVER,
606 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
607 TCG_COND_NE, TCG_COND_NE,
608 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
609 TCG_COND_EQ, TCG_COND_EQ,
610 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison.  First a condition is
    chosen from the cc op + mask (falling back to materializing the cc
    via gen_op_calc_cc); then the comparison operands are loaded.  */
616 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
618 TCGCond cond;
619 enum cc_op old_cc_op = s->cc_op;
/* Trivial cases: branch always / branch never.  */
621 if (mask == 15 || mask == 0) {
622 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
623 c->u.s32.a = cc_op;
624 c->u.s32.b = cc_op;
625 c->g1 = c->g2 = true;
626 c->is_64 = false;
627 return;
630 /* Find the TCG condition for the mask + cc op. */
631 switch (old_cc_op) {
632 case CC_OP_LTGT0_32:
633 case CC_OP_LTGT0_64:
634 case CC_OP_LTGT_32:
635 case CC_OP_LTGT_64:
636 cond = ltgt_cond[mask];
637 if (cond == TCG_COND_NEVER) {
638 goto do_dynamic;
640 account_inline_branch(s, old_cc_op);
641 break;
643 case CC_OP_LTUGTU_32:
644 case CC_OP_LTUGTU_64:
645 cond = tcg_unsigned_cond(ltgt_cond[mask]);
646 if (cond == TCG_COND_NEVER) {
647 goto do_dynamic;
649 account_inline_branch(s, old_cc_op);
650 break;
652 case CC_OP_NZ:
653 cond = nz_cond[mask];
654 if (cond == TCG_COND_NEVER) {
655 goto do_dynamic;
657 account_inline_branch(s, old_cc_op);
658 break;
660 case CC_OP_TM_32:
661 case CC_OP_TM_64:
662 switch (mask) {
663 case 8:
664 cond = TCG_COND_EQ;
665 break;
666 case 4 | 2 | 1:
667 cond = TCG_COND_NE;
668 break;
669 default:
670 goto do_dynamic;
672 account_inline_branch(s, old_cc_op);
673 break;
675 case CC_OP_ICM:
676 switch (mask) {
677 case 8:
678 cond = TCG_COND_EQ;
679 break;
680 case 4 | 2 | 1:
681 case 4 | 2:
682 cond = TCG_COND_NE;
683 break;
684 default:
685 goto do_dynamic;
687 account_inline_branch(s, old_cc_op);
688 break;
690 case CC_OP_FLOGR:
691 switch (mask & 0xa) {
692 case 8: /* src == 0 -> no one bit found */
693 cond = TCG_COND_EQ;
694 break;
695 case 2: /* src != 0 -> one bit found */
696 cond = TCG_COND_NE;
697 break;
698 default:
699 goto do_dynamic;
701 account_inline_branch(s, old_cc_op);
702 break;
704 case CC_OP_ADDU_32:
705 case CC_OP_ADDU_64:
706 switch (mask) {
707 case 8 | 2: /* vr == 0 */
708 cond = TCG_COND_EQ;
709 break;
710 case 4 | 1: /* vr != 0 */
711 cond = TCG_COND_NE;
712 break;
713 case 8 | 4: /* no carry -> vr >= src */
714 cond = TCG_COND_GEU;
715 break;
716 case 2 | 1: /* carry -> vr < src */
717 cond = TCG_COND_LTU;
718 break;
719 default:
720 goto do_dynamic;
722 account_inline_branch(s, old_cc_op);
723 break;
725 case CC_OP_SUBU_32:
726 case CC_OP_SUBU_64:
727 /* Note that CC=0 is impossible; treat it as dont-care. */
728 switch (mask & 7) {
729 case 2: /* zero -> op1 == op2 */
730 cond = TCG_COND_EQ;
731 break;
732 case 4 | 1: /* !zero -> op1 != op2 */
733 cond = TCG_COND_NE;
734 break;
735 case 4: /* borrow (!carry) -> op1 < op2 */
736 cond = TCG_COND_LTU;
737 break;
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
739 cond = TCG_COND_GEU;
740 break;
741 default:
742 goto do_dynamic;
744 account_inline_branch(s, old_cc_op);
745 break;
747 default:
748 do_dynamic:
749 /* Calculate cc value. */
750 gen_op_calc_cc(s);
751 /* FALLTHRU */
753 case CC_OP_STATIC:
754 /* Jump based on CC.  We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s, old_cc_op);
757 old_cc_op = CC_OP_STATIC;
758 cond = TCG_COND_NEVER;
759 break;
762 /* Load up the arguments of the comparison. */
763 c->is_64 = true;
764 c->g1 = c->g2 = false;
765 switch (old_cc_op) {
766 case CC_OP_LTGT0_32:
767 c->is_64 = false;
768 c->u.s32.a = tcg_temp_new_i32();
769 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
770 c->u.s32.b = tcg_const_i32(0);
771 break;
772 case CC_OP_LTGT_32:
773 case CC_OP_LTUGTU_32:
774 case CC_OP_SUBU_32:
775 c->is_64 = false;
776 c->u.s32.a = tcg_temp_new_i32();
777 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
778 c->u.s32.b = tcg_temp_new_i32();
779 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
780 break;
782 case CC_OP_LTGT0_64:
783 case CC_OP_NZ:
784 case CC_OP_FLOGR:
785 c->u.s64.a = cc_dst;
786 c->u.s64.b = tcg_const_i64(0);
787 c->g1 = true;
788 break;
789 case CC_OP_LTGT_64:
790 case CC_OP_LTUGTU_64:
791 case CC_OP_SUBU_64:
792 c->u.s64.a = cc_src;
793 c->u.s64.b = cc_dst;
794 c->g1 = c->g2 = true;
795 break;
797 case CC_OP_TM_32:
798 case CC_OP_TM_64:
799 case CC_OP_ICM:
800 c->u.s64.a = tcg_temp_new_i64();
801 c->u.s64.b = tcg_const_i64(0);
802 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
803 break;
805 case CC_OP_ADDU_32:
806 c->is_64 = false;
807 c->u.s32.a = tcg_temp_new_i32();
808 c->u.s32.b = tcg_temp_new_i32();
809 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
810 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
811 tcg_gen_movi_i32(c->u.s32.b, 0);
812 } else {
813 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
815 break;
817 case CC_OP_ADDU_64:
818 c->u.s64.a = cc_vr;
819 c->g1 = true;
820 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
821 c->u.s64.b = tcg_const_i64(0);
822 } else {
823 c->u.s64.b = cc_src;
824 c->g2 = true;
826 break;
828 case CC_OP_STATIC:
/* The cc has been materialized; compare env->cc_op against mask.  */
829 c->is_64 = false;
830 c->u.s32.a = cc_op;
831 c->g1 = true;
832 switch (mask) {
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
834 cond = TCG_COND_NE;
835 c->u.s32.b = tcg_const_i32(3);
836 break;
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
838 cond = TCG_COND_NE;
839 c->u.s32.b = tcg_const_i32(2);
840 break;
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
842 cond = TCG_COND_NE;
843 c->u.s32.b = tcg_const_i32(1);
844 break;
845 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
846 cond = TCG_COND_EQ;
847 c->g1 = false;
848 c->u.s32.a = tcg_temp_new_i32();
849 c->u.s32.b = tcg_const_i32(0);
850 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
851 break;
852 case 0x8 | 0x4: /* cc < 2 */
853 cond = TCG_COND_LTU;
854 c->u.s32.b = tcg_const_i32(2);
855 break;
856 case 0x8: /* cc == 0 */
857 cond = TCG_COND_EQ;
858 c->u.s32.b = tcg_const_i32(0);
859 break;
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
861 cond = TCG_COND_NE;
862 c->u.s32.b = tcg_const_i32(0);
863 break;
864 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
865 cond = TCG_COND_NE;
866 c->g1 = false;
867 c->u.s32.a = tcg_temp_new_i32();
868 c->u.s32.b = tcg_const_i32(0);
869 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
870 break;
871 case 0x4: /* cc == 1 */
872 cond = TCG_COND_EQ;
873 c->u.s32.b = tcg_const_i32(1);
874 break;
875 case 0x2 | 0x1: /* cc > 1 */
876 cond = TCG_COND_GTU;
877 c->u.s32.b = tcg_const_i32(1);
878 break;
879 case 0x2: /* cc == 2 */
880 cond = TCG_COND_EQ;
881 c->u.s32.b = tcg_const_i32(2);
882 break;
883 case 0x1: /* cc == 3 */
884 cond = TCG_COND_EQ;
885 c->u.s32.b = tcg_const_i32(3);
886 break;
887 default:
888 /* CC is masked by something else: (8 >> cc) & mask. */
889 cond = TCG_COND_NE;
890 c->g1 = false;
891 c->u.s32.a = tcg_const_i32(8);
892 c->u.s32.b = tcg_const_i32(0);
893 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
894 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
895 break;
897 break;
899 default:
900 abort();
902 c->cond = cond;
905 static void free_compare(DisasCompare *c)
907 if (!c->g1) {
908 if (c->is_64) {
909 tcg_temp_free_i64(c->u.s64.a);
910 } else {
911 tcg_temp_free_i32(c->u.s32.a);
914 if (!c->g2) {
915 if (c->is_64) {
916 tcg_temp_free_i64(c->u.s64.b);
917 } else {
918 tcg_temp_free_i32(c->u.s32.b);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
932 typedef enum {
933 #include "insn-format.def"
934 } DisasFormat;
936 #undef F0
937 #undef F1
938 #undef F2
939 #undef F3
940 #undef F4
941 #undef F5
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
/* "Original" operand-field indices: one bit position per architectural
   field name, used in per-insn availability bitmaps (presentO).  */
949 enum DisasFieldIndexO {
950 FLD_O_r1,
951 FLD_O_r2,
952 FLD_O_r3,
953 FLD_O_m1,
954 FLD_O_m3,
955 FLD_O_m4,
956 FLD_O_b1,
957 FLD_O_b2,
958 FLD_O_b4,
959 FLD_O_d1,
960 FLD_O_d2,
961 FLD_O_d4,
962 FLD_O_x2,
963 FLD_O_l1,
964 FLD_O_l2,
965 FLD_O_i1,
966 FLD_O_i2,
967 FLD_O_i3,
968 FLD_O_i4,
969 FLD_O_i5
/* "Compact" indices: fields that never coexist in one format share a
   slot, so the per-insn storage array needs only NUM_C_FIELD entries.  */
972 enum DisasFieldIndexC {
973 FLD_C_r1 = 0,
974 FLD_C_m1 = 0,
975 FLD_C_b1 = 0,
976 FLD_C_i1 = 0,
978 FLD_C_r2 = 1,
979 FLD_C_b2 = 1,
980 FLD_C_i2 = 1,
982 FLD_C_r3 = 2,
983 FLD_C_m3 = 2,
984 FLD_C_i3 = 2,
986 FLD_C_m4 = 3,
987 FLD_C_b4 = 3,
988 FLD_C_i4 = 3,
989 FLD_C_l1 = 3,
991 FLD_C_i5 = 4,
992 FLD_C_d1 = 4,
994 FLD_C_d2 = 5,
996 FLD_C_d4 = 6,
997 FLD_C_x2 = 6,
998 FLD_C_l2 = 6,
1000 NUM_C_FIELD = 7
/* Decoded operand fields of one instruction.  */
1003 struct DisasFields {
1004 unsigned op:8;                    /* primary opcode byte */
1005 unsigned op2:8;                   /* secondary opcode byte */
1006 unsigned presentC:16;             /* bitmap over compact indices */
1007 unsigned int presentO;            /* bitmap over original indices */
1008 int c[NUM_C_FIELD];               /* field values, compact-indexed */
1011 /* This is the way fields are to be accessed out of DisasFields. */
1012 #define have_field(S, F) have_field1((S), FLD_O_##F)
1013 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1015 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1017 return (f->presentO >> c) & 1;
1020 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1021 enum DisasFieldIndexC c)
1023 assert(have_field1(f, o));
1024 return f->c[c];
1027 /* Describe the layout of each field in each format. */
1027 /* Describe the layout of each field in each format. */
1028 typedef struct DisasField {
1029 unsigned int beg:8;               /* first bit of the field in the insn */
1030 unsigned int size:8;              /* width in bits */
1031 unsigned int type:2;              /* 0=unsigned, 1=signed, 2=long-disp */
1032 unsigned int indexC:6;            /* compact storage slot */
1033 enum DisasFieldIndexO indexO:8;   /* original field identity */
1034 } DisasField;
/* All field layouts of one instruction format.  */
1036 typedef struct DisasFormatInfo {
1037 DisasField op[NUM_C_FIELD];       /* one entry per (possible) field */
1038 } DisasFormatInfo;
1040 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1041 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1042 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1044 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1047 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1048 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1049 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1050 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1051 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1052 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1053 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1055 #define F0(N) { { } },
1056 #define F1(N, X1) { { X1 } },
1057 #define F2(N, X1, X2) { { X1, X2 } },
1058 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1059 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1060 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1062 static const DisasFormatInfo format_info[] = {
1063 #include "insn-format.def"
1066 #undef F0
1067 #undef F1
1068 #undef F2
1069 #undef F3
1070 #undef F4
1071 #undef F5
1072 #undef R
1073 #undef M
1074 #undef BD
1075 #undef BXD
1076 #undef BDL
1077 #undef BXDL
1078 #undef I
1079 #undef L
1081 /* Generally, we'll extract operands into this structures, operate upon
1082 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1083 of routines below for more details. */
1081 /* Generally, we'll extract operands into this structures, operate upon
1082 them, and store them back.  See the "in1", "in2", "prep", "wout" sets
1083 of routines below for more details. */
1084 typedef struct {
1085 bool g_out, g_out2, g_in1, g_in2; /* corresponding TCGv is a global */
1086 TCGv_i64 out, out2, in1, in2;     /* operand values */
1087 TCGv_i64 addr1;                   /* effective address of operand 1 */
1088 } DisasOps;
1090 /* Instructions can place constraints on their operands, raising specification
1091 exceptions if they are violated. To make this easy to automate, each "in1",
1092 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1093 of the following, or 0. To make this easy to document, we'll put the
1094 SPEC_<name> defines next to <name>. */
1096 #define SPEC_r1_even 1
1097 #define SPEC_r2_even 2
1098 #define SPEC_r3_even 4
1099 #define SPEC_r1_f128 8
1100 #define SPEC_r2_f128 16
1102 /* Return values from translate_one, indicating the state of the TB. */
1103 typedef enum {
1104 /* Continue the TB. */
1105 NO_EXIT,
1106 /* We have emitted one or more goto_tb. No fixup required. */
1107 EXIT_GOTO_TB,
1108 /* We are not using a goto_tb (for whatever reason), but have updated
1109 the PC (for whatever reason), so there's no need to do it again on
1110 exiting the TB. */
1111 EXIT_PC_UPDATED,
1112 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1113 updated the PC for the next instruction to be executed. */
1114 EXIT_PC_STALE,
1115 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1116 No following code will be executed. */
1117 EXIT_NORETURN,
1118 } ExitStatus;
/* Architectural facility an instruction belongs to; checked against
   the CPU model when the decode table is consulted.  */
1120 typedef enum DisasFacility {
1121 FAC_Z, /* zarch (default) */
1122 FAC_CASS, /* compare and swap and store */
1123 FAC_CASS2, /* compare and swap and store 2 */
1124 FAC_DFP, /* decimal floating point */
1125 FAC_DFPR, /* decimal floating point rounding */
1126 FAC_DO, /* distinct operands */
1127 FAC_EE, /* execute extensions */
1128 FAC_EI, /* extended immediate */
1129 FAC_FPE, /* floating point extension */
1130 FAC_FPSSH, /* floating point support sign handling */
1131 FAC_FPRGR, /* FPR-GR transfer */
1132 FAC_GIE, /* general instructions extension */
1133 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1134 FAC_HW, /* high-word */
1135 FAC_IEEEE_SIM, /* IEEE exception simulation */
1136 FAC_MIE, /* miscellaneous-instruction-extensions */
1137 FAC_LAT, /* load-and-trap */
1138 FAC_LOC, /* load/store on condition */
1139 FAC_LD, /* long displacement */
1140 FAC_PC, /* population count */
1141 FAC_SCF, /* store clock fast */
1142 FAC_SFLE, /* store facility list extended */
1143 FAC_ILA, /* interlocked access facility 1 */
1144 } DisasFacility;
/* One decode-table entry: opcode, format, required facility, operand
   specification constraints, and the pipeline of helper callbacks that
   load operands, perform the op, and write back results.  */
1146 struct DisasInsn {
1147 unsigned opc:16;                  /* opcode bits to match */
1148 DisasFormat fmt:8;                /* instruction format */
1149 DisasFacility fac:8;              /* facility gating this insn */
1150 unsigned spec:8;                  /* SPEC_* operand constraints */
1152 const char *name;
1154 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1155 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1156 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1157 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1158 void (*help_cout)(DisasContext *, DisasOps *);
1159 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1161 uint64_t data;                    /* per-insn constant for the helpers */
1164 /* ====================================================================== */
1165 /* Miscellaneous helpers, used by several operations. */
1167 static void help_l2_shift(DisasContext *s, DisasFields *f,
1168 DisasOps *o, int mask)
1170 int b2 = get_field(f, b2);
1171 int d2 = get_field(f, d2);
1173 if (b2 == 0) {
1174 o->in2 = tcg_const_i64(d2 & mask);
1175 } else {
1176 o->in2 = get_address(s, 0, b2, d2);
1177 tcg_gen_andi_i64(o->in2, o->in2, mask);
1181 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1183 if (dest == s->next_pc) {
1184 return NO_EXIT;
1186 if (use_goto_tb(s, dest)) {
1187 update_cc_op(s);
1188 tcg_gen_goto_tb(0);
1189 tcg_gen_movi_i64(psw_addr, dest);
1190 tcg_gen_exit_tb((uintptr_t)s->tb);
1191 return EXIT_GOTO_TB;
1192 } else {
1193 tcg_gen_movi_i64(psw_addr, dest);
1194 return EXIT_PC_UPDATED;
/* Emit a conditional branch described by compare C: either to the
   immediate offset IMM (in halfwords) when IS_IMM, or to the register
   destination CDEST.  Chooses between dual goto_tb, single goto_tb,
   and a movcond-based PC select depending on what use_goto_tb allows.
   Always consumes (frees) C.  */
1198 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1199 bool is_imm, int imm, TCGv_i64 cdest)
1201 ExitStatus ret;
1202 uint64_t dest = s->pc + 2 * imm;
1203 TCGLabel *lab;
1205 /* Take care of the special cases first. */
1206 if (c->cond == TCG_COND_NEVER) {
1207 ret = NO_EXIT;
1208 goto egress;
1210 if (is_imm) {
1211 if (dest == s->next_pc) {
1212 /* Branch to next. */
1213 ret = NO_EXIT;
1214 goto egress;
1216 if (c->cond == TCG_COND_ALWAYS) {
1217 ret = help_goto_direct(s, dest);
1218 goto egress;
1220 } else {
1221 if (TCGV_IS_UNUSED_I64(cdest)) {
1222 /* E.g. bcr %r0 -> no branch. */
1223 ret = NO_EXIT;
1224 goto egress;
1226 if (c->cond == TCG_COND_ALWAYS) {
1227 tcg_gen_mov_i64(psw_addr, cdest);
1228 ret = EXIT_PC_UPDATED;
1229 goto egress;
1233 if (use_goto_tb(s, s->next_pc)) {
1234 if (is_imm && use_goto_tb(s, dest)) {
1235 /* Both exits can use goto_tb. */
1236 update_cc_op(s);
1238 lab = gen_new_label();
1239 if (c->is_64) {
1240 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1241 } else {
1242 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1245 /* Branch not taken. */
1246 tcg_gen_goto_tb(0);
1247 tcg_gen_movi_i64(psw_addr, s->next_pc);
1248 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1250 /* Branch taken. */
1251 gen_set_label(lab);
1252 tcg_gen_goto_tb(1);
1253 tcg_gen_movi_i64(psw_addr, dest);
1254 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1256 ret = EXIT_GOTO_TB;
1257 } else {
1258 /* Fallthru can use goto_tb, but taken branch cannot. */
1259 /* Store taken branch destination before the brcond.  This
1260 avoids having to allocate a new local temp to hold it.
1261 We'll overwrite this in the not taken case anyway. */
1262 if (!is_imm) {
1263 tcg_gen_mov_i64(psw_addr, cdest);
1266 lab = gen_new_label();
1267 if (c->is_64) {
1268 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1269 } else {
1270 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1273 /* Branch not taken. */
1274 update_cc_op(s);
1275 tcg_gen_goto_tb(0);
1276 tcg_gen_movi_i64(psw_addr, s->next_pc);
1277 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1279 gen_set_label(lab);
1280 if (is_imm) {
1281 tcg_gen_movi_i64(psw_addr, dest);
1283 ret = EXIT_PC_UPDATED;
1285 } else {
1286 /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
1287 Most commonly we're single-stepping or some other condition that
1288 disables all use of goto_tb.  Just update the PC and exit. */
1290 TCGv_i64 next = tcg_const_i64(s->next_pc);
1291 if (is_imm) {
1292 cdest = tcg_const_i64(dest);
1295 if (c->is_64) {
1296 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1297 cdest, next);
1298 } else {
1299 TCGv_i32 t0 = tcg_temp_new_i32();
1300 TCGv_i64 t1 = tcg_temp_new_i64();
1301 TCGv_i64 z = tcg_const_i64(0);
1302 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1303 tcg_gen_extu_i32_i64(t1, t0);
1304 tcg_temp_free_i32(t0);
1305 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1306 tcg_temp_free_i64(t1);
1307 tcg_temp_free_i64(z);
1310 if (is_imm) {
1311 tcg_temp_free_i64(cdest);
1313 tcg_temp_free_i64(next);
1315 ret = EXIT_PC_UPDATED;
1318 egress:
1319 free_compare(c);
1320 return ret;
1323 /* ====================================================================== */
1324 /* The operations. These perform the bulk of the work for any insn,
1325 usually after the operands have been loaded and output initialized. */
1327 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1329 TCGv_i64 z, n;
1330 z = tcg_const_i64(0);
1331 n = tcg_temp_new_i64();
1332 tcg_gen_neg_i64(n, o->in2);
1333 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1334 tcg_temp_free_i64(n);
1335 tcg_temp_free_i64(z);
1336 return NO_EXIT;
/* LOAD POSITIVE (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (long BFP): clear the sign bit.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* LOAD POSITIVE (extended BFP): clear the sign bit in the high half,
   pass the low half through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}

/* ADD: plain 64-bit two's-complement addition.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD WITH CARRY: out = in1 + in2 + carry-from-previous-CC.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire. */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* ADD (short BFP): delegate to the softfloat helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (long BFP).  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* ADD (extended BFP): 128-bit operands in out:out2 / in1:in2; the low
   half of the result comes back via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* AND: plain 64-bit bitwise and.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE: and an immediate field into a sub-range of IN1.
   s->insn->data packs the placement: low byte = bit shift, upper
   bits = field size in bits.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* Build (in2 << shift) | ~mask so that the AND below leaves all
       bits outside the field unchanged.  Requires in2 to be a
       writable temp (asserted).  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
1433 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1435 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1436 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1437 tcg_gen_mov_i64(psw_addr, o->in2);
1438 return EXIT_PC_UPDATED;
1439 } else {
1440 return NO_EXIT;
/* BRANCH RELATIVE AND SAVE: store the link information, then take the
   PC-relative branch (I2 is a halfword offset).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}

/* BRANCH ON CONDITION: evaluate mask M1 against the CC and branch,
   either PC-relative (I2 present) or to the address in IN2.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low word of R1 and branch
   if the result is nonzero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    /* Compare only the low 32 bits of the decremented value with 0.  */
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON COUNT HIGH: decrement the high word of R1 and branch if
   nonzero.  Only the immediate form (BRCTH) exists, hence is_imm=1.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if
   nonzero.  The comparand is the global register itself (g1=true).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit): R1 += R3, then compare the low word of R1
   against the comparand in R3|1.  s->insn->data selects BXLE (<=) vs
   BXH (>).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* BRANCH ON INDEX (64-bit): as above on full registers.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If R1 aliases the comparand register, copy the comparand first:
       the add below would clobber it.  */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH / COMPARE AND BRANCH RELATIVE: compare IN1 with
   IN2 per mask M3 (s->insn->data selects the unsigned variants) and
   branch, either PC-relative (I4) or to the B4/D4 address.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Register/memory form: compute the branch target address.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* COMPARE (short BFP): the helper returns the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (long BFP).  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE (extended BFP): 128-bit operands in out:out2 / in1:in2.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CONVERT TO FIXED: the twelve BFP-to-integer conversions below share a
   pattern: pass the rounding-mode field M3 to the helper, then set the
   CC from the source value.  Naming: c{f,g}{e,d,x}b converts short/
   long/extended BFP to a 32/64-bit signed integer; the cl* variants
   convert to unsigned.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CONVERT FROM FIXED / CONVERT FROM LOGICAL: integer-to-BFP
   conversions.  M3 (rounding mode) is passed to the helper; no CC is
   set.  c{e,d,x}gb converts a signed 64-bit integer to short/long/
   extended BFP; the c*lgb variants convert an unsigned integer.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: the helper computes the checksum and returns the number of
   bytes it processed, which is used to advance the R2/R2+1 address/
   length pair.  The checksum itself comes back via return_low128.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the source address and reduce the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 bytes,
   inline two loads and compare; otherwise call the byte-loop helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* L is a length-minus-one field, hence the +1.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper computes the CC directly.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to a helper, which
   also sets the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}

/* COMPARE LOGICAL CHARACTERS UNDER MASK: helper compares the register
   bytes selected by M3 against storage and returns the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: helper scans until the terminator in R0;
   updated addresses come back in in1 and (via low128) in2.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1870 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1872 TCGv_i64 t = tcg_temp_new_i64();
1873 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1874 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1875 tcg_gen_or_i64(o->out, o->out, t);
1876 tcg_temp_free_i64(t);
1877 return NO_EXIT;
/* COMPARE AND SWAP (32/64-bit, selected by s->insn->data).  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (64-bit pair, i.e. 128 bits).  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic.  CC != 0 iff either
       half differs from the expected value.  */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* On equality store the new value, otherwise re-store the original
       memory contents (store is issued unconditionally; see op_cs).  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions.  */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE: privileged; fully delegated to a helper
   which also sets the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
#endif
/* CONVERT TO DECIMAL: helper converts the low 32 bits of IN1 to a
   packed-decimal doubleword, which is stored at the address in IN2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP: trap when the M3 condition holds for IN1 vs IN2.
   The condition is inverted so the branch skips the trap code;
   s->insn->data selects the unsigned comparison variants.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE: privileged; the function code is passed to the helper.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
#endif
/* DIVIDE (signed 32-bit): helper writes one half of the result pair
   directly and returns the other half via return_low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (unsigned 32-bit).  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE SINGLE (signed 64-bit).  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE LOGICAL (unsigned 128/64): dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* DIVIDE (short BFP).  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (long BFP).  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* DIVIDE (extended BFP): 128-bit operands; low half of the result
   comes back via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS register R2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

/* EXTRACT CPU ATTRIBUTE.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

/* EXTRACT FPC into the output.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: store the high word of the PSW mask into R1 and, if R2
   is nonzero, the low word into R2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the target instruction, modified by IN1, via a helper.
   PSW address and CC must be up to date before the call.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
/* LOAD FP INTEGER (short BFP): round to an integer value per M3.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (long BFP).  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

/* LOAD FP INTEGER (extended BFP): low half via return_low128.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE: R1 = bit position of the leftmost one (64 if the
   input is zero), R1+1 = input with that bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2196 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2198 int m3 = get_field(s->fields, m3);
2199 int pos, len, base = s->insn->data;
2200 TCGv_i64 tmp = tcg_temp_new_i64();
2201 uint64_t ccm;
2203 switch (m3) {
2204 case 0xf:
2205 /* Effectively a 32-bit load. */
2206 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2207 len = 32;
2208 goto one_insert;
2210 case 0xc:
2211 case 0x6:
2212 case 0x3:
2213 /* Effectively a 16-bit load. */
2214 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2215 len = 16;
2216 goto one_insert;
2218 case 0x8:
2219 case 0x4:
2220 case 0x2:
2221 case 0x1:
2222 /* Effectively an 8-bit load. */
2223 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2224 len = 8;
2225 goto one_insert;
2227 one_insert:
2228 pos = base + ctz32(m3) * 8;
2229 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2230 ccm = ((1ull << len) - 1) << pos;
2231 break;
2233 default:
2234 /* This is going to be a sequence of loads and inserts. */
2235 pos = base + 32 - 8;
2236 ccm = 0;
2237 while (m3) {
2238 if (m3 & 0x8) {
2239 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2240 tcg_gen_addi_i64(o->in2, o->in2, 1);
2241 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2242 ccm |= 0xff << pos;
2244 m3 = (m3 << 1) & 0xf;
2245 pos -= 8;
2247 break;
2250 tcg_gen_movi_i64(tmp, ccm);
2251 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2252 tcg_temp_free_i64(tmp);
2253 return NO_EXIT;
/* INSERT IMMEDIATE: deposit IN2 into IN1 at the position packed into
   s->insn->data (low byte = shift, upper bits = field size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}

/* INSERT PROGRAM MASK: place the CC and program mask into bits 2-7 of
   the byte at bits 32-39 of the output.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from the PSW mask into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge in the CC at bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY: privileged; delegated to a helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* INSERT STORAGE KEY EXTENDED: privileged; delegated to a helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* LOAD LENGTHENED short BFP to long BFP.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED long BFP to short BFP.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED extended BFP to long BFP.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED extended BFP to short BFP.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED long BFP to extended BFP.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED short BFP to extended BFP.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD 64-BIT VALUE, keeping only bits 33-63 (31-bit address mask).  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Memory loads of 1/2/4/8 bytes, sign- or zero-extended as named.
   The address is in IN2; the extension width is in the op name.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* LOAD AND TRAP family: load (or mask) the value, then trap if the
   result is zero.  The value is committed before the trap check.  */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD AND TRAP (64-bit load from memory).  */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD HIGH AND TRAP: store into the high word of R1.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL AND TRAP (32-bit zero-extended load).  */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2440 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2442 DisasCompare c;
2444 disas_jcc(s, &c, get_field(s->fields, m3));
2446 if (c.is_64) {
2447 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2448 o->in2, o->in1);
2449 free_compare(&c);
2450 } else {
2451 TCGv_i32 t32 = tcg_temp_new_i32();
2452 TCGv_i64 t, z;
2454 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2455 free_compare(&c);
2457 t = tcg_temp_new_i64();
2458 tcg_gen_extu_i32_i64(t, t32);
2459 tcg_temp_free_i32(t32);
2461 z = tcg_const_i64(0);
2462 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2463 tcg_temp_free_i64(t);
2464 tcg_temp_free_i64(z);
2467 return NO_EXIT;
2470 #ifndef CONFIG_USER_ONLY
2471 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2473 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2474 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2475 check_privileged(s);
2476 potential_page_fault(s);
2477 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2478 tcg_temp_free_i32(r1);
2479 tcg_temp_free_i32(r3);
2480 return NO_EXIT;
2483 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2485 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2486 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2487 check_privileged(s);
2488 potential_page_fault(s);
2489 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2490 tcg_temp_free_i32(r1);
2491 tcg_temp_free_i32(r3);
2492 return NO_EXIT;
2494 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2496 check_privileged(s);
2497 potential_page_fault(s);
2498 gen_helper_lra(o->out, cpu_env, o->in2);
2499 set_cc_static(s);
2500 return NO_EXIT;
2503 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2505 TCGv_i64 t1, t2;
2507 check_privileged(s);
2509 t1 = tcg_temp_new_i64();
2510 t2 = tcg_temp_new_i64();
2511 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2512 tcg_gen_addi_i64(o->in2, o->in2, 4);
2513 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2514 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2515 tcg_gen_shli_i64(t1, t1, 32);
2516 gen_helper_load_psw(cpu_env, t1, t2);
2517 tcg_temp_free_i64(t1);
2518 tcg_temp_free_i64(t2);
2519 return EXIT_NORETURN;
2522 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2524 TCGv_i64 t1, t2;
2526 check_privileged(s);
2528 t1 = tcg_temp_new_i64();
2529 t2 = tcg_temp_new_i64();
2530 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2531 tcg_gen_addi_i64(o->in2, o->in2, 8);
2532 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2533 gen_helper_load_psw(cpu_env, t1, t2);
2534 tcg_temp_free_i64(t1);
2535 tcg_temp_free_i64(t2);
2536 return EXIT_NORETURN;
2538 #endif
2540 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2542 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2543 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2544 potential_page_fault(s);
2545 gen_helper_lam(cpu_env, r1, o->in2, r3);
2546 tcg_temp_free_i32(r1);
2547 tcg_temp_free_i32(r3);
2548 return NO_EXIT;
2551 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2553 int r1 = get_field(s->fields, r1);
2554 int r3 = get_field(s->fields, r3);
2555 TCGv_i64 t1, t2;
2557 /* Only one register to read. */
2558 t1 = tcg_temp_new_i64();
2559 if (unlikely(r1 == r3)) {
2560 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2561 store_reg32_i64(r1, t1);
2562 tcg_temp_free(t1);
2563 return NO_EXIT;
2566 /* First load the values of the first and last registers to trigger
2567 possible page faults. */
2568 t2 = tcg_temp_new_i64();
2569 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2570 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2571 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2572 store_reg32_i64(r1, t1);
2573 store_reg32_i64(r3, t2);
2575 /* Only two registers to read. */
2576 if (((r1 + 1) & 15) == r3) {
2577 tcg_temp_free(t2);
2578 tcg_temp_free(t1);
2579 return NO_EXIT;
2582 /* Then load the remaining registers. Page fault can't occur. */
2583 r3 = (r3 - 1) & 15;
2584 tcg_gen_movi_i64(t2, 4);
2585 while (r1 != r3) {
2586 r1 = (r1 + 1) & 15;
2587 tcg_gen_add_i64(o->in2, o->in2, t2);
2588 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2589 store_reg32_i64(r1, t1);
2591 tcg_temp_free(t2);
2592 tcg_temp_free(t1);
2594 return NO_EXIT;
2597 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2599 int r1 = get_field(s->fields, r1);
2600 int r3 = get_field(s->fields, r3);
2601 TCGv_i64 t1, t2;
2603 /* Only one register to read. */
2604 t1 = tcg_temp_new_i64();
2605 if (unlikely(r1 == r3)) {
2606 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2607 store_reg32h_i64(r1, t1);
2608 tcg_temp_free(t1);
2609 return NO_EXIT;
2612 /* First load the values of the first and last registers to trigger
2613 possible page faults. */
2614 t2 = tcg_temp_new_i64();
2615 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2616 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2617 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2618 store_reg32h_i64(r1, t1);
2619 store_reg32h_i64(r3, t2);
2621 /* Only two registers to read. */
2622 if (((r1 + 1) & 15) == r3) {
2623 tcg_temp_free(t2);
2624 tcg_temp_free(t1);
2625 return NO_EXIT;
2628 /* Then load the remaining registers. Page fault can't occur. */
2629 r3 = (r3 - 1) & 15;
2630 tcg_gen_movi_i64(t2, 4);
2631 while (r1 != r3) {
2632 r1 = (r1 + 1) & 15;
2633 tcg_gen_add_i64(o->in2, o->in2, t2);
2634 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2635 store_reg32h_i64(r1, t1);
2637 tcg_temp_free(t2);
2638 tcg_temp_free(t1);
2640 return NO_EXIT;
2643 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2645 int r1 = get_field(s->fields, r1);
2646 int r3 = get_field(s->fields, r3);
2647 TCGv_i64 t1, t2;
2649 /* Only one register to read. */
2650 if (unlikely(r1 == r3)) {
2651 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2652 return NO_EXIT;
2655 /* First load the values of the first and last registers to trigger
2656 possible page faults. */
2657 t1 = tcg_temp_new_i64();
2658 t2 = tcg_temp_new_i64();
2659 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2660 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2661 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2662 tcg_gen_mov_i64(regs[r1], t1);
2663 tcg_temp_free(t2);
2665 /* Only two registers to read. */
2666 if (((r1 + 1) & 15) == r3) {
2667 tcg_temp_free(t1);
2668 return NO_EXIT;
2671 /* Then load the remaining registers. Page fault can't occur. */
2672 r3 = (r3 - 1) & 15;
2673 tcg_gen_movi_i64(t1, 8);
2674 while (r1 != r3) {
2675 r1 = (r1 + 1) & 15;
2676 tcg_gen_add_i64(o->in2, o->in2, t1);
2677 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2679 tcg_temp_free(t1);
2681 return NO_EXIT;
2684 #ifndef CONFIG_USER_ONLY
2685 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2687 check_privileged(s);
2688 potential_page_fault(s);
2689 gen_helper_lura(o->out, cpu_env, o->in2);
2690 return NO_EXIT;
2693 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2695 check_privileged(s);
2696 potential_page_fault(s);
2697 gen_helper_lurag(o->out, cpu_env, o->in2);
2698 return NO_EXIT;
2700 #endif
2702 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2704 o->out = o->in2;
2705 o->g_out = o->g_in2;
2706 TCGV_UNUSED_I64(o->in2);
2707 o->g_in2 = false;
2708 return NO_EXIT;
2711 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2713 int b2 = get_field(s->fields, b2);
2714 TCGv ar1 = tcg_temp_new_i64();
2716 o->out = o->in2;
2717 o->g_out = o->g_in2;
2718 TCGV_UNUSED_I64(o->in2);
2719 o->g_in2 = false;
2721 switch (s->tb->flags & FLAG_MASK_ASC) {
2722 case PSW_ASC_PRIMARY >> 32:
2723 tcg_gen_movi_i64(ar1, 0);
2724 break;
2725 case PSW_ASC_ACCREG >> 32:
2726 tcg_gen_movi_i64(ar1, 1);
2727 break;
2728 case PSW_ASC_SECONDARY >> 32:
2729 if (b2) {
2730 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
2731 } else {
2732 tcg_gen_movi_i64(ar1, 0);
2734 break;
2735 case PSW_ASC_HOME >> 32:
2736 tcg_gen_movi_i64(ar1, 2);
2737 break;
2740 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
2741 tcg_temp_free_i64(ar1);
2743 return NO_EXIT;
2746 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2748 o->out = o->in1;
2749 o->out2 = o->in2;
2750 o->g_out = o->g_in1;
2751 o->g_out2 = o->g_in2;
2752 TCGV_UNUSED_I64(o->in1);
2753 TCGV_UNUSED_I64(o->in2);
2754 o->g_in1 = o->g_in2 = false;
2755 return NO_EXIT;
2758 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2760 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2761 potential_page_fault(s);
2762 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2763 tcg_temp_free_i32(l);
2764 return NO_EXIT;
2767 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2769 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2770 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2771 potential_page_fault(s);
2772 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2773 tcg_temp_free_i32(r1);
2774 tcg_temp_free_i32(r2);
2775 set_cc_static(s);
2776 return NO_EXIT;
2779 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2781 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2782 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2783 potential_page_fault(s);
2784 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2785 tcg_temp_free_i32(r1);
2786 tcg_temp_free_i32(r3);
2787 set_cc_static(s);
2788 return NO_EXIT;
2791 #ifndef CONFIG_USER_ONLY
2792 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2794 int r1 = get_field(s->fields, l1);
2795 check_privileged(s);
2796 potential_page_fault(s);
2797 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2798 set_cc_static(s);
2799 return NO_EXIT;
2802 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2804 int r1 = get_field(s->fields, l1);
2805 check_privileged(s);
2806 potential_page_fault(s);
2807 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2808 set_cc_static(s);
2809 return NO_EXIT;
2811 #endif
2813 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2815 potential_page_fault(s);
2816 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2817 set_cc_static(s);
2818 return NO_EXIT;
2821 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2823 potential_page_fault(s);
2824 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2825 set_cc_static(s);
2826 return_low128(o->in2);
2827 return NO_EXIT;
2830 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2832 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2833 return NO_EXIT;
2836 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2838 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2839 return NO_EXIT;
2842 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2844 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2845 return NO_EXIT;
2848 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2850 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2851 return NO_EXIT;
2854 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2856 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2857 return NO_EXIT;
2860 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2862 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2863 return_low128(o->out2);
2864 return NO_EXIT;
2867 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2869 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2870 return_low128(o->out2);
2871 return NO_EXIT;
2874 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2876 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2877 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2878 tcg_temp_free_i64(r3);
2879 return NO_EXIT;
2882 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2884 int r3 = get_field(s->fields, r3);
2885 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2886 return NO_EXIT;
2889 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2891 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2892 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2893 tcg_temp_free_i64(r3);
2894 return NO_EXIT;
2897 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2899 int r3 = get_field(s->fields, r3);
2900 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2901 return NO_EXIT;
2904 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2906 TCGv_i64 z, n;
2907 z = tcg_const_i64(0);
2908 n = tcg_temp_new_i64();
2909 tcg_gen_neg_i64(n, o->in2);
2910 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
2911 tcg_temp_free_i64(n);
2912 tcg_temp_free_i64(z);
2913 return NO_EXIT;
2916 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2918 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2919 return NO_EXIT;
2922 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2924 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2925 return NO_EXIT;
2928 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2930 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2931 tcg_gen_mov_i64(o->out2, o->in2);
2932 return NO_EXIT;
2935 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2937 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2938 potential_page_fault(s);
2939 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2940 tcg_temp_free_i32(l);
2941 set_cc_static(s);
2942 return NO_EXIT;
2945 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2947 tcg_gen_neg_i64(o->out, o->in2);
2948 return NO_EXIT;
2951 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2953 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2954 return NO_EXIT;
2957 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2959 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2960 return NO_EXIT;
2963 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2965 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2966 tcg_gen_mov_i64(o->out2, o->in2);
2967 return NO_EXIT;
2970 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2972 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2973 potential_page_fault(s);
2974 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2975 tcg_temp_free_i32(l);
2976 set_cc_static(s);
2977 return NO_EXIT;
2980 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2982 tcg_gen_or_i64(o->out, o->in1, o->in2);
2983 return NO_EXIT;
2986 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2988 int shift = s->insn->data & 0xff;
2989 int size = s->insn->data >> 8;
2990 uint64_t mask = ((1ull << size) - 1) << shift;
2992 assert(!o->g_in2);
2993 tcg_gen_shli_i64(o->in2, o->in2, shift);
2994 tcg_gen_or_i64(o->out, o->in1, o->in2);
2996 /* Produce the CC from only the bits manipulated. */
2997 tcg_gen_andi_i64(cc_dst, o->out, mask);
2998 set_cc_nz_u64(s, cc_dst);
2999 return NO_EXIT;
3002 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3004 gen_helper_popcnt(o->out, o->in2);
3005 return NO_EXIT;
3008 #ifndef CONFIG_USER_ONLY
3009 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3011 check_privileged(s);
3012 gen_helper_ptlb(cpu_env);
3013 return NO_EXIT;
3015 #endif
3017 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3019 int i3 = get_field(s->fields, i3);
3020 int i4 = get_field(s->fields, i4);
3021 int i5 = get_field(s->fields, i5);
3022 int do_zero = i4 & 0x80;
3023 uint64_t mask, imask, pmask;
3024 int pos, len, rot;
3026 /* Adjust the arguments for the specific insn. */
3027 switch (s->fields->op2) {
3028 case 0x55: /* risbg */
3029 i3 &= 63;
3030 i4 &= 63;
3031 pmask = ~0;
3032 break;
3033 case 0x5d: /* risbhg */
3034 i3 &= 31;
3035 i4 &= 31;
3036 pmask = 0xffffffff00000000ull;
3037 break;
3038 case 0x51: /* risblg */
3039 i3 &= 31;
3040 i4 &= 31;
3041 pmask = 0x00000000ffffffffull;
3042 break;
3043 default:
3044 abort();
3047 /* MASK is the set of bits to be inserted from R2.
3048 Take care for I3/I4 wraparound. */
3049 mask = pmask >> i3;
3050 if (i3 <= i4) {
3051 mask ^= pmask >> i4 >> 1;
3052 } else {
3053 mask |= ~(pmask >> i4 >> 1);
3055 mask &= pmask;
3057 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3058 insns, we need to keep the other half of the register. */
3059 imask = ~mask | ~pmask;
3060 if (do_zero) {
3061 if (s->fields->op2 == 0x55) {
3062 imask = 0;
3063 } else {
3064 imask = ~pmask;
3068 /* In some cases we can implement this with deposit, which can be more
3069 efficient on some hosts. */
3070 if (~mask == imask && i3 <= i4) {
3071 if (s->fields->op2 == 0x5d) {
3072 i3 += 32, i4 += 32;
3074 /* Note that we rotate the bits to be inserted to the lsb, not to
3075 the position as described in the PoO. */
3076 len = i4 - i3 + 1;
3077 pos = 63 - i4;
3078 rot = (i5 - pos) & 63;
3079 } else {
3080 pos = len = -1;
3081 rot = i5 & 63;
3084 /* Rotate the input as necessary. */
3085 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3087 /* Insert the selected bits into the output. */
3088 if (pos >= 0) {
3089 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3090 } else if (imask == 0) {
3091 tcg_gen_andi_i64(o->out, o->in2, mask);
3092 } else {
3093 tcg_gen_andi_i64(o->in2, o->in2, mask);
3094 tcg_gen_andi_i64(o->out, o->out, imask);
3095 tcg_gen_or_i64(o->out, o->out, o->in2);
3097 return NO_EXIT;
3100 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3102 int i3 = get_field(s->fields, i3);
3103 int i4 = get_field(s->fields, i4);
3104 int i5 = get_field(s->fields, i5);
3105 uint64_t mask;
3107 /* If this is a test-only form, arrange to discard the result. */
3108 if (i3 & 0x80) {
3109 o->out = tcg_temp_new_i64();
3110 o->g_out = false;
3113 i3 &= 63;
3114 i4 &= 63;
3115 i5 &= 63;
3117 /* MASK is the set of bits to be operated on from R2.
3118 Take care for I3/I4 wraparound. */
3119 mask = ~0ull >> i3;
3120 if (i3 <= i4) {
3121 mask ^= ~0ull >> i4 >> 1;
3122 } else {
3123 mask |= ~(~0ull >> i4 >> 1);
3126 /* Rotate the input as necessary. */
3127 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3129 /* Operate. */
3130 switch (s->fields->op2) {
3131 case 0x55: /* AND */
3132 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3133 tcg_gen_and_i64(o->out, o->out, o->in2);
3134 break;
3135 case 0x56: /* OR */
3136 tcg_gen_andi_i64(o->in2, o->in2, mask);
3137 tcg_gen_or_i64(o->out, o->out, o->in2);
3138 break;
3139 case 0x57: /* XOR */
3140 tcg_gen_andi_i64(o->in2, o->in2, mask);
3141 tcg_gen_xor_i64(o->out, o->out, o->in2);
3142 break;
3143 default:
3144 abort();
3147 /* Set the CC. */
3148 tcg_gen_andi_i64(cc_dst, o->out, mask);
3149 set_cc_nz_u64(s, cc_dst);
3150 return NO_EXIT;
3153 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3155 tcg_gen_bswap16_i64(o->out, o->in2);
3156 return NO_EXIT;
3159 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3161 tcg_gen_bswap32_i64(o->out, o->in2);
3162 return NO_EXIT;
3165 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3167 tcg_gen_bswap64_i64(o->out, o->in2);
3168 return NO_EXIT;
3171 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3173 TCGv_i32 t1 = tcg_temp_new_i32();
3174 TCGv_i32 t2 = tcg_temp_new_i32();
3175 TCGv_i32 to = tcg_temp_new_i32();
3176 tcg_gen_trunc_i64_i32(t1, o->in1);
3177 tcg_gen_trunc_i64_i32(t2, o->in2);
3178 tcg_gen_rotl_i32(to, t1, t2);
3179 tcg_gen_extu_i32_i64(o->out, to);
3180 tcg_temp_free_i32(t1);
3181 tcg_temp_free_i32(t2);
3182 tcg_temp_free_i32(to);
3183 return NO_EXIT;
3186 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3188 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3189 return NO_EXIT;
3192 #ifndef CONFIG_USER_ONLY
3193 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3195 check_privileged(s);
3196 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3197 set_cc_static(s);
3198 return NO_EXIT;
3201 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3203 check_privileged(s);
3204 gen_helper_sacf(cpu_env, o->in2);
3205 /* Addressing mode has changed, so end the block. */
3206 return EXIT_PC_STALE;
3208 #endif
3210 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3212 int sam = s->insn->data;
3213 TCGv_i64 tsam;
3214 uint64_t mask;
3216 switch (sam) {
3217 case 0:
3218 mask = 0xffffff;
3219 break;
3220 case 1:
3221 mask = 0x7fffffff;
3222 break;
3223 default:
3224 mask = -1;
3225 break;
3228 /* Bizarre but true, we check the address of the current insn for the
3229 specification exception, not the next to be executed. Thus the PoO
3230 documents that Bad Things Happen two bytes before the end. */
3231 if (s->pc & ~mask) {
3232 gen_program_exception(s, PGM_SPECIFICATION);
3233 return EXIT_NORETURN;
3235 s->next_pc &= mask;
3237 tsam = tcg_const_i64(sam);
3238 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3239 tcg_temp_free_i64(tsam);
3241 /* Always exit the TB, since we (may have) changed execution mode. */
3242 return EXIT_PC_STALE;
3245 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3247 int r1 = get_field(s->fields, r1);
3248 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3249 return NO_EXIT;
3252 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3254 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3255 return NO_EXIT;
3258 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3260 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3261 return NO_EXIT;
3264 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3266 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3267 return_low128(o->out2);
3268 return NO_EXIT;
3271 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3273 gen_helper_sqeb(o->out, cpu_env, o->in2);
3274 return NO_EXIT;
3277 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3279 gen_helper_sqdb(o->out, cpu_env, o->in2);
3280 return NO_EXIT;
3283 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3285 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3286 return_low128(o->out2);
3287 return NO_EXIT;
3290 #ifndef CONFIG_USER_ONLY
3291 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3293 check_privileged(s);
3294 potential_page_fault(s);
3295 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3296 set_cc_static(s);
3297 return NO_EXIT;
3300 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3302 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3303 check_privileged(s);
3304 potential_page_fault(s);
3305 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3306 tcg_temp_free_i32(r1);
3307 return NO_EXIT;
3309 #endif
3311 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3313 DisasCompare c;
3314 TCGv_i64 a;
3315 TCGLabel *lab;
3316 int r1;
3318 disas_jcc(s, &c, get_field(s->fields, m3));
3320 /* We want to store when the condition is fulfilled, so branch
3321 out when it's not */
3322 c.cond = tcg_invert_cond(c.cond);
3324 lab = gen_new_label();
3325 if (c.is_64) {
3326 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3327 } else {
3328 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3330 free_compare(&c);
3332 r1 = get_field(s->fields, r1);
3333 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3334 if (s->insn->data) {
3335 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3336 } else {
3337 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3339 tcg_temp_free_i64(a);
3341 gen_set_label(lab);
3342 return NO_EXIT;
3345 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3347 uint64_t sign = 1ull << s->insn->data;
3348 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3349 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3350 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3351 /* The arithmetic left shift is curious in that it does not affect
3352 the sign bit. Copy that over from the source unchanged. */
3353 tcg_gen_andi_i64(o->out, o->out, ~sign);
3354 tcg_gen_andi_i64(o->in1, o->in1, sign);
3355 tcg_gen_or_i64(o->out, o->out, o->in1);
3356 return NO_EXIT;
3359 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3361 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3362 return NO_EXIT;
3365 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3367 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3368 return NO_EXIT;
3371 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3373 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3374 return NO_EXIT;
3377 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3379 gen_helper_sfpc(cpu_env, o->in2);
3380 return NO_EXIT;
3383 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3385 gen_helper_sfas(cpu_env, o->in2);
3386 return NO_EXIT;
3389 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3391 int b2 = get_field(s->fields, b2);
3392 int d2 = get_field(s->fields, d2);
3393 TCGv_i64 t1 = tcg_temp_new_i64();
3394 TCGv_i64 t2 = tcg_temp_new_i64();
3395 int mask, pos, len;
3397 switch (s->fields->op2) {
3398 case 0x99: /* SRNM */
3399 pos = 0, len = 2;
3400 break;
3401 case 0xb8: /* SRNMB */
3402 pos = 0, len = 3;
3403 break;
3404 case 0xb9: /* SRNMT */
3405 pos = 4, len = 3;
3406 break;
3407 default:
3408 tcg_abort();
3410 mask = (1 << len) - 1;
3412 /* Insert the value into the appropriate field of the FPC. */
3413 if (b2 == 0) {
3414 tcg_gen_movi_i64(t1, d2 & mask);
3415 } else {
3416 tcg_gen_addi_i64(t1, regs[b2], d2);
3417 tcg_gen_andi_i64(t1, t1, mask);
3419 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3420 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3421 tcg_temp_free_i64(t1);
3423 /* Then install the new FPC to set the rounding mode in fpu_status. */
3424 gen_helper_sfpc(cpu_env, t2);
3425 tcg_temp_free_i64(t2);
3426 return NO_EXIT;
3429 #ifndef CONFIG_USER_ONLY
3430 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3432 check_privileged(s);
3433 tcg_gen_shri_i64(o->in2, o->in2, 4);
3434 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3435 return NO_EXIT;
3438 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3440 check_privileged(s);
3441 gen_helper_sske(cpu_env, o->in1, o->in2);
3442 return NO_EXIT;
3445 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3447 check_privileged(s);
3448 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3449 return NO_EXIT;
3452 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3454 check_privileged(s);
3455 /* ??? Surely cpu address != cpu number. In any case the previous
3456 version of this stored more than the required half-word, so it
3457 is unlikely this has ever been tested. */
3458 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3459 return NO_EXIT;
3462 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3464 gen_helper_stck(o->out, cpu_env);
3465 /* ??? We don't implement clock states. */
3466 gen_op_movi_cc(s, 0);
3467 return NO_EXIT;
3470 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3472 TCGv_i64 c1 = tcg_temp_new_i64();
3473 TCGv_i64 c2 = tcg_temp_new_i64();
3474 gen_helper_stck(c1, cpu_env);
3475 /* Shift the 64-bit value into its place as a zero-extended
3476 104-bit value. Note that "bit positions 64-103 are always
3477 non-zero so that they compare differently to STCK"; we set
3478 the least significant bit to 1. */
3479 tcg_gen_shli_i64(c2, c1, 56);
3480 tcg_gen_shri_i64(c1, c1, 8);
3481 tcg_gen_ori_i64(c2, c2, 0x10000);
3482 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3483 tcg_gen_addi_i64(o->in2, o->in2, 8);
3484 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3485 tcg_temp_free_i64(c1);
3486 tcg_temp_free_i64(c2);
3487 /* ??? We don't implement clock states. */
3488 gen_op_movi_cc(s, 0);
3489 return NO_EXIT;
3492 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3494 check_privileged(s);
3495 gen_helper_sckc(cpu_env, o->in2);
3496 return NO_EXIT;
3499 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3501 check_privileged(s);
3502 gen_helper_stckc(o->out, cpu_env);
3503 return NO_EXIT;
3506 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3508 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3509 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3510 check_privileged(s);
3511 potential_page_fault(s);
3512 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3513 tcg_temp_free_i32(r1);
3514 tcg_temp_free_i32(r3);
3515 return NO_EXIT;
3518 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3520 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3521 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3522 check_privileged(s);
3523 potential_page_fault(s);
3524 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3525 tcg_temp_free_i32(r1);
3526 tcg_temp_free_i32(r3);
3527 return NO_EXIT;
3530 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3532 TCGv_i64 t1 = tcg_temp_new_i64();
3534 check_privileged(s);
3535 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3536 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3537 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3538 tcg_temp_free_i64(t1);
3540 return NO_EXIT;
3543 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3545 check_privileged(s);
3546 gen_helper_spt(cpu_env, o->in2);
3547 return NO_EXIT;
3550 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3552 TCGv_i64 f, a;
3553 /* We really ought to have more complete indication of facilities
3554 that we implement. Address this when STFLE is implemented. */
3555 check_privileged(s);
3556 f = tcg_const_i64(0xc0000000);
3557 a = tcg_const_i64(200);
3558 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3559 tcg_temp_free_i64(f);
3560 tcg_temp_free_i64(a);
3561 return NO_EXIT;
3564 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3566 check_privileged(s);
3567 gen_helper_stpt(o->out, cpu_env);
3568 return NO_EXIT;
3571 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3573 check_privileged(s);
3574 potential_page_fault(s);
3575 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3576 set_cc_static(s);
3577 return NO_EXIT;
3580 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3582 check_privileged(s);
3583 gen_helper_spx(cpu_env, o->in2);
3584 return NO_EXIT;
3587 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3589 check_privileged(s);
3590 /* Not operational. */
3591 gen_op_movi_cc(s, 3);
3592 return NO_EXIT;
3595 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3597 check_privileged(s);
3598 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3599 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3600 return NO_EXIT;
3603 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3605 uint64_t i2 = get_field(s->fields, i2);
3606 TCGv_i64 t;
3608 check_privileged(s);
3610 /* It is important to do what the instruction name says: STORE THEN.
3611 If we let the output hook perform the store then if we fault and
3612 restart, we'll have the wrong SYSTEM MASK in place. */
3613 t = tcg_temp_new_i64();
3614 tcg_gen_shri_i64(t, psw_mask, 56);
3615 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3616 tcg_temp_free_i64(t);
3618 if (s->fields->op == 0xac) {
3619 tcg_gen_andi_i64(psw_mask, psw_mask,
3620 (i2 << 56) | 0x00ffffffffffffffull);
3621 } else {
3622 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3624 return NO_EXIT;
3627 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3629 check_privileged(s);
3630 potential_page_fault(s);
3631 gen_helper_stura(cpu_env, o->in2, o->in1);
3632 return NO_EXIT;
3635 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3637 check_privileged(s);
3638 potential_page_fault(s);
3639 gen_helper_sturg(cpu_env, o->in2, o->in1);
3640 return NO_EXIT;
3642 #endif
/* Plain stores: write the low 8/16/32/64 bits of operand 1 to the
   address in operand 2.  */
3644 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3646     tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3647     return NO_EXIT;
3650 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3652     tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3653     return NO_EXIT;
3656 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3658     tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3659     return NO_EXIT;
3662 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3664     tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3665     return NO_EXIT;
/* STAM (STORE ACCESS MULTIPLE): the helper stores access registers
   r1 through r3; may fault on the memory write.  */
3668 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3670     TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3671     TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3672     potential_page_fault(s);
3673     gen_helper_stam(cpu_env, r1, o->in2, r3);
3674     tcg_temp_free_i32(r1);
3675     tcg_temp_free_i32(r3);
3676     return NO_EXIT;
/* STCM (STORE CHARACTERS UNDER MASK): store the bytes of operand 1
   selected by the 4-bit mask m3 to successive byte addresses.
   s->insn->data supplies the bit offset of the mask's base within the
   64-bit register (selects the STCM/STCMH register half).  */
3679 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3681     int m3 = get_field(s->fields, m3);
3682     int pos, base = s->insn->data;
3683     TCGv_i64 tmp = tcg_temp_new_i64();
/* Position of the least significant selected byte.  */
3685     pos = base + ctz32(m3) * 8;
3686     switch (m3) {
3687     case 0xf:
3688         /* Effectively a 32-bit store.  */
3689         tcg_gen_shri_i64(tmp, o->in1, pos);
3690         tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3691         break;
/* Two adjacent mask bits: a single 16-bit store suffices.  */
3693     case 0xc:
3694     case 0x6:
3695     case 0x3:
3696         /* Effectively a 16-bit store.  */
3697         tcg_gen_shri_i64(tmp, o->in1, pos);
3698         tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3699         break;
/* Exactly one mask bit set: a single byte store.  */
3701     case 0x8:
3702     case 0x4:
3703     case 0x2:
3704     case 0x1:
3705         /* Effectively an 8-bit store.  */
3706         tcg_gen_shri_i64(tmp, o->in1, pos);
3707         tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3708         break;
3710     default:
3711         /* This is going to be a sequence of shifts and stores.  */
/* Walk the mask from its most significant bit, emitting one byte
   store per set bit and bumping the address after each store.  */
3712         pos = base + 32 - 8;
3713         while (m3) {
3714             if (m3 & 0x8) {
3715                 tcg_gen_shri_i64(tmp, o->in1, pos);
3716                 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3717                 tcg_gen_addi_i64(o->in2, o->in2, 1);
3719             m3 = (m3 << 1) & 0xf;
3720             pos -= 8;
3722         break;
3724     tcg_temp_free_i64(tmp);
3725     return NO_EXIT;
/* STM / STMG (STORE MULTIPLE): store registers r1..r3 (wrapping mod
   16) to consecutive memory; s->insn->data gives the element size
   (4 or 8 bytes).  */
3728 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3730     int r1 = get_field(s->fields, r1);
3731     int r3 = get_field(s->fields, r3);
3732     int size = s->insn->data;
3733     TCGv_i64 tsize = tcg_const_i64(size);
3735     while (1) {
3736         if (size == 8) {
3737             tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3738         } else {
3739             tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
/* Loop terminates after storing r3; note the register number wraps
   around 15 -> 0 per the architecture.  */
3741         if (r1 == r3) {
3742             break;
3744         tcg_gen_add_i64(o->in2, o->in2, tsize);
3745         r1 = (r1 + 1) & 15;
3748     tcg_temp_free_i64(tsize);
3749     return NO_EXIT;
/* STMH (STORE MULTIPLE HIGH): like op_stm but stores the high 32 bits
   of each register, obtained by shifting left 32 then storing 32 bits.
   NOTE(review): the shift count and stride are materialized as TCG
   constants (t32, t4) rather than immediates; behavior is the same.  */
3752 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3754     int r1 = get_field(s->fields, r1);
3755     int r3 = get_field(s->fields, r3);
3756     TCGv_i64 t = tcg_temp_new_i64();
3757     TCGv_i64 t4 = tcg_const_i64(4);
3758     TCGv_i64 t32 = tcg_const_i64(32);
3760     while (1) {
3761         tcg_gen_shl_i64(t, regs[r1], t32);
3762         tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3763         if (r1 == r3) {
3764             break;
3766         tcg_gen_add_i64(o->in2, o->in2, t4);
3767         r1 = (r1 + 1) & 15;
3770     tcg_temp_free_i64(t);
3771     tcg_temp_free_i64(t4);
3772     tcg_temp_free_i64(t32);
3773     return NO_EXIT;
/* SRST (SEARCH STRING): helper scans for the byte in regs[0]; returns
   updated addresses in out/out2 and sets the CC from the helper.  */
3776 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3778     potential_page_fault(s);
3779     gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3780     set_cc_static(s);
3781     return_low128(o->in2);
3782     return NO_EXIT;
/* Plain 64-bit subtraction; CC handling comes from the cout hook.  */
3785 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3787     tcg_gen_sub_i64(o->out, o->in1, o->in2);
3788     return NO_EXIT;
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where borrow is
   derived from the previous condition code.  */
3791 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3793     DisasCompare cmp;
3794     TCGv_i64 borrow;
3796     tcg_gen_sub_i64(o->out, o->in1, o->in2);
3798     /* The !borrow flag is the msb of CC.  Since we want the inverse of
3799        that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
3800     disas_jcc(s, &cmp, 8 | 4);
3801     borrow = tcg_temp_new_i64();
3802     if (cmp.is_64) {
3803         tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3804     } else {
/* 32-bit comparison: compute the 0/1 borrow in 32 bits, then widen.  */
3805         TCGv_i32 t = tcg_temp_new_i32();
3806         tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3807         tcg_gen_extu_i32_i64(borrow, t);
3808         tcg_temp_free_i32(t);
3810     free_compare(&cmp);
3812     tcg_gen_sub_i64(o->out, o->out, borrow);
3813     tcg_temp_free_i64(borrow);
3814     return NO_EXIT;
/* SVC (SUPERVISOR CALL): record the call number and instruction
   length in env, then raise the exception; translation stops here.  */
3817 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3819     TCGv_i32 t;
3821     update_psw_addr(s);
3822     update_cc_op(s);
3824     t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3825     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3826     tcg_temp_free_i32(t);
3828     t = tcg_const_i32(s->next_pc - s->pc);
3829     tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3830     tcg_temp_free_i32(t);
3832     gen_exception(EXCP_SVC);
3833     return EXIT_NORETURN;
/* TEST DATA CLASS for short/long/extended BFP: the helper computes
   the condition code directly into cc_op.  */
3836 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3838     gen_helper_tceb(cc_op, o->in1, o->in2);
3839     set_cc_static(s);
3840     return NO_EXIT;
3843 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3845     gen_helper_tcdb(cc_op, o->in1, o->in2);
3846     set_cc_static(s);
3847     return NO_EXIT;
/* 128-bit variant: the float128 operand arrives as the out/out2 pair.  */
3850 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3852     gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3853     set_cc_static(s);
3854     return NO_EXIT;
3857 #ifndef CONFIG_USER_ONLY
/* TPROT (TEST PROTECTION): privileged-mode only; helper sets the CC.  */
3858 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3860     potential_page_fault(s);
3861     gen_helper_tprot(cc_op, o->addr1, o->in2);
3862     set_cc_static(s);
3863     return NO_EXIT;
3865 #endif
/* TR (TRANSLATE): helper rewrites l1+1 bytes at addr1 through the
   table at in2.  */
3867 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3869     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3870     potential_page_fault(s);
3871     gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3872     tcg_temp_free_i32(l);
3873     set_cc_static(s);
3874     return NO_EXIT;
/* TRE (TRANSLATE EXTENDED): helper returns an updated address pair.  */
3877 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
3879     potential_page_fault(s);
3880     gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
3881     return_low128(o->out2);
3882     set_cc_static(s);
3883     return NO_EXIT;
/* TRT (TRANSLATE AND TEST): helper computes the condition code.  */
3886 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
3888     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3889     potential_page_fault(s);
3890     gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
3891     tcg_temp_free_i32(l);
3892     set_cc_static(s);
3893     return NO_EXIT;
/* UNPK (UNPACK): done entirely in the helper; no CC change.  */
3896 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3898     TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3899     potential_page_fault(s);
3900     gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3901     tcg_temp_free_i32(l);
3902     return NO_EXIT;
/* XC (EXCLUSIVE OR characters): memory ^= memory over l1+1 bytes.
   The common XC reg,reg self-clear idiom is special-cased as an
   inline zeroing sequence; everything else goes through the helper.  */
3905 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3907     int d1 = get_field(s->fields, d1);
3908     int d2 = get_field(s->fields, d2);
3909     int b1 = get_field(s->fields, b1);
3910     int b2 = get_field(s->fields, b2);
3911     int l = get_field(s->fields, l1);
3912     TCGv_i32 t32;
3914     o->addr1 = get_address(s, 0, b1, d1);
3916     /* If the addresses are identical, this is a store/memset of zero.  */
/* Only inline up to 32 bytes; larger clears use the helper below.  */
3917     if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3918         o->in2 = tcg_const_i64(0);
/* l is the encoded length minus one; convert to a byte count, then
   emit the widest stores possible (8, then 4, 2, 1 bytes), bumping
   the address only while bytes remain.  */
3920         l++;
3921         while (l >= 8) {
3922             tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3923             l -= 8;
3924             if (l > 0) {
3925                 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3928         if (l >= 4) {
3929             tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3930             l -= 4;
3931             if (l > 0) {
3932                 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3935         if (l >= 2) {
3936             tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3937             l -= 2;
3938             if (l > 0) {
3939                 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3942         if (l) {
3943             tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
/* x ^ x == 0, so the result is all-zero and CC is always 0.  */
3945         gen_op_movi_cc(s, 0);
3946         return NO_EXIT;
3949     /* But in general we'll defer to a helper.  */
3950     o->in2 = get_address(s, 0, b2, d2);
3951     t32 = tcg_const_i32(l);
3952     potential_page_fault(s);
3953     gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3954     tcg_temp_free_i32(t32);
3955     set_cc_static(s);
3956     return NO_EXIT;
/* Plain 64-bit XOR; the cout hook derives the condition code.  */
3959 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3961     tcg_gen_xor_i64(o->out, o->in1, o->in2);
3962     return NO_EXIT;
/* XOR-immediate into a sub-field of the register: s->insn->data packs
   the field width (high byte) and bit offset (low byte).  */
3965 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3967     int shift = s->insn->data & 0xff;
3968     int size = s->insn->data >> 8;
3969     uint64_t mask = ((1ull << size) - 1) << shift;
/* in2 must be a private temp since we shift it in place.  */
3971     assert(!o->g_in2);
3972     tcg_gen_shli_i64(o->in2, o->in2, shift);
3973     tcg_gen_xor_i64(o->out, o->in1, o->in2);
3975     /* Produce the CC from only the bits manipulated.  */
3976     tcg_gen_andi_i64(cc_dst, o->out, mask);
3977     set_cc_nz_u64(s, cc_dst);
3978     return NO_EXIT;
/* Produce a constant-zero output (e.g. for zeroing idioms).  */
3981 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3983     o->out = tcg_const_i64(0);
3984     return NO_EXIT;
/* Zero an output pair; out2 aliases out, so mark it global to keep
   the cleanup code from freeing the same temp twice.  */
3987 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3989     o->out = tcg_const_i64(0);
3990     o->out2 = o->out;
3991     o->g_out2 = true;
3992     return NO_EXIT;
3995 /* ====================================================================== */
3996 /* The "Cc OUTput" generators.  Given the generated output (and in some cases
3997    the original inputs), update the various cc data structures in order to
3998    be able to compute the new condition code.  */
4000 static void cout_abs32(DisasContext *s, DisasOps *o)
4002     gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4005 static void cout_abs64(DisasContext *s, DisasOps *o)
4007     gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4010 static void cout_adds32(DisasContext *s, DisasOps *o)
4012     gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4015 static void cout_adds64(DisasContext *s, DisasOps *o)
4017     gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4020 static void cout_addu32(DisasContext *s, DisasOps *o)
4022     gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4025 static void cout_addu64(DisasContext *s, DisasOps *o)
4027     gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4030 static void cout_addc32(DisasContext *s, DisasOps *o)
4032     gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4035 static void cout_addc64(DisasContext *s, DisasOps *o)
4037     gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4040 static void cout_cmps32(DisasContext *s, DisasOps *o)
4042     gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4045 static void cout_cmps64(DisasContext *s, DisasOps *o)
4047     gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4050 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4052     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4055 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4057     gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4060 static void cout_f32(DisasContext *s, DisasOps *o)
4062     gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4065 static void cout_f64(DisasContext *s, DisasOps *o)
4067     gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4070 static void cout_f128(DisasContext *s, DisasOps *o)
4072     gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4075 static void cout_nabs32(DisasContext *s, DisasOps *o)
4077     gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4080 static void cout_nabs64(DisasContext *s, DisasOps *o)
4082     gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4085 static void cout_neg32(DisasContext *s, DisasOps *o)
4087     gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4090 static void cout_neg64(DisasContext *s, DisasOps *o)
4092     gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* 32-bit non-zero test: zero-extend first so stale high bits of the
   64-bit temp do not leak into the CC computation.  */
4095 static void cout_nz32(DisasContext *s, DisasOps *o)
4097     tcg_gen_ext32u_i64(cc_dst, o->out);
4098     gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4101 static void cout_nz64(DisasContext *s, DisasOps *o)
4103     gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4106 static void cout_s32(DisasContext *s, DisasOps *o)
4108     gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4111 static void cout_s64(DisasContext *s, DisasOps *o)
4113     gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4116 static void cout_subs32(DisasContext *s, DisasOps *o)
4118     gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4121 static void cout_subs64(DisasContext *s, DisasOps *o)
4123     gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4126 static void cout_subu32(DisasContext *s, DisasOps *o)
4128     gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4131 static void cout_subu64(DisasContext *s, DisasOps *o)
4133     gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4136 static void cout_subb32(DisasContext *s, DisasOps *o)
4138     gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4141 static void cout_subb64(DisasContext *s, DisasOps *o)
4143     gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4146 static void cout_tm32(DisasContext *s, DisasOps *o)
4148     gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4151 static void cout_tm64(DisasContext *s, DisasOps *o)
4153     gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4156 /* ====================================================================== */
4157 /* The "PREParation" generators.  These initialize the DisasOps.OUT fields
4158    with the TCG register to which we will write.  Used in combination with
4159    the "wout" generators, in some cases we need a new temporary, and in
4160    some cases we can write to a TCG global.  */
/* Fresh temporary for a single output.  */
4162 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4164     o->out = tcg_temp_new_i64();
4166 #define SPEC_prep_new 0
/* Fresh temporaries for a pair of outputs.  */
4168 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4170     o->out = tcg_temp_new_i64();
4171     o->out2 = tcg_temp_new_i64();
4173 #define SPEC_prep_new_P 0
/* Write directly into general register r1 (a TCG global; g_out stops
   the epilogue from freeing it).  */
4175 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4177     o->out = regs[get_field(f, r1)];
4178     o->g_out = true;
4180 #define SPEC_prep_r1 0
/* Even/odd general register pair r1:r1+1; the SPEC enforces even r1.  */
4182 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4184     int r1 = get_field(f, r1);
4185     o->out = regs[r1];
4186     o->out2 = regs[r1 + 1];
4187     o->g_out = o->g_out2 = true;
4189 #define SPEC_prep_r1_P SPEC_r1_even
/* Write directly into floating-point register r1.  */
4191 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4193     o->out = fregs[get_field(f, r1)];
4194     o->g_out = true;
4196 #define SPEC_prep_f1 0
/* Float128 register pair r1:r1+2; the SPEC enforces a valid f128 reg.  */
4198 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4200     int r1 = get_field(f, r1);
4201     o->out = fregs[r1];
4202     o->out2 = fregs[r1 + 2];
4203     o->g_out = o->g_out2 = true;
4205 #define SPEC_prep_x1 SPEC_r1_f128
4207 /* ====================================================================== */
4208 /* The "Write OUTput" generators.  These generally perform some non-trivial
4209    copy of data to TCG globals, or to main memory.  The trivial cases are
4210    generally handled by having a "prep" generator install the TCG global
4211    as the destination of the operation.  */
4213 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4215     store_reg(get_field(f, r1), o->out);
4217 #define SPEC_wout_r1 0
/* Deposit only the low 8 bits of the result into r1.  */
4219 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4221     int r1 = get_field(f, r1);
4222     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4224 #define SPEC_wout_r1_8 0
/* Deposit only the low 16 bits of the result into r1.  */
4226 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4228     int r1 = get_field(f, r1);
4229     tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4231 #define SPEC_wout_r1_16 0
4233 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4235     store_reg32_i64(get_field(f, r1), o->out);
4237 #define SPEC_wout_r1_32 0
4239 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4241     store_reg32h_i64(get_field(f, r1), o->out);
4243 #define SPEC_wout_r1_32h 0
/* Store the 32-bit out/out2 pair into the even/odd pair r1:r1+1.  */
4245 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4247     int r1 = get_field(f, r1);
4248     store_reg32_i64(r1, o->out);
4249     store_reg32_i64(r1 + 1, o->out2);
4251 #define SPEC_wout_r1_P32 SPEC_r1_even
/* Split a 64-bit result across the pair: low half to r1+1 first, then
   the high half to r1 (out is clobbered by the shift).  */
4253 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4255     int r1 = get_field(f, r1);
4256     store_reg32_i64(r1 + 1, o->out);
4257     tcg_gen_shri_i64(o->out, o->out, 32);
4258     store_reg32_i64(r1, o->out);
4260 #define SPEC_wout_r1_D32 SPEC_r1_even
4262 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4264     store_freg32_i64(get_field(f, r1), o->out);
4266 #define SPEC_wout_e1 0
4268 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4270     store_freg(get_field(f, r1), o->out);
4272 #define SPEC_wout_f1 0
/* Float128 result to fregs r1 and r1+2.
   NOTE(review): reads s->fields instead of the f parameter — same
   value, but inconsistent with the sibling generators.  */
4274 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4276     int f1 = get_field(s->fields, r1);
4277     store_freg(f1, o->out);
4278     store_freg(f1 + 2, o->out2);
4280 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional writebacks: skip the store when r1 == r2 (the result
   would be the unchanged source).  */
4282 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4284     if (get_field(f, r1) != get_field(f, r2)) {
4285         store_reg32_i64(get_field(f, r1), o->out);
4288 #define SPEC_wout_cond_r1r2_32 0
4290 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4292     if (get_field(f, r1) != get_field(f, r2)) {
4293         store_freg32_i64(get_field(f, r1), o->out);
4296 #define SPEC_wout_cond_e1e2 0
/* Memory writebacks of the result to the first-operand address.  */
4298 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4300     tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4302 #define SPEC_wout_m1_8 0
4304 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4306     tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4308 #define SPEC_wout_m1_16 0
4310 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4312     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4314 #define SPEC_wout_m1_32 0
4316 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4318     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4320 #define SPEC_wout_m1_64 0
4322 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4324     tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4326 #define SPEC_wout_m2_32 0
/* Store-and-update pairs used by the interlocked-update insns.  */
4328 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4330     /* XXX release reservation */
4331     tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4332     store_reg32_i64(get_field(f, r1), o->in2);
4334 #define SPEC_wout_m2_32_r1_atomic 0
4336 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4338     /* XXX release reservation */
4339     tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4340     store_reg(get_field(f, r1), o->in2);
4342 #define SPEC_wout_m2_64_r1_atomic 0
4344 /* ====================================================================== */
4345 /* The "INput 1" generators.  These load the first operand to an insn.  */
/* Copy of r1 into a fresh temp (safe to clobber).  */
4347 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4349     o->in1 = load_reg(get_field(f, r1));
4351 #define SPEC_in1_r1 0
/* Alias the r1 global directly; g_in1 prevents it being freed.  */
4353 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4355     o->in1 = regs[get_field(f, r1)];
4356     o->g_in1 = true;
4358 #define SPEC_in1_r1_o 0
4360 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4362     o->in1 = tcg_temp_new_i64();
4363     tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4365 #define SPEC_in1_r1_32s 0
4367 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4369     o->in1 = tcg_temp_new_i64();
4370     tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4372 #define SPEC_in1_r1_32u 0
/* High 32 bits of r1, shifted down.  */
4374 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4376     o->in1 = tcg_temp_new_i64();
4377     tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4379 #define SPEC_in1_r1_sr32 0
/* Odd register of the r1 pair; SPECs enforce even r1.  */
4381 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4383     o->in1 = load_reg(get_field(f, r1) + 1);
4385 #define SPEC_in1_r1p1 SPEC_r1_even
4387 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4389     o->in1 = tcg_temp_new_i64();
4390     tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4392 #define SPEC_in1_r1p1_32s SPEC_r1_even
4394 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4396     o->in1 = tcg_temp_new_i64();
4397     tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4399 #define SPEC_in1_r1p1_32u SPEC_r1_even
/* 64-bit value assembled from the 32-bit even/odd pair (r1 high).  */
4401 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4403     int r1 = get_field(f, r1);
4404     o->in1 = tcg_temp_new_i64();
4405     tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4407 #define SPEC_in1_r1_D32 SPEC_r1_even
4409 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4411     o->in1 = load_reg(get_field(f, r2));
4413 #define SPEC_in1_r2 0
4415 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4417     o->in1 = tcg_temp_new_i64();
4418     tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
4420 #define SPEC_in1_r2_sr32 0
4422 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4424     o->in1 = load_reg(get_field(f, r3));
4426 #define SPEC_in1_r3 0
4428 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4430     o->in1 = regs[get_field(f, r3)];
4431     o->g_in1 = true;
4433 #define SPEC_in1_r3_o 0
4435 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4437     o->in1 = tcg_temp_new_i64();
4438     tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4440 #define SPEC_in1_r3_32s 0
4442 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4444     o->in1 = tcg_temp_new_i64();
4445     tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4447 #define SPEC_in1_r3_32u 0
4449 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4451     int r3 = get_field(f, r3);
4452     o->in1 = tcg_temp_new_i64();
4453     tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4455 #define SPEC_in1_r3_D32 SPEC_r3_even
4457 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4459     o->in1 = load_freg32_i64(get_field(f, r1));
4461 #define SPEC_in1_e1 0
4463 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4465     o->in1 = fregs[get_field(f, r1)];
4466     o->g_in1 = true;
4468 #define SPEC_in1_f1_o 0
/* NOTE(review): fills o->out/o->out2, not o->in1 — presumably because
   the f128 first operand doubles as the output for its users; confirm
   against the insns wired to in1_x1_o in insn-data.def.  */
4470 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4472     int r1 = get_field(f, r1);
4473     o->out = fregs[r1];
4474     o->out2 = fregs[r1 + 2];
4475     o->g_out = o->g_out2 = true;
4477 #define SPEC_in1_x1_o SPEC_r1_f128
4479 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4481     o->in1 = fregs[get_field(f, r3)];
4482     o->g_in1 = true;
4484 #define SPEC_in1_f3_o 0
/* First-operand effective address (base b1 + displacement d1).  */
4486 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4488     o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4490 #define SPEC_in1_la1 0
/* Second-operand effective address placed in addr1 (x2 optional).  */
4492 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4494     int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4495     o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4497 #define SPEC_in1_la2 0
/* Memory loads through the first-operand address, various widths.  */
4499 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4501     in1_la1(s, f, o);
4502     o->in1 = tcg_temp_new_i64();
4503     tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4505 #define SPEC_in1_m1_8u 0
4507 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4509     in1_la1(s, f, o);
4510     o->in1 = tcg_temp_new_i64();
4511     tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4513 #define SPEC_in1_m1_16s 0
4515 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4517     in1_la1(s, f, o);
4518     o->in1 = tcg_temp_new_i64();
4519     tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4521 #define SPEC_in1_m1_16u 0
4523 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4525     in1_la1(s, f, o);
4526     o->in1 = tcg_temp_new_i64();
4527     tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4529 #define SPEC_in1_m1_32s 0
4531 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4533     in1_la1(s, f, o);
4534     o->in1 = tcg_temp_new_i64();
4535     tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4537 #define SPEC_in1_m1_32u 0
4539 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4541     in1_la1(s, f, o);
4542     o->in1 = tcg_temp_new_i64();
4543     tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4545 #define SPEC_in1_m1_64 0
4547 /* ====================================================================== */
4548 /* The "INput 2" generators.  These load the second operand to an insn.  */
/* Alias the r1 global directly (g_in2 prevents freeing).  */
4550 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4552     o->in2 = regs[get_field(f, r1)];
4553     o->g_in2 = true;
4555 #define SPEC_in2_r1_o 0
4557 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4559     o->in2 = tcg_temp_new_i64();
4560     tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4562 #define SPEC_in2_r1_16u 0
4564 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4566     o->in2 = tcg_temp_new_i64();
4567     tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4569 #define SPEC_in2_r1_32u 0
/* 64-bit value assembled from the 32-bit even/odd pair (r1 high).  */
4571 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4573     int r1 = get_field(f, r1);
4574     o->in2 = tcg_temp_new_i64();
4575     tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4577 #define SPEC_in2_r1_D32 SPEC_r1_even
4579 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4581     o->in2 = load_reg(get_field(f, r2));
4583 #define SPEC_in2_r2 0
4585 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4587     o->in2 = regs[get_field(f, r2)];
4588     o->g_in2 = true;
4590 #define SPEC_in2_r2_o 0
/* Load r2 only when nonzero; in2 stays NULL for r2 == 0 (callers that
   use this treat register 0 as "no operand").  */
4592 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4594     int r2 = get_field(f, r2);
4595     if (r2 != 0) {
4596         o->in2 = load_reg(r2);
4599 #define SPEC_in2_r2_nz 0
/* Sign/zero-extended sub-fields of r2.  */
4601 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4603     o->in2 = tcg_temp_new_i64();
4604     tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4606 #define SPEC_in2_r2_8s 0
4608 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4610     o->in2 = tcg_temp_new_i64();
4611     tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4613 #define SPEC_in2_r2_8u 0
4615 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4617     o->in2 = tcg_temp_new_i64();
4618     tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4620 #define SPEC_in2_r2_16s 0
4622 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4624     o->in2 = tcg_temp_new_i64();
4625     tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4627 #define SPEC_in2_r2_16u 0
4629 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4631     o->in2 = load_reg(get_field(f, r3));
4633 #define SPEC_in2_r3 0
4635 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4637     o->in2 = tcg_temp_new_i64();
4638     tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
4640 #define SPEC_in2_r3_sr32 0
4642 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4644     o->in2 = tcg_temp_new_i64();
4645     tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4647 #define SPEC_in2_r2_32s 0
4649 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4651     o->in2 = tcg_temp_new_i64();
4652     tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4654 #define SPEC_in2_r2_32u 0
4656 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4658     o->in2 = tcg_temp_new_i64();
4659     tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
4661 #define SPEC_in2_r2_sr32 0
/* Floating-point register operands.  */
4663 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4665     o->in2 = load_freg32_i64(get_field(f, r2));
4667 #define SPEC_in2_e2 0
4669 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4671     o->in2 = fregs[get_field(f, r2)];
4672     o->g_in2 = true;
4674 #define SPEC_in2_f2_o 0
/* Float128 second operand occupies both in1 and in2.  */
4676 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4678     int r2 = get_field(f, r2);
4679     o->in1 = fregs[r2];
4680     o->in2 = fregs[r2 + 2];
4681     o->g_in1 = o->g_in2 = true;
4683 #define SPEC_in2_x2_o SPEC_r2_f128
/* Address formed from register r2 alone (no index/displacement).  */
4685 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4687     o->in2 = get_address(s, 0, get_field(f, r2), 0);
4689 #define SPEC_in2_ra2 0
/* Standard b2/x2/d2 effective address.  */
4691 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4693     int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4694     o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4696 #define SPEC_in2_a2 0
/* PC-relative address: i2 is a signed halfword offset from this insn.  */
4698 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4700     o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4702 #define SPEC_in2_ri2 0
/* Shift-amount operands, masked to 31 or 63 bits respectively.  */
4704 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4706     help_l2_shift(s, f, o, 31);
4708 #define SPEC_in2_sh32 0
4710 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4712     help_l2_shift(s, f, o, 63);
4714 #define SPEC_in2_sh64 0
/* Memory loads through the a2 address; the loaded value replaces the
   address in o->in2.  */
4716 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4718     in2_a2(s, f, o);
4719     tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4721 #define SPEC_in2_m2_8u 0
4723 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4725     in2_a2(s, f, o);
4726     tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4728 #define SPEC_in2_m2_16s 0
4730 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4732     in2_a2(s, f, o);
4733     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4735 #define SPEC_in2_m2_16u 0
4737 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4739     in2_a2(s, f, o);
4740     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4742 #define SPEC_in2_m2_32s 0
4744 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4746     in2_a2(s, f, o);
4747     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4749 #define SPEC_in2_m2_32u 0
4751 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4753     in2_a2(s, f, o);
4754     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4756 #define SPEC_in2_m2_64 0
/* Memory loads through the PC-relative (ri2) address.  */
4758 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4760     in2_ri2(s, f, o);
4761     tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4763 #define SPEC_in2_mri2_16u 0
4765 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4767     in2_ri2(s, f, o);
4768     tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4770 #define SPEC_in2_mri2_32s 0
4772 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4774     in2_ri2(s, f, o);
4775     tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4777 #define SPEC_in2_mri2_32u 0
4779 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4781     in2_ri2(s, f, o);
4782     tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4784 #define SPEC_in2_mri2_64 0
/* "Atomic" loads: keep the address in addr1 so the matching wout hook
   can store back to the same location.  */
4786 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4788     /* XXX should reserve the address */
4789     in1_la2(s, f, o);
4790     o->in2 = tcg_temp_new_i64();
4791     tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4793 #define SPEC_in2_m2_32s_atomic 0
4795 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4797     /* XXX should reserve the address */
4798     in1_la2(s, f, o);
4799     o->in2 = tcg_temp_new_i64();
4800     tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4802 #define SPEC_in2_m2_64_atomic 0
/* Immediate operands, with various sign/zero extensions and shifts.  */
4804 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4806     o->in2 = tcg_const_i64(get_field(f, i2));
4808 #define SPEC_in2_i2 0
4810 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4812     o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4814 #define SPEC_in2_i2_8u 0
4816 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4818     o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4820 #define SPEC_in2_i2_16u 0
4822 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4824     o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4826 #define SPEC_in2_i2_32u 0
/* Shifted immediates (s->insn->data gives the shift) for the
   insert-immediate-high style insns.  */
4828 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4830     uint64_t i2 = (uint16_t)get_field(f, i2);
4831     o->in2 = tcg_const_i64(i2 << s->insn->data);
4833 #define SPEC_in2_i2_16u_shl 0
4835 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4837     uint64_t i2 = (uint32_t)get_field(f, i2);
4838     o->in2 = tcg_const_i64(i2 << s->insn->data);
4840 #define SPEC_in2_i2_32u_shl 0
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */

/* C() entries carry no extra data; expand them as D() with data 0.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)

/* First expansion of insn-data.def: one enumerator per insn, used to
   index the insn_info[] table built below.  */
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};

/* Second expansion: one DisasInsn initializer per insn, wiring up the
   per-phase helper callbacks (in1/in2/prep/op/wout/cout) and OR-ing
   together the SPEC_* specification-exception bits declared next to
   each helper.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) {                       \
    .opc = OPC,                                                             \
    .fmt = FMT_##FT,                                                        \
    .fac = FAC_##FC,                                                        \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W,  \
    .name = #NM,                                                            \
    .help_in1 = in1_##I1,                                                   \
    .help_in2 = in2_##I2,                                                   \
    .help_prep = prep_##P,                                                  \
    .help_wout = wout_##W,                                                  \
    .help_cout = cout_##CC,                                                 \
    .help_op = op_##OP,                                                     \
    .data = D                                                               \
},

/* Allow 0 to be used for NULL in the table below.  */
#define in1_0  NULL
#define in2_0  NULL
#define prep_0  NULL
#define wout_0  NULL
#define cout_0  NULL
#define op_0  NULL

#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
/* The full decode table, indexed by DisasInsnEnum.  */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};

/* Third expansion: a switch case per opcode mapping the combined
   16-bit opcode to its insn_info[] entry.  */
#undef D
#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    case OPC: return &insn_info[insn_ ## NM];

/* Map a combined (major << 8 | secondary) opcode to its decode info,
   or NULL if the opcode is not in the table.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef D
#undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero size marks an unused slot in the format's field array.  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;    /* sign-extend via the xor/sub identity */
        break;
    case 2: /* dl+dh split, signed 20 bit. */
        /* The low byte extracted is DH (high bits); swap it above DL.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The major opcode (first byte) determines the insn length.  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the raw bits in INSN so that extract_field can use
       big-bit-endian positions straight from the architecture manual.  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        /* get_ilen only returns 2, 4 or 6.  */
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte.  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all.  */
        op2 = 0;
        break;
    default:
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Decode and translate the single instruction at s->pc, emitting TCG
   ops via the per-phase helpers of its DisasInsn entry.  Advances
   s->pc to the next insn and returns how translation should continue
   (NO_EXIT to keep going, or an exit status from the operation).  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Register-pair operands must name the even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit FP register operands: 13 is the highest valid
           first register of a pair.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  The phase order is fixed: inputs,
       output preparation, the operation itself, output write-back,
       then condition-code computation.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       values that alias globals and thus must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a block of guest insns starting at tb->pc into TCG ops.
   With SEARCH_PC set, also record the per-op guest PC / cc_op tables
   used by restore_state_to_opc.  Translation stops at a page boundary,
   when the op buffer or insn budget is exhausted, when single-stepping,
   or when an insn itself ends the TB.  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);

    do {
        if (search_pc) {
            /* Record guest state for every TCG op emitted so far,
               zero-filling entries for ops with no insn boundary.  */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        /* The last insn of an icount TB must run with I/O enabled.  */
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* Stop before translating an insn with a breakpoint on it.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The insn already emitted the TB exit.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        /* Zero-fill the tail of the search tables.  */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
/* Translate a TB without recording the PC-search tables.  */
void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
}
/* Translate a TB and record the PC-search tables, for use when guest
   state must be reconstructed from a host PC.  */
void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
}
5289 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
5291 int cc_op;
5292 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
5293 cc_op = gen_opc_cc_op[pc_pos];
5294 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5295 env->cc_op = cc_op;