target-s390x: implement load-and-trap facility
[qemu/ar7.git] / target-s390x / translate.c
blob9e53c9812eabc8ba189f97af013c710d7b5e27d3
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t pc, next_pc;
58 enum cc_op cc_op;
59 bool singlestep_enabled;
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;
65 bool is_64;
66 bool g1;
67 bool g2;
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
74 #define DISAS_EXCP 4
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
88 return pc;
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
124 for (i = 0; i < 32; i++) {
125 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
126 env->vregs[i][0].ll, env->vregs[i][1].ll);
127 cpu_fprintf(f, (i % 2) ? " " : "\n");
130 #ifndef CONFIG_USER_ONLY
131 for (i = 0; i < 16; i++) {
132 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
133 if ((i % 4) == 3) {
134 cpu_fprintf(f, "\n");
135 } else {
136 cpu_fprintf(f, " ");
139 #endif
141 #ifdef DEBUG_INLINE_BRANCHES
142 for (i = 0; i < CC_OP_MAX; i++) {
143 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
144 inline_branch_miss[i], inline_branch_hit[i]);
146 #endif
148 cpu_fprintf(f, "\n");
151 static TCGv_i64 psw_addr;
152 static TCGv_i64 psw_mask;
154 static TCGv_i32 cc_op;
155 static TCGv_i64 cc_src;
156 static TCGv_i64 cc_dst;
157 static TCGv_i64 cc_vr;
159 static char cpu_reg_names[32][4];
160 static TCGv_i64 regs[16];
161 static TCGv_i64 fregs[16];
163 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
165 void s390x_translate_init(void)
167 int i;
169 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
170 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
171 offsetof(CPUS390XState, psw.addr),
172 "psw_addr");
173 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
174 offsetof(CPUS390XState, psw.mask),
175 "psw_mask");
177 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
178 "cc_op");
179 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
180 "cc_src");
181 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
182 "cc_dst");
183 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
184 "cc_vr");
186 for (i = 0; i < 16; i++) {
187 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
188 regs[i] = tcg_global_mem_new(TCG_AREG0,
189 offsetof(CPUS390XState, regs[i]),
190 cpu_reg_names[i]);
193 for (i = 0; i < 16; i++) {
194 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
195 fregs[i] = tcg_global_mem_new(TCG_AREG0,
196 offsetof(CPUS390XState, vregs[i][0].d),
197 cpu_reg_names[i + 16]);
201 static TCGv_i64 load_reg(int reg)
203 TCGv_i64 r = tcg_temp_new_i64();
204 tcg_gen_mov_i64(r, regs[reg]);
205 return r;
208 static TCGv_i64 load_freg32_i64(int reg)
210 TCGv_i64 r = tcg_temp_new_i64();
211 tcg_gen_shri_i64(r, fregs[reg], 32);
212 return r;
215 static void store_reg(int reg, TCGv_i64 v)
217 tcg_gen_mov_i64(regs[reg], v);
220 static void store_freg(int reg, TCGv_i64 v)
222 tcg_gen_mov_i64(fregs[reg], v);
225 static void store_reg32_i64(int reg, TCGv_i64 v)
227 /* 32 bit register writes keep the upper half */
228 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
231 static void store_reg32h_i64(int reg, TCGv_i64 v)
233 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
236 static void store_freg32_i64(int reg, TCGv_i64 v)
238 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
241 static void return_low128(TCGv_i64 dest)
243 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
246 static void update_psw_addr(DisasContext *s)
248 /* psw.addr */
249 tcg_gen_movi_i64(psw_addr, s->pc);
252 static void update_cc_op(DisasContext *s)
254 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
255 tcg_gen_movi_i32(cc_op, s->cc_op);
259 static void potential_page_fault(DisasContext *s)
261 update_psw_addr(s);
262 update_cc_op(s);
265 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
267 return (uint64_t)cpu_lduw_code(env, pc);
270 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
272 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
275 static int get_mem_index(DisasContext *s)
277 switch (s->tb->flags & FLAG_MASK_ASC) {
278 case PSW_ASC_PRIMARY >> 32:
279 return 0;
280 case PSW_ASC_SECONDARY >> 32:
281 return 1;
282 case PSW_ASC_HOME >> 32:
283 return 2;
284 default:
285 tcg_abort();
286 break;
290 static void gen_exception(int excp)
292 TCGv_i32 tmp = tcg_const_i32(excp);
293 gen_helper_exception(cpu_env, tmp);
294 tcg_temp_free_i32(tmp);
297 static void gen_program_exception(DisasContext *s, int code)
299 TCGv_i32 tmp;
301 /* Remember what pgm exeption this was. */
302 tmp = tcg_const_i32(code);
303 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
304 tcg_temp_free_i32(tmp);
306 tmp = tcg_const_i32(s->next_pc - s->pc);
307 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
308 tcg_temp_free_i32(tmp);
310 /* Advance past instruction. */
311 s->pc = s->next_pc;
312 update_psw_addr(s);
314 /* Save off cc. */
315 update_cc_op(s);
317 /* Trigger exception. */
318 gen_exception(EXCP_PGM);
321 static inline void gen_illegal_opcode(DisasContext *s)
323 gen_program_exception(s, PGM_OPERATION);
326 static inline void gen_trap(DisasContext *s)
328 TCGv_i32 t;
330 /* Set DXC to 0xff. */
331 t = tcg_temp_new_i32();
332 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
333 tcg_gen_ori_i32(t, t, 0xff00);
334 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
335 tcg_temp_free_i32(t);
337 gen_program_exception(s, PGM_DATA);
340 #ifndef CONFIG_USER_ONLY
341 static void check_privileged(DisasContext *s)
343 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
344 gen_program_exception(s, PGM_PRIVILEGED);
347 #endif
349 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
351 TCGv_i64 tmp = tcg_temp_new_i64();
352 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
354 /* Note that d2 is limited to 20 bits, signed. If we crop negative
355 displacements early we create larger immedate addends. */
357 /* Note that addi optimizes the imm==0 case. */
358 if (b2 && x2) {
359 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
360 tcg_gen_addi_i64(tmp, tmp, d2);
361 } else if (b2) {
362 tcg_gen_addi_i64(tmp, regs[b2], d2);
363 } else if (x2) {
364 tcg_gen_addi_i64(tmp, regs[x2], d2);
365 } else {
366 if (need_31) {
367 d2 &= 0x7fffffff;
368 need_31 = false;
370 tcg_gen_movi_i64(tmp, d2);
372 if (need_31) {
373 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
376 return tmp;
379 static inline bool live_cc_data(DisasContext *s)
381 return (s->cc_op != CC_OP_DYNAMIC
382 && s->cc_op != CC_OP_STATIC
383 && s->cc_op > 3);
386 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
388 if (live_cc_data(s)) {
389 tcg_gen_discard_i64(cc_src);
390 tcg_gen_discard_i64(cc_dst);
391 tcg_gen_discard_i64(cc_vr);
393 s->cc_op = CC_OP_CONST0 + val;
396 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
398 if (live_cc_data(s)) {
399 tcg_gen_discard_i64(cc_src);
400 tcg_gen_discard_i64(cc_vr);
402 tcg_gen_mov_i64(cc_dst, dst);
403 s->cc_op = op;
406 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
407 TCGv_i64 dst)
409 if (live_cc_data(s)) {
410 tcg_gen_discard_i64(cc_vr);
412 tcg_gen_mov_i64(cc_src, src);
413 tcg_gen_mov_i64(cc_dst, dst);
414 s->cc_op = op;
417 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
418 TCGv_i64 dst, TCGv_i64 vr)
420 tcg_gen_mov_i64(cc_src, src);
421 tcg_gen_mov_i64(cc_dst, dst);
422 tcg_gen_mov_i64(cc_vr, vr);
423 s->cc_op = op;
426 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
428 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
431 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
433 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
436 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
438 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
441 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
443 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
446 /* CC value is in env->cc_op */
447 static void set_cc_static(DisasContext *s)
449 if (live_cc_data(s)) {
450 tcg_gen_discard_i64(cc_src);
451 tcg_gen_discard_i64(cc_dst);
452 tcg_gen_discard_i64(cc_vr);
454 s->cc_op = CC_OP_STATIC;
457 /* calculates cc into cc_op */
458 static void gen_op_calc_cc(DisasContext *s)
460 TCGv_i32 local_cc_op;
461 TCGv_i64 dummy;
463 TCGV_UNUSED_I32(local_cc_op);
464 TCGV_UNUSED_I64(dummy);
465 switch (s->cc_op) {
466 default:
467 dummy = tcg_const_i64(0);
468 /* FALLTHRU */
469 case CC_OP_ADD_64:
470 case CC_OP_ADDU_64:
471 case CC_OP_ADDC_64:
472 case CC_OP_SUB_64:
473 case CC_OP_SUBU_64:
474 case CC_OP_SUBB_64:
475 case CC_OP_ADD_32:
476 case CC_OP_ADDU_32:
477 case CC_OP_ADDC_32:
478 case CC_OP_SUB_32:
479 case CC_OP_SUBU_32:
480 case CC_OP_SUBB_32:
481 local_cc_op = tcg_const_i32(s->cc_op);
482 break;
483 case CC_OP_CONST0:
484 case CC_OP_CONST1:
485 case CC_OP_CONST2:
486 case CC_OP_CONST3:
487 case CC_OP_STATIC:
488 case CC_OP_DYNAMIC:
489 break;
492 switch (s->cc_op) {
493 case CC_OP_CONST0:
494 case CC_OP_CONST1:
495 case CC_OP_CONST2:
496 case CC_OP_CONST3:
497 /* s->cc_op is the cc value */
498 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
499 break;
500 case CC_OP_STATIC:
501 /* env->cc_op already is the cc value */
502 break;
503 case CC_OP_NZ:
504 case CC_OP_ABS_64:
505 case CC_OP_NABS_64:
506 case CC_OP_ABS_32:
507 case CC_OP_NABS_32:
508 case CC_OP_LTGT0_32:
509 case CC_OP_LTGT0_64:
510 case CC_OP_COMP_32:
511 case CC_OP_COMP_64:
512 case CC_OP_NZ_F32:
513 case CC_OP_NZ_F64:
514 case CC_OP_FLOGR:
515 /* 1 argument */
516 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
517 break;
518 case CC_OP_ICM:
519 case CC_OP_LTGT_32:
520 case CC_OP_LTGT_64:
521 case CC_OP_LTUGTU_32:
522 case CC_OP_LTUGTU_64:
523 case CC_OP_TM_32:
524 case CC_OP_TM_64:
525 case CC_OP_SLA_32:
526 case CC_OP_SLA_64:
527 case CC_OP_NZ_F128:
528 /* 2 arguments */
529 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
530 break;
531 case CC_OP_ADD_64:
532 case CC_OP_ADDU_64:
533 case CC_OP_ADDC_64:
534 case CC_OP_SUB_64:
535 case CC_OP_SUBU_64:
536 case CC_OP_SUBB_64:
537 case CC_OP_ADD_32:
538 case CC_OP_ADDU_32:
539 case CC_OP_ADDC_32:
540 case CC_OP_SUB_32:
541 case CC_OP_SUBU_32:
542 case CC_OP_SUBB_32:
543 /* 3 arguments */
544 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
545 break;
546 case CC_OP_DYNAMIC:
547 /* unknown operation - assume 3 arguments and cc_op in env */
548 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
549 break;
550 default:
551 tcg_abort();
554 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
555 tcg_temp_free_i32(local_cc_op);
557 if (!TCGV_IS_UNUSED_I64(dummy)) {
558 tcg_temp_free_i64(dummy);
561 /* We now have cc in cc_op as constant */
562 set_cc_static(s);
565 static int use_goto_tb(DisasContext *s, uint64_t dest)
567 /* NOTE: we handle the case where the TB spans two pages here */
568 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
569 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
570 && !s->singlestep_enabled
571 && !(s->tb->cflags & CF_LAST_IO));
574 static void account_noninline_branch(DisasContext *s, int cc_op)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss[cc_op]++;
578 #endif
581 static void account_inline_branch(DisasContext *s, int cc_op)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit[cc_op]++;
585 #endif
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
590 static const TCGCond ltgt_cond[16] = {
591 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
592 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
593 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
594 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
595 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
596 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
597 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
598 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
603 static const TCGCond nz_cond[16] = {
604 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
605 TCG_COND_NEVER, TCG_COND_NEVER,
606 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
607 TCG_COND_NE, TCG_COND_NE,
608 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
609 TCG_COND_EQ, TCG_COND_EQ,
610 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison. */
616 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
618 TCGCond cond;
619 enum cc_op old_cc_op = s->cc_op;
621 if (mask == 15 || mask == 0) {
622 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
623 c->u.s32.a = cc_op;
624 c->u.s32.b = cc_op;
625 c->g1 = c->g2 = true;
626 c->is_64 = false;
627 return;
630 /* Find the TCG condition for the mask + cc op. */
631 switch (old_cc_op) {
632 case CC_OP_LTGT0_32:
633 case CC_OP_LTGT0_64:
634 case CC_OP_LTGT_32:
635 case CC_OP_LTGT_64:
636 cond = ltgt_cond[mask];
637 if (cond == TCG_COND_NEVER) {
638 goto do_dynamic;
640 account_inline_branch(s, old_cc_op);
641 break;
643 case CC_OP_LTUGTU_32:
644 case CC_OP_LTUGTU_64:
645 cond = tcg_unsigned_cond(ltgt_cond[mask]);
646 if (cond == TCG_COND_NEVER) {
647 goto do_dynamic;
649 account_inline_branch(s, old_cc_op);
650 break;
652 case CC_OP_NZ:
653 cond = nz_cond[mask];
654 if (cond == TCG_COND_NEVER) {
655 goto do_dynamic;
657 account_inline_branch(s, old_cc_op);
658 break;
660 case CC_OP_TM_32:
661 case CC_OP_TM_64:
662 switch (mask) {
663 case 8:
664 cond = TCG_COND_EQ;
665 break;
666 case 4 | 2 | 1:
667 cond = TCG_COND_NE;
668 break;
669 default:
670 goto do_dynamic;
672 account_inline_branch(s, old_cc_op);
673 break;
675 case CC_OP_ICM:
676 switch (mask) {
677 case 8:
678 cond = TCG_COND_EQ;
679 break;
680 case 4 | 2 | 1:
681 case 4 | 2:
682 cond = TCG_COND_NE;
683 break;
684 default:
685 goto do_dynamic;
687 account_inline_branch(s, old_cc_op);
688 break;
690 case CC_OP_FLOGR:
691 switch (mask & 0xa) {
692 case 8: /* src == 0 -> no one bit found */
693 cond = TCG_COND_EQ;
694 break;
695 case 2: /* src != 0 -> one bit found */
696 cond = TCG_COND_NE;
697 break;
698 default:
699 goto do_dynamic;
701 account_inline_branch(s, old_cc_op);
702 break;
704 case CC_OP_ADDU_32:
705 case CC_OP_ADDU_64:
706 switch (mask) {
707 case 8 | 2: /* vr == 0 */
708 cond = TCG_COND_EQ;
709 break;
710 case 4 | 1: /* vr != 0 */
711 cond = TCG_COND_NE;
712 break;
713 case 8 | 4: /* no carry -> vr >= src */
714 cond = TCG_COND_GEU;
715 break;
716 case 2 | 1: /* carry -> vr < src */
717 cond = TCG_COND_LTU;
718 break;
719 default:
720 goto do_dynamic;
722 account_inline_branch(s, old_cc_op);
723 break;
725 case CC_OP_SUBU_32:
726 case CC_OP_SUBU_64:
727 /* Note that CC=0 is impossible; treat it as dont-care. */
728 switch (mask & 7) {
729 case 2: /* zero -> op1 == op2 */
730 cond = TCG_COND_EQ;
731 break;
732 case 4 | 1: /* !zero -> op1 != op2 */
733 cond = TCG_COND_NE;
734 break;
735 case 4: /* borrow (!carry) -> op1 < op2 */
736 cond = TCG_COND_LTU;
737 break;
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
739 cond = TCG_COND_GEU;
740 break;
741 default:
742 goto do_dynamic;
744 account_inline_branch(s, old_cc_op);
745 break;
747 default:
748 do_dynamic:
749 /* Calculate cc value. */
750 gen_op_calc_cc(s);
751 /* FALLTHRU */
753 case CC_OP_STATIC:
754 /* Jump based on CC. We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s, old_cc_op);
757 old_cc_op = CC_OP_STATIC;
758 cond = TCG_COND_NEVER;
759 break;
762 /* Load up the arguments of the comparison. */
763 c->is_64 = true;
764 c->g1 = c->g2 = false;
765 switch (old_cc_op) {
766 case CC_OP_LTGT0_32:
767 c->is_64 = false;
768 c->u.s32.a = tcg_temp_new_i32();
769 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
770 c->u.s32.b = tcg_const_i32(0);
771 break;
772 case CC_OP_LTGT_32:
773 case CC_OP_LTUGTU_32:
774 case CC_OP_SUBU_32:
775 c->is_64 = false;
776 c->u.s32.a = tcg_temp_new_i32();
777 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
778 c->u.s32.b = tcg_temp_new_i32();
779 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
780 break;
782 case CC_OP_LTGT0_64:
783 case CC_OP_NZ:
784 case CC_OP_FLOGR:
785 c->u.s64.a = cc_dst;
786 c->u.s64.b = tcg_const_i64(0);
787 c->g1 = true;
788 break;
789 case CC_OP_LTGT_64:
790 case CC_OP_LTUGTU_64:
791 case CC_OP_SUBU_64:
792 c->u.s64.a = cc_src;
793 c->u.s64.b = cc_dst;
794 c->g1 = c->g2 = true;
795 break;
797 case CC_OP_TM_32:
798 case CC_OP_TM_64:
799 case CC_OP_ICM:
800 c->u.s64.a = tcg_temp_new_i64();
801 c->u.s64.b = tcg_const_i64(0);
802 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
803 break;
805 case CC_OP_ADDU_32:
806 c->is_64 = false;
807 c->u.s32.a = tcg_temp_new_i32();
808 c->u.s32.b = tcg_temp_new_i32();
809 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
810 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
811 tcg_gen_movi_i32(c->u.s32.b, 0);
812 } else {
813 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
815 break;
817 case CC_OP_ADDU_64:
818 c->u.s64.a = cc_vr;
819 c->g1 = true;
820 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
821 c->u.s64.b = tcg_const_i64(0);
822 } else {
823 c->u.s64.b = cc_src;
824 c->g2 = true;
826 break;
828 case CC_OP_STATIC:
829 c->is_64 = false;
830 c->u.s32.a = cc_op;
831 c->g1 = true;
832 switch (mask) {
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
834 cond = TCG_COND_NE;
835 c->u.s32.b = tcg_const_i32(3);
836 break;
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
838 cond = TCG_COND_NE;
839 c->u.s32.b = tcg_const_i32(2);
840 break;
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
842 cond = TCG_COND_NE;
843 c->u.s32.b = tcg_const_i32(1);
844 break;
845 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
846 cond = TCG_COND_EQ;
847 c->g1 = false;
848 c->u.s32.a = tcg_temp_new_i32();
849 c->u.s32.b = tcg_const_i32(0);
850 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
851 break;
852 case 0x8 | 0x4: /* cc < 2 */
853 cond = TCG_COND_LTU;
854 c->u.s32.b = tcg_const_i32(2);
855 break;
856 case 0x8: /* cc == 0 */
857 cond = TCG_COND_EQ;
858 c->u.s32.b = tcg_const_i32(0);
859 break;
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
861 cond = TCG_COND_NE;
862 c->u.s32.b = tcg_const_i32(0);
863 break;
864 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
865 cond = TCG_COND_NE;
866 c->g1 = false;
867 c->u.s32.a = tcg_temp_new_i32();
868 c->u.s32.b = tcg_const_i32(0);
869 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
870 break;
871 case 0x4: /* cc == 1 */
872 cond = TCG_COND_EQ;
873 c->u.s32.b = tcg_const_i32(1);
874 break;
875 case 0x2 | 0x1: /* cc > 1 */
876 cond = TCG_COND_GTU;
877 c->u.s32.b = tcg_const_i32(1);
878 break;
879 case 0x2: /* cc == 2 */
880 cond = TCG_COND_EQ;
881 c->u.s32.b = tcg_const_i32(2);
882 break;
883 case 0x1: /* cc == 3 */
884 cond = TCG_COND_EQ;
885 c->u.s32.b = tcg_const_i32(3);
886 break;
887 default:
888 /* CC is masked by something else: (8 >> cc) & mask. */
889 cond = TCG_COND_NE;
890 c->g1 = false;
891 c->u.s32.a = tcg_const_i32(8);
892 c->u.s32.b = tcg_const_i32(0);
893 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
894 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
895 break;
897 break;
899 default:
900 abort();
902 c->cond = cond;
905 static void free_compare(DisasCompare *c)
907 if (!c->g1) {
908 if (c->is_64) {
909 tcg_temp_free_i64(c->u.s64.a);
910 } else {
911 tcg_temp_free_i32(c->u.s32.a);
914 if (!c->g2) {
915 if (c->is_64) {
916 tcg_temp_free_i64(c->u.s64.b);
917 } else {
918 tcg_temp_free_i32(c->u.s32.b);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
932 typedef enum {
933 #include "insn-format.def"
934 } DisasFormat;
936 #undef F0
937 #undef F1
938 #undef F2
939 #undef F3
940 #undef F4
941 #undef F5
943 /* Define a structure to hold the decoded fields. We'll store each inside
944 an array indexed by an enum. In order to conserve memory, we'll arrange
945 for fields that do not exist at the same time to overlap, thus the "C"
946 for compact. For checking purposes there is an "O" for original index
947 as well that will be applied to availability bitmaps. */
/* Original (per-format) operand field indices; used in availability
   bitmaps and as the key for have_field/get_field. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact field indices: fields that never coexist in one format share
   a storage slot, so only NUM_C_FIELD slots are needed. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1003 struct DisasFields {
1004 unsigned op:8;
1005 unsigned op2:8;
1006 unsigned presentC:16;
1007 unsigned int presentO;
1008 int c[NUM_C_FIELD];
1011 /* This is the way fields are to be accessed out of DisasFields. */
1012 #define have_field(S, F) have_field1((S), FLD_O_##F)
1013 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1015 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1017 return (f->presentO >> c) & 1;
1020 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1021 enum DisasFieldIndexC c)
1023 assert(have_field1(f, o));
1024 return f->c[c];
1027 /* Describe the layout of each field in each format. */
1028 typedef struct DisasField {
1029 unsigned int beg:8;
1030 unsigned int size:8;
1031 unsigned int type:2;
1032 unsigned int indexC:6;
1033 enum DisasFieldIndexO indexO:8;
1034 } DisasField;
1036 typedef struct DisasFormatInfo {
1037 DisasField op[NUM_C_FIELD];
1038 } DisasFormatInfo;
1040 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1041 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1042 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1044 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1047 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1048 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1049 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1050 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1051 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1052 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1053 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1055 #define F0(N) { { } },
1056 #define F1(N, X1) { { X1 } },
1057 #define F2(N, X1, X2) { { X1, X2 } },
1058 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1059 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1060 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1062 static const DisasFormatInfo format_info[] = {
1063 #include "insn-format.def"
1066 #undef F0
1067 #undef F1
1068 #undef F2
1069 #undef F3
1070 #undef F4
1071 #undef F5
1072 #undef R
1073 #undef M
1074 #undef BD
1075 #undef BXD
1076 #undef BDL
1077 #undef BXDL
1078 #undef I
1079 #undef L
1081 /* Generally, we'll extract operands into this structures, operate upon
1082 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1083 of routines below for more details. */
1084 typedef struct {
1085 bool g_out, g_out2, g_in1, g_in2;
1086 TCGv_i64 out, out2, in1, in2;
1087 TCGv_i64 addr1;
1088 } DisasOps;
1090 /* Instructions can place constraints on their operands, raising specification
1091 exceptions if they are violated. To make this easy to automate, each "in1",
1092 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1093 of the following, or 0. To make this easy to document, we'll put the
1094 SPEC_<name> defines next to <name>. */
1096 #define SPEC_r1_even 1
1097 #define SPEC_r2_even 2
1098 #define SPEC_r3_even 4
1099 #define SPEC_r1_f128 8
1100 #define SPEC_r2_f128 16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* Architecture facility an instruction belongs to; used to reject
   instructions the configured CPU model does not implement. */
typedef enum DisasFacility {
    FAC_Z,                  /* zarch (default) */
    FAC_CASS,               /* compare and swap and store */
    FAC_CASS2,              /* compare and swap and store 2*/
    FAC_DFP,                /* decimal floating point */
    FAC_DFPR,               /* decimal floating point rounding */
    FAC_DO,                 /* distinct operands */
    FAC_EE,                 /* execute extensions */
    FAC_EI,                 /* extended immediate */
    FAC_FPE,                /* floating point extension */
    FAC_FPSSH,              /* floating point support sign handling */
    FAC_FPRGR,              /* FPR-GR transfer */
    FAC_GIE,                /* general instructions extension */
    FAC_HFP_MA,             /* HFP multiply-and-add/subtract */
    FAC_HW,                 /* high-word */
    FAC_IEEEE_SIM,          /* IEEE exception simulation */
    FAC_MIE,                /* miscellaneous-instruction-extensions */
    FAC_LAT,                /* load-and-trap */
    FAC_LOC,                /* load/store on condition */
    FAC_LD,                 /* long displacement */
    FAC_PC,                 /* population count */
    FAC_SCF,                /* store clock fast */
    FAC_SFLE,               /* store facility list extended */
    FAC_ILA,                /* interlocked access facility 1 */
} DisasFacility;
1146 struct DisasInsn {
1147 unsigned opc:16;
1148 DisasFormat fmt:8;
1149 DisasFacility fac:8;
1150 unsigned spec:8;
1152 const char *name;
1154 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1155 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1156 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1157 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1158 void (*help_cout)(DisasContext *, DisasOps *);
1159 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1161 uint64_t data;
1164 /* ====================================================================== */
1165 /* Miscellaneous helpers, used by several operations. */
1167 static void help_l2_shift(DisasContext *s, DisasFields *f,
1168 DisasOps *o, int mask)
1170 int b2 = get_field(f, b2);
1171 int d2 = get_field(f, d2);
1173 if (b2 == 0) {
1174 o->in2 = tcg_const_i64(d2 & mask);
1175 } else {
1176 o->in2 = get_address(s, 0, b2, d2);
1177 tcg_gen_andi_i64(o->in2, o->in2, mask);
1181 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1183 if (dest == s->next_pc) {
1184 return NO_EXIT;
1186 if (use_goto_tb(s, dest)) {
1187 update_cc_op(s);
1188 tcg_gen_goto_tb(0);
1189 tcg_gen_movi_i64(psw_addr, dest);
1190 tcg_gen_exit_tb((uintptr_t)s->tb);
1191 return EXIT_GOTO_TB;
1192 } else {
1193 tcg_gen_movi_i64(psw_addr, dest);
1194 return EXIT_PC_UPDATED;
/* Emit a conditional branch with comparison C.  The taken target is
   either PC-relative (IS_IMM, halfword offset IMM) or the register
   value CDEST.  Three strategies, chosen by which exits may use
   goto_tb: both exits chained, only the fall-through chained, or a
   branch-free movcond that just updates psw_addr.  Always consumes C.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch. */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken. */
            gen_set_label(lab);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
        } else {
            /* Widen the 32-bit comparison result so it can steer the
               64-bit movcond on psw_addr.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1323 /* ====================================================================== */
1324 /* The operations. These perform the bulk of the work for any insn,
1325 usually after the operands have been loaded and output initialized. */
1327 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1329 TCGv_i64 z, n;
1330 z = tcg_const_i64(0);
1331 n = tcg_temp_new_i64();
1332 tcg_gen_neg_i64(n, o->in2);
1333 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1334 tcg_temp_free_i64(n);
1335 tcg_temp_free_i64(z);
1336 return NO_EXIT;
/* Float32 absolute value: clear the sign bit (bit 31) of the f32 held
   in the low half of the i64.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
/* Float64 absolute value: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
/* Float128 absolute value: clear the sign bit in the high doubleword,
   pass the low doubleword through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* Integer add; CC handling is done by the insn table's cc hook, not here.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Add with carry: out = in1 + in2 + carry-in, where the carry-in is
   recovered from the current CC state.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* Short BFP add, delegated to the aeb softfloat helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Long BFP add, delegated to the adb softfloat helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Extended (128-bit) BFP add; the helper returns the high half in out
   and the low half via the low128 mechanism.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Bitwise AND; CC comes from the insn table's cc hook.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND-immediate into a sub-field of the register: insn->data packs
   (size << 8) | shift.  Bits outside the field are forced to 1 in the
   mask operand so they pass through in1 unchanged.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Branch-and-save: store the link info, then branch to the register
   target.  An unused in2 means "no branch" (e.g. basr with r2 == 0).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* Branch-and-save with an immediate (PC-relative, halfword) target.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* Branch on condition: translate mask m1 into a DisasCompare and hand
   off to the common branch helper (relative or register target).  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on count (32-bit): decrement the low 32 bits of r1, branch
   while the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    /* Write back only the low 32 bits of r1.  */
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on count (64-bit): decrement r1 in place, branch while
   non-zero.  g1 marks r1 as a global so free_compare leaves it alone.  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on index (32-bit, BXH/BXLE): r1 += r3, then compare the low
   32 bits of r1 against the comparand in r3|1.  insn->data selects
   branch-on-low-or-equal vs branch-on-high.  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
    /* The comparand lives in the odd register of the r3 pair.  */
    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on index (64-bit, BXHG/BXLEG).  If r1 aliases the comparand
   register r3|1, snapshot the comparand before r1 is updated.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* r1 will be clobbered by the add below; copy the old value.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Compare-and-branch family: compare in1 with in2 per mask m3 (signed,
   or unsigned when insn->data is set), branch to either the relative
   target i4 or the computed address b4+d4.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Register-target form: compute the branch address into out.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* Short BFP compare; the helper yields the CC directly.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Long BFP compare; the helper yields the CC directly.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Extended BFP compare; the helper yields the CC directly.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Convert f32 -> int32 via helper; m3 is passed through to the helper
   (rounding-mode modifier per z/Arch -- see the helper).  CC from input.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert f64 -> int32 via helper with modifier m3; CC from input.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert f128 -> int32 via helper with modifier m3; CC from input.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert f32 -> int64 via helper with modifier m3; CC from input.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert f64 -> int64 via helper with modifier m3; CC from input.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert f128 -> int64 via helper with modifier m3; CC from input.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert f32 -> uint32 via helper with modifier m3; CC from input.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert f64 -> uint32 via helper with modifier m3; CC from input.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert f128 -> uint32 via helper with modifier m3; CC from input.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert f32 -> uint64 via helper with modifier m3; CC from input.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert f64 -> uint64 via helper with modifier m3; CC from input.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert f128 -> uint64 via helper with modifier m3; CC from input.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert int64 -> f32 via helper with modifier m3; no CC update.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert int64 -> f64 via helper with modifier m3; no CC update.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert int64 -> f128 via helper with modifier m3; 128-bit result is
   split between out and the low128 return.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Convert uint64 -> f32 via helper with modifier m3; no CC update.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert uint64 -> f64 via helper with modifier m3; no CC update.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert uint64 -> f128 via helper with modifier m3; 128-bit result is
   split between out and the low128 return.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: helper computes the checksum and the number of bytes
   consumed (LEN); afterwards advance the r2 address and shrink the
   r2+1 length by that amount.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (character): for power-of-two lengths up to 8 bytes,
   inline the two loads and compute CC as an unsigned compare; any other
   length falls back to the byte-loop helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* l is the length minus one, hence the l + 1 switch.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: fully delegated to a helper, which
   also sets the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL UNDER MASK: helper compares the masked bytes of the
   (truncated 32-bit) register value against storage and sets the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: helper uses the terminator byte from r0 and
   returns the updated operand addresses; CC comes from the helper.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1846 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1848 TCGv_i64 t = tcg_temp_new_i64();
1849 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1850 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1851 tcg_gen_or_i64(o->out, o->out, t);
1852 tcg_temp_free_i64(t);
1853 return NO_EXIT;
/* COMPARE AND SWAP (32/64-bit per insn->data).  Non-atomic TCG
   implementation: load old value, compare with expected, store either
   the new value or the old value back.  Statement order is deliberate
   so that a store fault sees the pre-insn CC.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    int is_64 = s->insn->data;
    TCGv_i64 addr, mem, cc, z;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    /* Load the memory into the (temporary) output.  While the PoO only talks
       about moving the memory to R1 on inequality, if we include equality it
       means that R1 is equal to the memory in all conditions.  */
    addr = get_address(s, 0, b2, d2);
    if (is_64) {
        tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
    }

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);

    /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
       Recall that we are allowed to unconditionally issue the store (and
       thus any possible write trap), so (re-)store the original contents
       of MEM in case of inequality.  */
    z = tcg_const_i64(0);
    mem = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
    if (is_64) {
        tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(z);
    tcg_temp_free_i64(mem);
    tcg_temp_free_i64(addr);

    /* Store CC back to cc_op.  Wait until after the store so that any
       exception gets the old cc_op value.  */
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (128-bit, two doublewords).  Non-atomic TCG
   implementation mirroring op_cs; register write-back is deferred until
   after both stores so exceptions observe unmodified state.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */

    addrh = get_address(s, 0, b2, d2);
    addrl = get_address(s, 0, b2, d2 + 8);
    outh = tcg_temp_new_i64();
    outl = tcg_temp_new_i64();

    tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
    tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));

    /* Fold the double-word compare with arithmetic. */
    cc = tcg_temp_new_i64();
    z = tcg_temp_new_i64();
    tcg_gen_xor_i64(cc, outh, regs[r1]);
    tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
    tcg_gen_or_i64(cc, cc, z);
    tcg_gen_movi_i64(z, 0);
    tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);

    /* Store new value on equality, old value otherwise (see op_cs).  */
    memh = tcg_temp_new_i64();
    meml = tcg_temp_new_i64();
    tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
    tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
    tcg_temp_free_i64(z);

    tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
    tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
    tcg_temp_free_i64(memh);
    tcg_temp_free_i64(meml);
    tcg_temp_free_i64(addrh);
    tcg_temp_free_i64(addrl);

    /* Save back state now that we've passed all exceptions. */
    tcg_gen_mov_i64(regs[r1], outh);
    tcg_gen_mov_i64(regs[r1 + 1], outl);
    tcg_gen_trunc_i64_i32(cc_op, cc);
    tcg_temp_free_i64(outh);
    tcg_temp_free_i64(outl);
    tcg_temp_free_i64(cc);
    set_cc_static(s);
    return NO_EXIT;
}
1958 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged); fully delegated to a helper.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
1968 #endif
/* CONVERT TO DECIMAL: helper converts the 32-bit value to packed
   decimal, then store the 8-byte result at the in2 address.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP: branch around the trap when the (inverted)
   condition from mask m3 does NOT hold; insn->data selects the
   unsigned comparison variant.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert: we branch to LAB (skip the trap) when the trap condition
       is false.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
2001 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged hypervisor call); the 12-bit function code is
   taken from the displacement field.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp;

    check_privileged(s);
    potential_page_fault(s);

    /* We pretend the format is RX_a so that D2 is the field we want. */
    tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
    gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
    tcg_temp_free_i32(tmp);
    return NO_EXIT;
}
2015 #endif
/* Signed 32-bit divide; helper returns remainder:quotient pair.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Unsigned 32-bit divide; helper returns remainder:quotient pair.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Signed 64-bit divide; helper returns remainder:quotient pair.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Unsigned 128/64-bit divide (dividend in out:out2); helper returns
   remainder:quotient pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Short BFP divide via helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Long BFP divide via helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Extended BFP divide via helper; 128-bit result split across
   out / low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER: read access register r2 from env.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* EXTRACT CACHE ATTRIBUTE: we model no cache topology, return all-ones.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* EXTRACT FPC: read the floating-point-control register from env.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: r1 gets the high word of the PSW mask, r2 (if non-zero)
   the low word.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: synchronize PSW address and CC, then let the helper run the
   target instruction with the modified byte.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    /* The helper may raise or branch; state must be in sync first.  */
    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
/* LOAD FP INTEGER (short BFP): round to integral value via helper,
   modifier m3.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (long BFP) via helper, modifier m3.  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (extended BFP) via helper, modifier m3; 128-bit
   result split across out / low128.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE: R1 = leading-zero count (64 when input is zero),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    gen_helper_clz(o->out, o->in2);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2172 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2174 int m3 = get_field(s->fields, m3);
2175 int pos, len, base = s->insn->data;
2176 TCGv_i64 tmp = tcg_temp_new_i64();
2177 uint64_t ccm;
2179 switch (m3) {
2180 case 0xf:
2181 /* Effectively a 32-bit load. */
2182 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2183 len = 32;
2184 goto one_insert;
2186 case 0xc:
2187 case 0x6:
2188 case 0x3:
2189 /* Effectively a 16-bit load. */
2190 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2191 len = 16;
2192 goto one_insert;
2194 case 0x8:
2195 case 0x4:
2196 case 0x2:
2197 case 0x1:
2198 /* Effectively an 8-bit load. */
2199 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2200 len = 8;
2201 goto one_insert;
2203 one_insert:
2204 pos = base + ctz32(m3) * 8;
2205 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2206 ccm = ((1ull << len) - 1) << pos;
2207 break;
2209 default:
2210 /* This is going to be a sequence of loads and inserts. */
2211 pos = base + 32 - 8;
2212 ccm = 0;
2213 while (m3) {
2214 if (m3 & 0x8) {
2215 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2216 tcg_gen_addi_i64(o->in2, o->in2, 1);
2217 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2218 ccm |= 0xff << pos;
2220 m3 = (m3 << 1) & 0xf;
2221 pos -= 8;
2223 break;
2226 tcg_gen_movi_i64(tmp, ccm);
2227 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2228 tcg_temp_free_i64(tmp);
2229 return NO_EXIT;
/* Insert-immediate family: deposit in2 into in1 at (shift, size) taken
   from insn->data, same packing as op_andi.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK: build byte 3 of r1 from the program mask
   (psw_mask bits 40-43 -> bits 24-27) and the CC (bits 28-29);
   lower 24 bits of r1 are preserved.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the 4-bit program mask from the PSW into position 24.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Merge the CC into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2259 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged), delegated to a helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* INSERT STORAGE KEY EXTENDED (privileged), delegated to a helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2273 #endif
/* Lengthen f32 -> f64 via helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* Round f64 -> f32 via helper.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* Round f128 -> f64 via helper.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Round f128 -> f32 via helper.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Lengthen f64 -> f128 via helper; result split across out / low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Lengthen f32 -> f128 via helper; result split across out / low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 0-30 of the value.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Sign-extending 8-bit memory load.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Zero-extending 8-bit memory load.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Sign-extending 16-bit memory load.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Zero-extending 16-bit memory load.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Sign-extending 32-bit memory load.  */
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Zero-extending 32-bit memory load.  */
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* 64-bit memory load.  */
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* LOAD AND TRAP (32-bit): store the loaded value into the low half of
   r1, then trap if it was zero.  */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD AND TRAP (64-bit): load, then trap if the value was zero.  */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD HIGH AND TRAP: store the value into the high half of r1, then
   trap if it was zero.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD LOGICAL AND TRAP (32->64): zero-extending load, then trap if
   the value was zero.  */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS AND TRAP: mask to 31 bits, then trap if
   the result was zero.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
/* LOAD ON CONDITION: out = condition(m3) ? in2 : in1, implemented
   branch-free with movcond.  */
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit comparison: materialize the flag, widen it, and use it
           to steer a 64-bit movcond.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2446 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; load control registers R1..R3 via
   helper, which may fault.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD CONTROL (64-bit): privileged; load control registers R1..R3 via
   helper, which may fault.  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; helper translates the virtual address
   in in2 and sets the condition code.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* LOAD PSW (short, 8-byte format): privileged; read the two words at
   in2, widen the mask, and install the new PSW.  Never returns to the
   current translation block.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
/* LOAD PSW EXTENDED (16-byte format): privileged; read mask and address
   doublewords at in2 and install the new PSW.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2514 #endif
/* LOAD ACCESS MULTIPLE: load access registers R1..R3 from memory at in2
   via helper, which may fault.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD MULTIPLE (32-bit): load words from in2 into the low halves of
   registers R1 through R3 (wrapping modulo 16).  The first and last
   registers are loaded up front so any page fault happens before any
   register has been partially updated.  */
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE HIGH: like op_lm32, but the loaded words go into the
   high halves of registers R1 through R3 (wrapping modulo 16).  */
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
/* LOAD MULTIPLE (64-bit): load doublewords from in2 into registers R1
   through R3 (wrapping modulo 16).  The first and last registers are
   loaded up front so any page fault happens before any register has
   been updated.  */
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
2660 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged helper load from the
   real address in in2.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* LOAD USING REAL ADDRESS (64-bit): privileged helper load from the
   real address in in2.  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2676 #endif
/* Generic register move: steal in2 as the output temporary instead of
   copying, transferring its "global" flag along with it.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
/* Move with update of access register 1: performs the op_mov2 output
   steal, then sets AR1 according to the current address-space control
   in the TB flags.  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        if (b2) {
            /* Copy the access register selected by the base field.  */
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
/* Move for a register pair: steal both input temporaries as the two
   output halves, transferring the "global" flags.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (character): copy L1+1 bytes from in2 to addr1 via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MOVE LONG: helper-implemented block move using the R1/R2 register
   pairs; sets the condition code.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE LONG EXTENDED: helper-implemented block move; sets the
   condition code.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2767 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged cross-address-space move; the length is
   taken from the register selected by the l1 field.  Sets the CC.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE TO SECONDARY: privileged cross-address-space move; the length is
   taken from the register selected by the l1 field.  Sets the CC.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2787 #endif
/* MOVE PAGE: helper-implemented page move; R0 carries the operation
   flags.  Sets the condition code.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE STRING: helper copies up to the terminator byte held in R0;
   returns the updated addresses and sets the condition code.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Integer multiply: out = in1 * in2 (low 64 bits).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Unsigned 64x64->128 multiply: high half in out, low half in out2.  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (short BFP): out = in1 * in2 via softfloat helper.  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (short to long BFP) via softfloat helper.  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (long BFP) via softfloat helper.  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* MULTIPLY (extended BFP): 128-bit result returned in out/out2.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* MULTIPLY (long to extended BFP): 128-bit result in out/out2.  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
/* MULTIPLY AND ADD (long BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* MULTIPLY AND SUBTRACT (short BFP): out = in1 * in2 - f[r3].  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
/* MULTIPLY AND SUBTRACT (long BFP): out = in1 * in2 - f[r3].  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* LOAD NEGATIVE: out = -(|in2|), computed branch-free: negate, then
   select the negative of the two candidates with movcond.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* LOAD NEGATIVE (short BFP): force the 32-bit sign bit on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
/* LOAD NEGATIVE (long BFP): force the 64-bit sign bit on.  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
/* LOAD NEGATIVE (extended BFP): force the sign bit of the high
   doubleword on; the low doubleword passes through unchanged.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* AND (character): storage-to-storage AND of L1+1 bytes via helper;
   sets the condition code.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* Two's-complement negate: out = -in2.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (short BFP): flip the 32-bit sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (long BFP): flip the 64-bit sign bit.  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
/* LOAD COMPLEMENT (extended BFP): flip the sign bit of the high
   doubleword; the low doubleword passes through unchanged.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* OR (character): storage-to-storage OR of L1+1 bytes via helper;
   sets the condition code.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* Bitwise OR: out = in1 | in2.  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* OR immediate into a sub-field of the register.  The insn data encodes
   the field as (size << 8) | shift; the CC reflects only the bits of
   the targeted field.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* POPULATION COUNT via helper.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
2984 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged; implemented entirely in the helper.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
2991 #endif
/* ROTATE THEN INSERT SELECTED BITS (RISBG/RISBHG/RISBLG): rotate R2 by
   I5 and insert the bit range I3..I4 into R1, optionally zeroing the
   remaining bits (I4 bit 0x80).  Compile-time constant masks let most
   cases collapse to a deposit or a pair of ANDs.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    /* In some cases we can implement this with deposit, which can be more
       efficient on some hosts. */
    if (~mask == imask && i3 <= i4) {
        if (s->fields->op2 == 0x5d) {
            i3 += 32, i4 += 32;
        }
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        len = i4 - i3 + 1;
        pos = 63 - i4;
        rot = (i5 - pos) & 63;
    } else {
        pos = len = -1;
        rot = i5 & 63;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* ROTATE THEN {AND,OR,XOR} SELECTED BITS (RNSBG/ROSBG/RXSBG): rotate R2
   by I5 and combine the bit range I3..I4 into R1 with the boolean op
   selected by op2.  I3 bit 0x80 makes it a test-only form whose result
   is discarded; only the CC is produced.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        /* Bits outside the mask must not clear anything: force them to 1. */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Byte-swap the low 16 bits of in2 into out.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
/* Byte-swap the low 32 bits of in2 into out.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
/* Byte-swap all 64 bits of in2 into out.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
/* ROTATE LEFT SINGLE LOGICAL (32-bit): rotate the low word of in1 by
   in2, zero-extending the result into out.  */
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(t1, o->in1);
    tcg_gen_trunc_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3168 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets the CC.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* SET ADDRESS SPACE CONTROL FAST: privileged; helper updates the PSW
   address-space control bits.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
3184 #endif
/* SET ADDRESSING MODE (SAM24/SAM31/SAM64, selected by insn data 0/1/2):
   check the current PC against the new mode's address range, truncate
   next_pc, and update the AM bits in the PSW mask.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;       /* 24-bit addresses */
        break;
    case 1:
        mask = 0x7fffffff;     /* 31-bit addresses */
        break;
    default:
        mask = -1;             /* 64-bit addresses */
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
/* SET ACCESS REGISTER: store the low word of in2 into AR[r1].  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
/* SUBTRACT (short BFP) via softfloat helper.  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT (long BFP) via softfloat helper.  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT (extended BFP): 128-bit result returned in out/out2.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* SQUARE ROOT (short BFP) via softfloat helper.  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* SQUARE ROOT (long BFP) via softfloat helper.  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* SQUARE ROOT (extended BFP): 128-bit result returned in out/out2.  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3266 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; helper sets the condition code.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}
/* SIGNAL PROCESSOR: privileged inter-CPU signalling via helper; sets
   the condition code.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3285 #endif
/* STORE ON CONDITION: store R1 (word or doubleword per insn data) only
   when the M3 condition holds, by branching around the store when the
   inverted condition is true.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic): shift in1 left by in2, preserving the
   sign bit, and record operands for overflow CC computation.  The insn
   data gives the sign-bit position (31 or 63).  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: install a new floating-point control register via helper.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET FPC AND SIGNAL via helper.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET ROUNDING MODE (SRNM/SRNMB/SRNMT, selected by op2): deposit the
   B2+D2 value into the appropriate rounding-mode field of the FPC, then
   reinstall the FPC so fpu_status picks up the new mode.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC. */
    if (b2 == 0) {
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status. */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3405 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: privileged; deposit the key field of in2
   into the PSW mask.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
/* SET STORAGE KEY EXTENDED: privileged; implemented in the helper.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* SET SYSTEM MASK: privileged; replace the top byte of the PSW mask.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
/* STORE CPU ADDRESS: privileged; output the CPU number from env.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
/* STORE CLOCK: read the TOD clock via helper; CC 0 is always set.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* STORE CLOCK EXTENDED: store the TOD clock as a 16-byte value at in2,
   with the 64-bit clock shifted into its 104-bit position.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR: privileged; implemented in the helper.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
/* STORE CLOCK COMPARATOR: privileged; implemented in the helper.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE CONTROL (64-bit): privileged; store control registers R1..R3
   via helper, which may fault.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CONTROL (32-bit): privileged; store control registers R1..R3
   via helper, which may fault.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: privileged; compose cpu_num (low word) and machine_type
   (high word) into out.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}
/* SET CPU TIMER: privileged; implemented in the helper.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}
/* STORE FACILITY LIST: privileged; write a fixed facility word to the
   architected low-core location 200.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 f, a;
    /* We really ought to have more complete indication of facilities
       that we implement.  Address this when STFLE is implemented. */
    check_privileged(s);
    f = tcg_const_i64(0xc0000000);
    a = tcg_const_i64(200);
    tcg_gen_qemu_st32(f, a, get_mem_index(s));
    tcg_temp_free_i64(f);
    tcg_temp_free_i64(a);
    return NO_EXIT;
}
/* STORE CPU TIMER: privileged; implemented in the helper.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION: privileged; R0/R1 select the information
   block; helper sets the condition code.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}
/* SET PREFIX: privileged; implemented in the helper.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* Channel-subsystem instruction stub: privileged; always report CC 3
   (not operational).  */
static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* Not operational. */
    gen_op_movi_cc(s, 3);
    return NO_EXIT;
}
/* STORE PREFIX: privileged; read the prefix register, masked to its
   architected bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): privileged;
   store the current system-mask byte first, then AND or OR the I2
   immediate into the top byte of the PSW mask.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place. */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (32-bit): privileged helper store.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (64-bit): privileged helper store.  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3618 #endif
/* Store the low byte of in1 at address in2.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Store the low halfword of in1 at address in2.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Store the low word of in1 at address in2.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Store the doubleword in1 at address in2.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: store access registers R1..R3 to memory at in2
   via helper, which may fault.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of in1 selected by M3 to
   consecutive addresses starting at in2.  Contiguous masks become a
   single sized store; sparse masks fall back to per-byte stores.  The
   insn data gives the bit position of the register field's MSB.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store. */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores. */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
3704 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3706 int r1 = get_field(s->fields, r1);
3707 int r3 = get_field(s->fields, r3);
3708 int size = s->insn->data;
3709 TCGv_i64 tsize = tcg_const_i64(size);
3711 while (1) {
3712 if (size == 8) {
3713 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3714 } else {
3715 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3717 if (r1 == r3) {
3718 break;
3720 tcg_gen_add_i64(o->in2, o->in2, tsize);
3721 r1 = (r1 + 1) & 15;
3724 tcg_temp_free_i64(tsize);
3725 return NO_EXIT;
3728 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3730 int r1 = get_field(s->fields, r1);
3731 int r3 = get_field(s->fields, r3);
3732 TCGv_i64 t = tcg_temp_new_i64();
3733 TCGv_i64 t4 = tcg_const_i64(4);
3734 TCGv_i64 t32 = tcg_const_i64(32);
3736 while (1) {
3737 tcg_gen_shl_i64(t, regs[r1], t32);
3738 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3739 if (r1 == r3) {
3740 break;
3742 tcg_gen_add_i64(o->in2, o->in2, t4);
3743 r1 = (r1 + 1) & 15;
3746 tcg_temp_free_i64(t);
3747 tcg_temp_free_i64(t4);
3748 tcg_temp_free_i64(t32);
3749 return NO_EXIT;
3752 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3754 potential_page_fault(s);
3755 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3756 set_cc_static(s);
3757 return_low128(o->in2);
3758 return NO_EXIT;
3761 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3763 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3764 return NO_EXIT;
3767 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3769 DisasCompare cmp;
3770 TCGv_i64 borrow;
3772 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3774 /* The !borrow flag is the msb of CC. Since we want the inverse of
3775 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3776 disas_jcc(s, &cmp, 8 | 4);
3777 borrow = tcg_temp_new_i64();
3778 if (cmp.is_64) {
3779 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3780 } else {
3781 TCGv_i32 t = tcg_temp_new_i32();
3782 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3783 tcg_gen_extu_i32_i64(borrow, t);
3784 tcg_temp_free_i32(t);
3786 free_compare(&cmp);
3788 tcg_gen_sub_i64(o->out, o->out, borrow);
3789 tcg_temp_free_i64(borrow);
3790 return NO_EXIT;
3793 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3795 TCGv_i32 t;
3797 update_psw_addr(s);
3798 update_cc_op(s);
3800 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3801 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3802 tcg_temp_free_i32(t);
3804 t = tcg_const_i32(s->next_pc - s->pc);
3805 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3806 tcg_temp_free_i32(t);
3808 gen_exception(EXCP_SVC);
3809 return EXIT_NORETURN;
3812 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3814 gen_helper_tceb(cc_op, o->in1, o->in2);
3815 set_cc_static(s);
3816 return NO_EXIT;
3819 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3821 gen_helper_tcdb(cc_op, o->in1, o->in2);
3822 set_cc_static(s);
3823 return NO_EXIT;
3826 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3828 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3829 set_cc_static(s);
3830 return NO_EXIT;
3833 #ifndef CONFIG_USER_ONLY
3834 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3836 potential_page_fault(s);
3837 gen_helper_tprot(cc_op, o->addr1, o->in2);
3838 set_cc_static(s);
3839 return NO_EXIT;
3841 #endif
3843 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3845 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3846 potential_page_fault(s);
3847 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3848 tcg_temp_free_i32(l);
3849 set_cc_static(s);
3850 return NO_EXIT;
3853 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
3855 potential_page_fault(s);
3856 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
3857 return_low128(o->out2);
3858 set_cc_static(s);
3859 return NO_EXIT;
3862 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
3864 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3865 potential_page_fault(s);
3866 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
3867 tcg_temp_free_i32(l);
3868 set_cc_static(s);
3869 return NO_EXIT;
3872 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3874 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3875 potential_page_fault(s);
3876 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3877 tcg_temp_free_i32(l);
3878 return NO_EXIT;
3881 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3883 int d1 = get_field(s->fields, d1);
3884 int d2 = get_field(s->fields, d2);
3885 int b1 = get_field(s->fields, b1);
3886 int b2 = get_field(s->fields, b2);
3887 int l = get_field(s->fields, l1);
3888 TCGv_i32 t32;
3890 o->addr1 = get_address(s, 0, b1, d1);
3892 /* If the addresses are identical, this is a store/memset of zero. */
3893 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3894 o->in2 = tcg_const_i64(0);
3896 l++;
3897 while (l >= 8) {
3898 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3899 l -= 8;
3900 if (l > 0) {
3901 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3904 if (l >= 4) {
3905 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3906 l -= 4;
3907 if (l > 0) {
3908 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3911 if (l >= 2) {
3912 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3913 l -= 2;
3914 if (l > 0) {
3915 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3918 if (l) {
3919 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
3921 gen_op_movi_cc(s, 0);
3922 return NO_EXIT;
3925 /* But in general we'll defer to a helper. */
3926 o->in2 = get_address(s, 0, b2, d2);
3927 t32 = tcg_const_i32(l);
3928 potential_page_fault(s);
3929 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3930 tcg_temp_free_i32(t32);
3931 set_cc_static(s);
3932 return NO_EXIT;
3935 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3937 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3938 return NO_EXIT;
3941 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3943 int shift = s->insn->data & 0xff;
3944 int size = s->insn->data >> 8;
3945 uint64_t mask = ((1ull << size) - 1) << shift;
3947 assert(!o->g_in2);
3948 tcg_gen_shli_i64(o->in2, o->in2, shift);
3949 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3951 /* Produce the CC from only the bits manipulated. */
3952 tcg_gen_andi_i64(cc_dst, o->out, mask);
3953 set_cc_nz_u64(s, cc_dst);
3954 return NO_EXIT;
3957 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3959 o->out = tcg_const_i64(0);
3960 return NO_EXIT;
3963 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3965 o->out = tcg_const_i64(0);
3966 o->out2 = o->out;
3967 o->g_out2 = true;
3968 return NO_EXIT;
3971 /* ====================================================================== */
3972 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3973 the original inputs), update the various cc data structures in order to
3974 be able to compute the new condition code. */
3976 static void cout_abs32(DisasContext *s, DisasOps *o)
3978 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3981 static void cout_abs64(DisasContext *s, DisasOps *o)
3983 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3986 static void cout_adds32(DisasContext *s, DisasOps *o)
3988 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3991 static void cout_adds64(DisasContext *s, DisasOps *o)
3993 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3996 static void cout_addu32(DisasContext *s, DisasOps *o)
3998 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4001 static void cout_addu64(DisasContext *s, DisasOps *o)
4003 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4006 static void cout_addc32(DisasContext *s, DisasOps *o)
4008 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4011 static void cout_addc64(DisasContext *s, DisasOps *o)
4013 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4016 static void cout_cmps32(DisasContext *s, DisasOps *o)
4018 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4021 static void cout_cmps64(DisasContext *s, DisasOps *o)
4023 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4026 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4028 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4031 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4033 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4036 static void cout_f32(DisasContext *s, DisasOps *o)
4038 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4041 static void cout_f64(DisasContext *s, DisasOps *o)
4043 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4046 static void cout_f128(DisasContext *s, DisasOps *o)
4048 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4051 static void cout_nabs32(DisasContext *s, DisasOps *o)
4053 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4056 static void cout_nabs64(DisasContext *s, DisasOps *o)
4058 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4061 static void cout_neg32(DisasContext *s, DisasOps *o)
4063 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4066 static void cout_neg64(DisasContext *s, DisasOps *o)
4068 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4071 static void cout_nz32(DisasContext *s, DisasOps *o)
4073 tcg_gen_ext32u_i64(cc_dst, o->out);
4074 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4077 static void cout_nz64(DisasContext *s, DisasOps *o)
4079 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4082 static void cout_s32(DisasContext *s, DisasOps *o)
4084 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4087 static void cout_s64(DisasContext *s, DisasOps *o)
4089 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4092 static void cout_subs32(DisasContext *s, DisasOps *o)
4094 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4097 static void cout_subs64(DisasContext *s, DisasOps *o)
4099 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4102 static void cout_subu32(DisasContext *s, DisasOps *o)
4104 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4107 static void cout_subu64(DisasContext *s, DisasOps *o)
4109 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4112 static void cout_subb32(DisasContext *s, DisasOps *o)
4114 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4117 static void cout_subb64(DisasContext *s, DisasOps *o)
4119 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4122 static void cout_tm32(DisasContext *s, DisasOps *o)
4124 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4127 static void cout_tm64(DisasContext *s, DisasOps *o)
4129 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4132 /* ====================================================================== */
4133 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4134 with the TCG register to which we will write. Used in combination with
4135 the "wout" generators, in some cases we need a new temporary, and in
4136 some cases we can write to a TCG global. */
4138 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4140 o->out = tcg_temp_new_i64();
4142 #define SPEC_prep_new 0
4144 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4146 o->out = tcg_temp_new_i64();
4147 o->out2 = tcg_temp_new_i64();
4149 #define SPEC_prep_new_P 0
4151 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4153 o->out = regs[get_field(f, r1)];
4154 o->g_out = true;
4156 #define SPEC_prep_r1 0
4158 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4160 int r1 = get_field(f, r1);
4161 o->out = regs[r1];
4162 o->out2 = regs[r1 + 1];
4163 o->g_out = o->g_out2 = true;
4165 #define SPEC_prep_r1_P SPEC_r1_even
4167 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4169 o->out = fregs[get_field(f, r1)];
4170 o->g_out = true;
4172 #define SPEC_prep_f1 0
4174 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4176 int r1 = get_field(f, r1);
4177 o->out = fregs[r1];
4178 o->out2 = fregs[r1 + 2];
4179 o->g_out = o->g_out2 = true;
4181 #define SPEC_prep_x1 SPEC_r1_f128
4183 /* ====================================================================== */
4184 /* The "Write OUTput" generators. These generally perform some non-trivial
4185 copy of data to TCG globals, or to main memory. The trivial cases are
4186 generally handled by having a "prep" generator install the TCG global
4187 as the destination of the operation. */
4189 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4191 store_reg(get_field(f, r1), o->out);
4193 #define SPEC_wout_r1 0
4195 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4197 int r1 = get_field(f, r1);
4198 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4200 #define SPEC_wout_r1_8 0
4202 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4204 int r1 = get_field(f, r1);
4205 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4207 #define SPEC_wout_r1_16 0
4209 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4211 store_reg32_i64(get_field(f, r1), o->out);
4213 #define SPEC_wout_r1_32 0
4215 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4217 int r1 = get_field(f, r1);
4218 store_reg32_i64(r1, o->out);
4219 store_reg32_i64(r1 + 1, o->out2);
4221 #define SPEC_wout_r1_P32 SPEC_r1_even
4223 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4225 int r1 = get_field(f, r1);
4226 store_reg32_i64(r1 + 1, o->out);
4227 tcg_gen_shri_i64(o->out, o->out, 32);
4228 store_reg32_i64(r1, o->out);
4230 #define SPEC_wout_r1_D32 SPEC_r1_even
4232 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4234 store_freg32_i64(get_field(f, r1), o->out);
4236 #define SPEC_wout_e1 0
4238 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4240 store_freg(get_field(f, r1), o->out);
4242 #define SPEC_wout_f1 0
4244 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4246 int f1 = get_field(s->fields, r1);
4247 store_freg(f1, o->out);
4248 store_freg(f1 + 2, o->out2);
4250 #define SPEC_wout_x1 SPEC_r1_f128
4252 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4254 if (get_field(f, r1) != get_field(f, r2)) {
4255 store_reg32_i64(get_field(f, r1), o->out);
4258 #define SPEC_wout_cond_r1r2_32 0
4260 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4262 if (get_field(f, r1) != get_field(f, r2)) {
4263 store_freg32_i64(get_field(f, r1), o->out);
4266 #define SPEC_wout_cond_e1e2 0
4268 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4270 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4272 #define SPEC_wout_m1_8 0
4274 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4276 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4278 #define SPEC_wout_m1_16 0
4280 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4282 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4284 #define SPEC_wout_m1_32 0
4286 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4288 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4290 #define SPEC_wout_m1_64 0
4292 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4294 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4296 #define SPEC_wout_m2_32 0
4298 static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4300 /* XXX release reservation */
4301 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4302 store_reg32_i64(get_field(f, r1), o->in2);
4304 #define SPEC_wout_m2_32_r1_atomic 0
4306 static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4308 /* XXX release reservation */
4309 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4310 store_reg(get_field(f, r1), o->in2);
4312 #define SPEC_wout_m2_64_r1_atomic 0
4314 /* ====================================================================== */
4315 /* The "INput 1" generators. These load the first operand to an insn. */
4317 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4319 o->in1 = load_reg(get_field(f, r1));
4321 #define SPEC_in1_r1 0
4323 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4325 o->in1 = regs[get_field(f, r1)];
4326 o->g_in1 = true;
4328 #define SPEC_in1_r1_o 0
4330 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4332 o->in1 = tcg_temp_new_i64();
4333 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4335 #define SPEC_in1_r1_32s 0
4337 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4339 o->in1 = tcg_temp_new_i64();
4340 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4342 #define SPEC_in1_r1_32u 0
4344 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4346 o->in1 = tcg_temp_new_i64();
4347 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4349 #define SPEC_in1_r1_sr32 0
4351 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4353 o->in1 = load_reg(get_field(f, r1) + 1);
4355 #define SPEC_in1_r1p1 SPEC_r1_even
4357 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4359 o->in1 = tcg_temp_new_i64();
4360 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4362 #define SPEC_in1_r1p1_32s SPEC_r1_even
4364 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4366 o->in1 = tcg_temp_new_i64();
4367 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4369 #define SPEC_in1_r1p1_32u SPEC_r1_even
4371 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4373 int r1 = get_field(f, r1);
4374 o->in1 = tcg_temp_new_i64();
4375 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4377 #define SPEC_in1_r1_D32 SPEC_r1_even
4379 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4381 o->in1 = load_reg(get_field(f, r2));
4383 #define SPEC_in1_r2 0
4385 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4387 o->in1 = load_reg(get_field(f, r3));
4389 #define SPEC_in1_r3 0
4391 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4393 o->in1 = regs[get_field(f, r3)];
4394 o->g_in1 = true;
4396 #define SPEC_in1_r3_o 0
4398 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4400 o->in1 = tcg_temp_new_i64();
4401 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4403 #define SPEC_in1_r3_32s 0
4405 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4407 o->in1 = tcg_temp_new_i64();
4408 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4410 #define SPEC_in1_r3_32u 0
4412 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4414 int r3 = get_field(f, r3);
4415 o->in1 = tcg_temp_new_i64();
4416 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4418 #define SPEC_in1_r3_D32 SPEC_r3_even
4420 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4422 o->in1 = load_freg32_i64(get_field(f, r1));
4424 #define SPEC_in1_e1 0
4426 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4428 o->in1 = fregs[get_field(f, r1)];
4429 o->g_in1 = true;
4431 #define SPEC_in1_f1_o 0
4433 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4435 int r1 = get_field(f, r1);
4436 o->out = fregs[r1];
4437 o->out2 = fregs[r1 + 2];
4438 o->g_out = o->g_out2 = true;
4440 #define SPEC_in1_x1_o SPEC_r1_f128
4442 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4444 o->in1 = fregs[get_field(f, r3)];
4445 o->g_in1 = true;
4447 #define SPEC_in1_f3_o 0
4449 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4451 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4453 #define SPEC_in1_la1 0
4455 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4457 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4458 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4460 #define SPEC_in1_la2 0
4462 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4464 in1_la1(s, f, o);
4465 o->in1 = tcg_temp_new_i64();
4466 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4468 #define SPEC_in1_m1_8u 0
4470 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4472 in1_la1(s, f, o);
4473 o->in1 = tcg_temp_new_i64();
4474 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4476 #define SPEC_in1_m1_16s 0
4478 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4480 in1_la1(s, f, o);
4481 o->in1 = tcg_temp_new_i64();
4482 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4484 #define SPEC_in1_m1_16u 0
4486 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4488 in1_la1(s, f, o);
4489 o->in1 = tcg_temp_new_i64();
4490 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4492 #define SPEC_in1_m1_32s 0
4494 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4496 in1_la1(s, f, o);
4497 o->in1 = tcg_temp_new_i64();
4498 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4500 #define SPEC_in1_m1_32u 0
4502 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4504 in1_la1(s, f, o);
4505 o->in1 = tcg_temp_new_i64();
4506 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4508 #define SPEC_in1_m1_64 0
4510 /* ====================================================================== */
4511 /* The "INput 2" generators. These load the second operand to an insn. */
4513 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4515 o->in2 = regs[get_field(f, r1)];
4516 o->g_in2 = true;
4518 #define SPEC_in2_r1_o 0
4520 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4522 o->in2 = tcg_temp_new_i64();
4523 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4525 #define SPEC_in2_r1_16u 0
4527 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4529 o->in2 = tcg_temp_new_i64();
4530 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4532 #define SPEC_in2_r1_32u 0
4534 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4536 int r1 = get_field(f, r1);
4537 o->in2 = tcg_temp_new_i64();
4538 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4540 #define SPEC_in2_r1_D32 SPEC_r1_even
4542 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4544 o->in2 = load_reg(get_field(f, r2));
4546 #define SPEC_in2_r2 0
4548 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4550 o->in2 = regs[get_field(f, r2)];
4551 o->g_in2 = true;
4553 #define SPEC_in2_r2_o 0
4555 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4557 int r2 = get_field(f, r2);
4558 if (r2 != 0) {
4559 o->in2 = load_reg(r2);
4562 #define SPEC_in2_r2_nz 0
4564 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4566 o->in2 = tcg_temp_new_i64();
4567 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4569 #define SPEC_in2_r2_8s 0
4571 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4573 o->in2 = tcg_temp_new_i64();
4574 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4576 #define SPEC_in2_r2_8u 0
4578 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4580 o->in2 = tcg_temp_new_i64();
4581 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4583 #define SPEC_in2_r2_16s 0
4585 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4587 o->in2 = tcg_temp_new_i64();
4588 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4590 #define SPEC_in2_r2_16u 0
4592 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4594 o->in2 = load_reg(get_field(f, r3));
4596 #define SPEC_in2_r3 0
4598 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4600 o->in2 = tcg_temp_new_i64();
4601 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4603 #define SPEC_in2_r2_32s 0
4605 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4607 o->in2 = tcg_temp_new_i64();
4608 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4610 #define SPEC_in2_r2_32u 0
4612 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4614 o->in2 = load_freg32_i64(get_field(f, r2));
4616 #define SPEC_in2_e2 0
4618 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4620 o->in2 = fregs[get_field(f, r2)];
4621 o->g_in2 = true;
4623 #define SPEC_in2_f2_o 0
4625 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4627 int r2 = get_field(f, r2);
4628 o->in1 = fregs[r2];
4629 o->in2 = fregs[r2 + 2];
4630 o->g_in1 = o->g_in2 = true;
4632 #define SPEC_in2_x2_o SPEC_r2_f128
4634 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4636 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4638 #define SPEC_in2_ra2 0
4640 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4642 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4643 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4645 #define SPEC_in2_a2 0
4647 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4649 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4651 #define SPEC_in2_ri2 0
4653 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4655 help_l2_shift(s, f, o, 31);
4657 #define SPEC_in2_sh32 0
4659 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4661 help_l2_shift(s, f, o, 63);
4663 #define SPEC_in2_sh64 0
4665 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4667 in2_a2(s, f, o);
4668 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4670 #define SPEC_in2_m2_8u 0
4672 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4674 in2_a2(s, f, o);
4675 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4677 #define SPEC_in2_m2_16s 0
4679 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4681 in2_a2(s, f, o);
4682 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4684 #define SPEC_in2_m2_16u 0
4686 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4688 in2_a2(s, f, o);
4689 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4691 #define SPEC_in2_m2_32s 0
4693 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4695 in2_a2(s, f, o);
4696 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4698 #define SPEC_in2_m2_32u 0
4700 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4702 in2_a2(s, f, o);
4703 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4705 #define SPEC_in2_m2_64 0
4707 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4709 in2_ri2(s, f, o);
4710 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4712 #define SPEC_in2_mri2_16u 0
4714 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4716 in2_ri2(s, f, o);
4717 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4719 #define SPEC_in2_mri2_32s 0
4721 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4723 in2_ri2(s, f, o);
4724 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4726 #define SPEC_in2_mri2_32u 0
4728 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4730 in2_ri2(s, f, o);
4731 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4733 #define SPEC_in2_mri2_64 0
4735 static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4737 /* XXX should reserve the address */
4738 in1_la2(s, f, o);
4739 o->in2 = tcg_temp_new_i64();
4740 tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
4742 #define SPEC_in2_m2_32s_atomic 0
4744 static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
4746 /* XXX should reserve the address */
4747 in1_la2(s, f, o);
4748 o->in2 = tcg_temp_new_i64();
4749 tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
4751 #define SPEC_in2_m2_64_atomic 0
4753 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4755 o->in2 = tcg_const_i64(get_field(f, i2));
4757 #define SPEC_in2_i2 0
4759 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4761 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4763 #define SPEC_in2_i2_8u 0
4765 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4767 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4769 #define SPEC_in2_i2_16u 0
4771 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4773 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4775 #define SPEC_in2_i2_32u 0
4777 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4779 uint64_t i2 = (uint16_t)get_field(f, i2);
4780 o->in2 = tcg_const_i64(i2 << s->insn->data);
4782 #define SPEC_in2_i2_16u_shl 0
4784 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4786 uint64_t i2 = (uint32_t)get_field(f, i2);
4787 o->in2 = tcg_const_i64(i2 << s->insn->data);
4789 #define SPEC_in2_i2_32u_shl 0
4791 /* ====================================================================== */
4793 /* Find opc within the table of insns. This is formulated as a switch
4794 statement so that (1) we get compile-time notice of cut-paste errors
4795 for duplicated opcodes, and (2) the compiler generates the binary
4796 search tree, rather than us having to post-process the table. */
4798 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4799 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4801 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4803 enum DisasInsnEnum {
4804 #include "insn-data.def"
4807 #undef D
4808 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4809 .opc = OPC, \
4810 .fmt = FMT_##FT, \
4811 .fac = FAC_##FC, \
4812 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4813 .name = #NM, \
4814 .help_in1 = in1_##I1, \
4815 .help_in2 = in2_##I2, \
4816 .help_prep = prep_##P, \
4817 .help_wout = wout_##W, \
4818 .help_cout = cout_##CC, \
4819 .help_op = op_##OP, \
4820 .data = D \
4823 /* Allow 0 to be used for NULL in the table below. */
4824 #define in1_0 NULL
4825 #define in2_0 NULL
4826 #define prep_0 NULL
4827 #define wout_0 NULL
4828 #define cout_0 NULL
4829 #define op_0 NULL
4831 #define SPEC_in1_0 0
4832 #define SPEC_in2_0 0
4833 #define SPEC_prep_0 0
4834 #define SPEC_wout_0 0
4836 static const DisasInsn insn_info[] = {
4837 #include "insn-data.def"
4840 #undef D
4841 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4842 case OPC: return &insn_info[insn_ ## NM];
4844 static const DisasInsn *lookup_opc(uint16_t opc)
4846 switch (opc) {
4847 #include "insn-data.def"
4848 default:
4849 return NULL;
4853 #undef D
4854 #undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principals of Operation.  */

static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
{
    uint32_t r, m;

    /* A zero-size field means "not present in this format".  */
    if (f->size == 0) {
        return;
    }

    /* Zero extract the field from the insn.  */
    r = (insn << f->beg) >> (64 - f->size);

    /* Sign-extend, or un-swap the field as necessary.  */
    switch (f->type) {
    case 0: /* unsigned */
        break;
    case 1: /* signed */
        assert(f->size <= 32);
        /* Standard two's-complement sign extension: xor with the sign
           bit's weight, then subtract it.  */
        m = 1u << (f->size - 1);
        r = (r ^ m) - m;
        break;
    case 2: /* dl+dh split, signed 20 bit.  */
        /* R currently holds DL:DH (low 12 bits first, then the high
           byte); swap the halves and sign-extend from the DH byte.
           NOTE(review): left-shifting a negative int8_t is formally UB
           in ISO C, though it works on the compilers QEMU supports.  */
        r = ((int8_t)r << 12) | (r >> 8);
        break;
    default:
        abort();
    }

    /* Validate that the "compressed" encoding we selected above is valid.
       I.e. we haven't made two different original fields overlap.  */
    assert(((o->presentC >> f->indexC) & 1) == 0);
    o->presentC |= 1 << f->indexC;
    o->presentO |= 1 << f->indexO;

    o->c[f->indexC] = r;
}
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */

static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
                                     DisasFields *f)
{
    uint64_t insn, pc = s->pc;
    int op, op2, ilen;
    const DisasInsn *info;

    /* The first opcode byte determines the instruction length (2/4/6).  */
    insn = ld_code2(env, pc);
    op = (insn >> 8) & 0xff;
    ilen = get_ilen(op);
    s->next_pc = s->pc + ilen;

    /* Left-align the whole insn in the uint64_t, as extract_field
       expects (big-bit-endian field positions).  */
    switch (ilen) {
    case 2:
        insn = insn << 48;
        break;
    case 4:
        insn = ld_code4(env, pc) << 32;
        break;
    case 6:
        insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
        break;
    default:
        abort();
    }

    /* We can't actually determine the insn format until we've looked up
       the full insn opcode.  Which we can't do without locating the
       secondary opcode.  Assume by default that OP2 is at bit 40; for
       those smaller insns that don't actually have a secondary opcode
       this will correctly result in OP2 = 0.  */
    switch (op) {
    case 0x01: /* E */
    case 0x80: /* S */
    case 0x82: /* S */
    case 0x93: /* S */
    case 0xb2: /* S, RRF, RRE */
    case 0xb3: /* RRE, RRD, RRF */
    case 0xb9: /* RRE, RRF */
    case 0xe5: /* SSE, SIL */
        /* Secondary opcode is the second byte (bits 8-15).  */
        op2 = (insn << 8) >> 56;
        break;
    case 0xa5: /* RI */
    case 0xa7: /* RI */
    case 0xc0: /* RIL */
    case 0xc2: /* RIL */
    case 0xc4: /* RIL */
    case 0xc6: /* RIL */
    case 0xc8: /* SSF */
    case 0xcc: /* RIL */
        /* Secondary opcode is the low nibble of the second byte.  */
        op2 = (insn << 12) >> 60;
        break;
    case 0xd0 ... 0xdf: /* SS */
    case 0xe1: /* SS */
    case 0xe2: /* SS */
    case 0xe8: /* SS */
    case 0xe9: /* SS */
    case 0xea: /* SS */
    case 0xee ... 0xf3: /* SS */
    case 0xf8 ... 0xfd: /* SS */
        /* No secondary opcode at all for these formats.  */
        op2 = 0;
        break;
    default:
        /* Default: OP2 at bit 40 (byte 5 of a 6-byte insn).  */
        op2 = (insn << 40) >> 56;
        break;
    }

    memset(f, 0, sizeof(*f));
    f->op = op;
    f->op2 = op2;

    /* Lookup the instruction.  */
    info = lookup_opc(op << 8 | op2);

    /* If we found it, extract the operands.  */
    if (info != NULL) {
        DisasFormat fmt = info->fmt;
        int i;

        for (i = 0; i < NUM_C_FIELD; ++i) {
            extract_field(f, &format_info[fmt].op[i], insn);
        }
    }
    return info;
}
/* Decode and translate a single instruction at s->pc, emitting TCG ops.
   Returns the translator exit status and advances s->pc.  */
static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    ExitStatus ret = NO_EXIT;
    DisasFields f;
    DisasOps o;

    /* Search for the insn in the table.  */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode.  */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return EXIT_NORETURN;
    }

    /* Check for insn specification exceptions.  */
    if (insn->spec) {
        int spec = insn->spec, excp = 0, r;

        /* Register-pair operands must name the even register.  */
        if (spec & SPEC_r1_even) {
            r = get_field(&f, r1);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_even) {
            r = get_field(&f, r2);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r3_even) {
            r = get_field(&f, r3);
            if (r & 1) {
                excp = PGM_SPECIFICATION;
            }
        }
        /* 128-bit FP operands: registers above 13 cannot start a valid
           float-128 register pair.  */
        if (spec & SPEC_r1_f128) {
            r = get_field(&f, r1);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (spec & SPEC_r2_f128) {
            r = get_field(&f, r2);
            if (r > 13) {
                excp = PGM_SPECIFICATION;
            }
        }
        if (excp) {
            gen_program_exception(s, excp);
            return EXIT_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers.  */
    s->insn = insn;
    s->fields = &f;
    o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
    TCGV_UNUSED_I64(o.out);
    TCGV_UNUSED_I64(o.out2);
    TCGV_UNUSED_I64(o.in1);
    TCGV_UNUSED_I64(o.in2);
    TCGV_UNUSED_I64(o.addr1);

    /* Implement the instruction.  Each stage is optional; the pipeline
       is: load inputs, prep outputs, emit the op, write back outputs,
       then compute the condition code.  */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    if (insn->help_wout) {
        insn->help_wout(s, &f, &o);
    }
    if (insn->help_cout) {
        insn->help_cout(s, &o);
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       operands that alias globals and must not be freed.  */
    if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (!TCGV_IS_UNUSED_I64(o.addr1)) {
        tcg_temp_free_i64(o.addr1);
    }

    /* Advance to the next instruction.  */
    s->pc = s->next_pc;
    return ret;
}
/* Translate a block of guest insns starting at tb->pc into TCG ops.
   With search_pc set, also record per-op guest PC / cc_op bookkeeping
   into the gen_opc_* arrays for later state restoration.  */
static inline void gen_intermediate_code_internal(S390CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUS390XState *env = &cpu->env;
    DisasContext dc;
    target_ulong pc_start;
    uint64_t next_page_start;
    int j, lj = -1;
    int num_insns, max_insns;
    CPUBreakpoint *bp;
    ExitStatus status;
    bool do_debug;

    pc_start = tb->pc;

    /* 31-bit mode */
    if (!(tb->flags & FLAG_MASK_64)) {
        pc_start &= 0x7fffffff;
    }

    dc.tb = tb;
    dc.pc = pc_start;
    dc.cc_op = CC_OP_DYNAMIC;
    do_debug = dc.singlestep_enabled = cs->singlestep_enabled;

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start(tb);

    do {
        if (search_pc) {
            /* Pad the bookkeeping arrays up to the current op index,
               then record this insn's PC, cc_op and icount.  */
            j = tcg_op_buf_count();
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc.pc;
            gen_opc_cc_op[lj] = dc.cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(dc.pc);
        }

        /* A breakpoint at this PC forces a TB exit before translating
           the insn, so the debug exception is raised at the right PC.  */
        status = NO_EXIT;
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == dc.pc) {
                    status = EXIT_PC_STALE;
                    do_debug = true;
                    break;
                }
            }
        }
        if (status == NO_EXIT) {
            status = translate_one(env, &dc);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (status == NO_EXIT
            && (dc.pc >= next_page_start
                || tcg_op_buf_full()
                || num_insns >= max_insns
                || singlestep
                || cs->singlestep_enabled)) {
            status = EXIT_PC_STALE;
        }
    } while (status == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (status) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        /* The TB epilogue was already emitted by the insn itself.  */
        break;
    case EXIT_PC_STALE:
        update_psw_addr(&dc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(&dc);
        /* Exit the TB, either by raising a debug exception or by return.  */
        if (do_debug) {
            gen_exception(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    if (search_pc) {
        /* Zero-fill the tail of the bookkeeping arrays.  */
        j = tcg_op_buf_count();
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc.pc - pc_start;
        tb->icount = num_insns;
    }

#if defined(S390X_DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, dc.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
5228 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
5230 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
5233 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
5235 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
/* Restore env->psw.addr (and possibly env->cc_op) from the bookkeeping
   recorded at translation time, after a TB was interrupted at op
   index pc_pos.  */
void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
    env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
    cc_op = gen_opc_cc_op[pc_pos];
    /* DYNAMIC/STATIC mean env->cc_op already holds the live value;
       don't overwrite it with the placeholder constant.  */
    if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
        env->cc_op = cc_op;
    }
}