exec: Protect map_client_list with mutex
[qemu/ar7.git] / target-s390x / translate.c
blob4f82edde5b72bfa80d6185089cb02d8ad6c110d8
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "cpu.h"
32 #include "disas/disas.h"
33 #include "tcg-op.h"
34 #include "qemu/log.h"
35 #include "qemu/host-utils.h"
36 #include "exec/cpu_ldst.h"
38 /* global register indexes */
39 static TCGv_ptr cpu_env;
41 #include "exec/gen-icount.h"
42 #include "exec/helper-proto.h"
43 #include "exec/helper-gen.h"
45 #include "trace-tcg.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
/* Per-translation-block disassembly state, threaded through every helper
   while one guest instruction is translated to TCG ops.
   NOTE(review): this blob view dropped blank/brace lines during extraction;
   code text below is kept verbatim. */
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;          /* decode-table entry for the current insn */
56 DisasFields *fields;            /* decoded operand fields of the insn */
57 uint64_t pc, next_pc;           /* guest PC of this insn / of the next insn */
58 enum cc_op cc_op;               /* how the condition code is currently computed */
59 bool singlestep_enabled;
62 /* Information carried about a condition to be evaluated. */
63 typedef struct {
64 TCGCond cond:8;                 /* TCG comparison to apply to a vs. b */
65 bool is_64;                     /* true: use u.s64; false: use u.s32 */
66 bool g1;                        /* a is a global TCG value - do not free it */
67 bool g2;                        /* b is a global TCG value - do not free it */
68 union {
69 struct { TCGv_i64 a, b; } s64;
70 struct { TCGv_i32 a, b; } s32;
71 } u;
72 } DisasCompare;
74 #define DISAS_EXCP 4
76 #ifdef DEBUG_INLINE_BRANCHES
77 static uint64_t inline_branch_hit[CC_OP_MAX];
78 static uint64_t inline_branch_miss[CC_OP_MAX];
79 #endif
/* Compute the value a branch-and-link style insn stores as its link info.
   In 64-bit mode the raw PC is returned; otherwise, when the 32-bit flag is
   set, bit 31 is OR'ed in (presumably the 31-bit addressing-mode bit of the
   architected link format - confirm against the z/Architecture PoP). */
81 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
83 if (!(s->tb->flags & FLAG_MASK_64)) {
84 if (s->tb->flags & FLAG_MASK_32) {
85 return pc | 0x80000000;
88 return pc;
/* Dump the CPU state (PSW, GPRs, FPRs, and in system mode the control
   registers) to stream F via CPU_FPRINTF, four registers per output line.
   The FLAGS argument is accepted but unused here. */
91 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
92 int flags)
94 S390CPU *cpu = S390_CPU(cs);
95 CPUS390XState *env = &cpu->env;
96 int i;
/* cc_op values above 3 are symbolic "how to compute cc" states, so print
   the name; values 0..3 are the literal condition code. */
98 if (env->cc_op > 3) {
99 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
100 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
101 } else {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
103 env->psw.mask, env->psw.addr, env->cc_op);
/* General registers, 4 per line. */
106 for (i = 0; i < 16; i++) {
107 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
108 if ((i % 4) == 3) {
109 cpu_fprintf(f, "\n");
110 } else {
111 cpu_fprintf(f, " ");
/* Floating point registers, 4 per line. */
115 for (i = 0; i < 16; i++) {
116 cpu_fprintf(f, "F%02d=%016" PRIx64, i, env->fregs[i].ll);
117 if ((i % 4) == 3) {
118 cpu_fprintf(f, "\n");
119 } else {
120 cpu_fprintf(f, " ");
/* Control registers exist only in system emulation. */
124 #ifndef CONFIG_USER_ONLY
125 for (i = 0; i < 16; i++) {
126 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
127 if ((i % 4) == 3) {
128 cpu_fprintf(f, "\n");
129 } else {
130 cpu_fprintf(f, " ");
133 #endif
/* Optional inline-branch statistics (see DEBUG_INLINE_BRANCHES above). */
135 #ifdef DEBUG_INLINE_BRANCHES
136 for (i = 0; i < CC_OP_MAX; i++) {
137 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
138 inline_branch_miss[i], inline_branch_hit[i]);
140 #endif
142 cpu_fprintf(f, "\n");
145 static TCGv_i64 psw_addr;
146 static TCGv_i64 psw_mask;
148 static TCGv_i32 cc_op;
149 static TCGv_i64 cc_src;
150 static TCGv_i64 cc_dst;
151 static TCGv_i64 cc_vr;
153 static char cpu_reg_names[32][4];
154 static TCGv_i64 regs[16];
155 static TCGv_i64 fregs[16];
157 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
/* One-time initialization of the TCG globals used by this translator:
   cpu_env, the PSW pieces, the cc computation inputs (cc_src/cc_dst/cc_vr),
   and one TCG global per general and floating point register.  The register
   names live in cpu_reg_names[] because TCG keeps only the name pointer. */
159 void s390x_translate_init(void)
161 int i;
163 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
164 psw_addr = tcg_global_mem_new_i64(TCG_AREG0,
165 offsetof(CPUS390XState, psw.addr),
166 "psw_addr");
167 psw_mask = tcg_global_mem_new_i64(TCG_AREG0,
168 offsetof(CPUS390XState, psw.mask),
169 "psw_mask");
171 cc_op = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUS390XState, cc_op),
172 "cc_op");
173 cc_src = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_src),
174 "cc_src");
175 cc_dst = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_dst),
176 "cc_dst");
177 cc_vr = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUS390XState, cc_vr),
178 "cc_vr");
/* 16 general registers: names "r0".."r15" in slots 0..15. */
180 for (i = 0; i < 16; i++) {
181 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
182 regs[i] = tcg_global_mem_new(TCG_AREG0,
183 offsetof(CPUS390XState, regs[i]),
184 cpu_reg_names[i]);
/* 16 FP registers: names "f0".."f15" in slots 16..31. */
187 for (i = 0; i < 16; i++) {
188 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
189 fregs[i] = tcg_global_mem_new(TCG_AREG0,
190 offsetof(CPUS390XState, fregs[i].d),
191 cpu_reg_names[i + 16]);
/* Return a fresh temporary holding a copy of general register REG.
   Caller owns (and must free) the returned temp. */
195 static TCGv_i64 load_reg(int reg)
197 TCGv_i64 r = tcg_temp_new_i64();
198 tcg_gen_mov_i64(r, regs[reg]);
199 return r;
/* Return a fresh temporary with the high 32 bits of FP register REG
   shifted down to the low half (i.e. the short-format float value). */
202 static TCGv_i64 load_freg32_i64(int reg)
204 TCGv_i64 r = tcg_temp_new_i64();
205 tcg_gen_shri_i64(r, fregs[reg], 32);
206 return r;
/* Store V into general register REG (full 64 bits). */
209 static void store_reg(int reg, TCGv_i64 v)
211 tcg_gen_mov_i64(regs[reg], v);
/* Store V into FP register REG (full 64 bits). */
214 static void store_freg(int reg, TCGv_i64 v)
216 tcg_gen_mov_i64(fregs[reg], v);
219 static void store_reg32_i64(int reg, TCGv_i64 v)
221 /* 32 bit register writes keep the upper half */
222 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
/* Write the low 32 bits of V into the HIGH half of register REG,
   keeping the low half intact. */
225 static void store_reg32h_i64(int reg, TCGv_i64 v)
227 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
/* Write a short-format float (low 32 bits of V) into the high half of
   FP register REG, keeping the low half intact. */
230 static void store_freg32_i64(int reg, TCGv_i64 v)
232 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
/* Fetch the low 64 bits of a helper's 128-bit result from env->retxl. */
235 static void return_low128(TCGv_i64 dest)
237 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
/* Synchronize the architected PSW address with the translator's PC. */
240 static void update_psw_addr(DisasContext *s)
242 /* psw.addr */
243 tcg_gen_movi_i64(psw_addr, s->pc);
/* Materialize s->cc_op into the cc_op global, unless it is DYNAMIC
   (already in env) or STATIC (cc_op global already holds the value). */
246 static void update_cc_op(DisasContext *s)
248 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
249 tcg_gen_movi_i32(cc_op, s->cc_op);
/* Called before ops that may fault: make PSW address and cc state in env
   consistent so the exception path sees correct values. */
253 static void potential_page_fault(DisasContext *s)
255 update_psw_addr(s);
256 update_cc_op(s);
/* Fetch 2 bytes of instruction text at PC (zero-extended). */
259 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
261 return (uint64_t)cpu_lduw_code(env, pc);
/* Fetch 4 bytes of instruction text at PC (zero-extended). */
264 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
266 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
/* Map the PSW address-space-control bits (cached in tb->flags) to the
   TCG mmu index: 0 = primary, 1 = secondary, 2 = home space.  Any other
   ASC value is a translator bug, hence tcg_abort(). */
269 static int get_mem_index(DisasContext *s)
271 switch (s->tb->flags & FLAG_MASK_ASC) {
272 case PSW_ASC_PRIMARY >> 32:
273 return 0;
274 case PSW_ASC_SECONDARY >> 32:
275 return 1;
276 case PSW_ASC_HOME >> 32:
277 return 2;
278 default:
279 tcg_abort();
280 break;
/* Emit a call to the exception helper for exception number EXCP. */
284 static void gen_exception(int excp)
286 TCGv_i32 tmp = tcg_const_i32(excp);
287 gen_helper_exception(cpu_env, tmp);
288 tcg_temp_free_i32(tmp);
/* Raise a program exception with the given CODE: record the code and the
   instruction length in env, advance the PSW past the instruction, flush
   the cc state, then trap into EXCP_PGM. */
291 static void gen_program_exception(DisasContext *s, int code)
293 TCGv_i32 tmp;
295 /* Remember what pgm exeption this was. */
296 tmp = tcg_const_i32(code);
297 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
298 tcg_temp_free_i32(tmp);
/* Instruction length = distance to the next instruction. */
300 tmp = tcg_const_i32(s->next_pc - s->pc);
301 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
302 tcg_temp_free_i32(tmp);
304 /* Advance past instruction. */
305 s->pc = s->next_pc;
306 update_psw_addr(s);
308 /* Save off cc. */
309 update_cc_op(s);
311 /* Trigger exception. */
312 gen_exception(EXCP_PGM);
/* Convenience wrapper: illegal opcode == specification exception here. */
315 static inline void gen_illegal_opcode(DisasContext *s)
317 gen_program_exception(s, PGM_SPECIFICATION);
/* System mode only: raise a privileged-operation exception when the TB
   was translated in problem state (PSW_MASK_PSTATE set). */
320 #ifndef CONFIG_USER_ONLY
321 static void check_privileged(DisasContext *s)
323 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
324 gen_program_exception(s, PGM_PRIVILEGED);
327 #endif
/* Compute the effective address base(b2) + index(x2) + displacement d2
   into a fresh temp, applying the 31-bit address mask when not in 64-bit
   mode.  Register 0 never participates as base or index (hence the b2/x2
   truth tests).  Caller frees the returned temp. */
329 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
331 TCGv_i64 tmp = tcg_temp_new_i64();
332 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
334 /* Note that d2 is limited to 20 bits, signed. If we crop negative
335 displacements early we create larger immedate addends. */
337 /* Note that addi optimizes the imm==0 case. */
338 if (b2 && x2) {
339 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
340 tcg_gen_addi_i64(tmp, tmp, d2);
341 } else if (b2) {
342 tcg_gen_addi_i64(tmp, regs[b2], d2);
343 } else if (x2) {
344 tcg_gen_addi_i64(tmp, regs[x2], d2);
345 } else {
/* Pure displacement: mask at translation time, no runtime AND needed. */
346 if (need_31) {
347 d2 &= 0x7fffffff;
348 need_31 = false;
350 tcg_gen_movi_i64(tmp, d2);
/* 31-bit mode: strip the high bits of the computed address. */
352 if (need_31) {
353 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
356 return tmp;
/* True when cc_src/cc_dst/cc_vr currently carry data needed to compute the
   cc (i.e. cc_op is a real computation, not DYNAMIC/STATIC/a constant 0..3).
   Used to discard those globals before they are overwritten. */
359 static inline bool live_cc_data(DisasContext *s)
361 return (s->cc_op != CC_OP_DYNAMIC
362 && s->cc_op != CC_OP_STATIC
363 && s->cc_op > 3);
/* Set the cc to the compile-time constant VAL (0..3). */
366 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
368 if (live_cc_data(s)) {
369 tcg_gen_discard_i64(cc_src);
370 tcg_gen_discard_i64(cc_dst);
371 tcg_gen_discard_i64(cc_vr);
373 s->cc_op = CC_OP_CONST0 + val;
/* Record a 1-input cc computation: OP over DST. */
376 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
378 if (live_cc_data(s)) {
379 tcg_gen_discard_i64(cc_src);
380 tcg_gen_discard_i64(cc_vr);
382 tcg_gen_mov_i64(cc_dst, dst);
383 s->cc_op = op;
/* Record a 2-input cc computation: OP over SRC and DST. */
386 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
387 TCGv_i64 dst)
389 if (live_cc_data(s)) {
390 tcg_gen_discard_i64(cc_vr);
392 tcg_gen_mov_i64(cc_src, src);
393 tcg_gen_mov_i64(cc_dst, dst);
394 s->cc_op = op;
/* Record a 3-input cc computation: OP over SRC, DST and result VR. */
397 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
398 TCGv_i64 dst, TCGv_i64 vr)
400 tcg_gen_mov_i64(cc_src, src);
401 tcg_gen_mov_i64(cc_dst, dst);
402 tcg_gen_mov_i64(cc_vr, vr);
403 s->cc_op = op;
/* cc = (val != 0), 64-bit integer flavor. */
406 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
408 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
/* cc from a 32-bit float result. */
411 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
413 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
/* cc from a 64-bit float result. */
416 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
418 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
/* cc from a 128-bit float result (high/low halves). */
421 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
423 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
426 /* CC value is in env->cc_op */
427 static void set_cc_static(DisasContext *s)
429 if (live_cc_data(s)) {
430 tcg_gen_discard_i64(cc_src);
431 tcg_gen_discard_i64(cc_dst);
432 tcg_gen_discard_i64(cc_vr);
434 s->cc_op = CC_OP_STATIC;
437 /* calculates cc into cc_op */
/* Force the deferred cc computation to happen now: depending on how many
   inputs the current cc_op needs (0, 1, 2 or 3 of cc_src/cc_dst/cc_vr),
   emit the appropriate gen_helper_calc_cc call, then mark the cc as STATIC.
   The first switch only prepares local_cc_op/dummy; the second emits code.
   NOTE(review): statement order here is deliberate - do not reorder. */
438 static void gen_op_calc_cc(DisasContext *s)
440 TCGv_i32 local_cc_op;
441 TCGv_i64 dummy;
443 TCGV_UNUSED_I32(local_cc_op);
444 TCGV_UNUSED_I64(dummy);
445 switch (s->cc_op) {
446 default:
/* Ops with fewer than 3 inputs need a dummy argument for the helper. */
447 dummy = tcg_const_i64(0);
448 /* FALLTHRU */
449 case CC_OP_ADD_64:
450 case CC_OP_ADDU_64:
451 case CC_OP_ADDC_64:
452 case CC_OP_SUB_64:
453 case CC_OP_SUBU_64:
454 case CC_OP_SUBB_64:
455 case CC_OP_ADD_32:
456 case CC_OP_ADDU_32:
457 case CC_OP_ADDC_32:
458 case CC_OP_SUB_32:
459 case CC_OP_SUBU_32:
460 case CC_OP_SUBB_32:
461 local_cc_op = tcg_const_i32(s->cc_op);
462 break;
463 case CC_OP_CONST0:
464 case CC_OP_CONST1:
465 case CC_OP_CONST2:
466 case CC_OP_CONST3:
467 case CC_OP_STATIC:
468 case CC_OP_DYNAMIC:
/* Constant or already-computed cc: no helper arguments needed. */
469 break;
472 switch (s->cc_op) {
473 case CC_OP_CONST0:
474 case CC_OP_CONST1:
475 case CC_OP_CONST2:
476 case CC_OP_CONST3:
477 /* s->cc_op is the cc value */
478 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
479 break;
480 case CC_OP_STATIC:
481 /* env->cc_op already is the cc value */
482 break;
483 case CC_OP_NZ:
484 case CC_OP_ABS_64:
485 case CC_OP_NABS_64:
486 case CC_OP_ABS_32:
487 case CC_OP_NABS_32:
488 case CC_OP_LTGT0_32:
489 case CC_OP_LTGT0_64:
490 case CC_OP_COMP_32:
491 case CC_OP_COMP_64:
492 case CC_OP_NZ_F32:
493 case CC_OP_NZ_F64:
494 case CC_OP_FLOGR:
495 /* 1 argument */
496 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
497 break;
498 case CC_OP_ICM:
499 case CC_OP_LTGT_32:
500 case CC_OP_LTGT_64:
501 case CC_OP_LTUGTU_32:
502 case CC_OP_LTUGTU_64:
503 case CC_OP_TM_32:
504 case CC_OP_TM_64:
505 case CC_OP_SLA_32:
506 case CC_OP_SLA_64:
507 case CC_OP_NZ_F128:
508 /* 2 arguments */
509 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
510 break;
511 case CC_OP_ADD_64:
512 case CC_OP_ADDU_64:
513 case CC_OP_ADDC_64:
514 case CC_OP_SUB_64:
515 case CC_OP_SUBU_64:
516 case CC_OP_SUBB_64:
517 case CC_OP_ADD_32:
518 case CC_OP_ADDU_32:
519 case CC_OP_ADDC_32:
520 case CC_OP_SUB_32:
521 case CC_OP_SUBU_32:
522 case CC_OP_SUBB_32:
523 /* 3 arguments */
524 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
525 break;
526 case CC_OP_DYNAMIC:
527 /* unknown operation - assume 3 arguments and cc_op in env */
528 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
529 break;
530 default:
531 tcg_abort();
/* Free whichever temporaries the first switch allocated. */
534 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
535 tcg_temp_free_i32(local_cc_op);
537 if (!TCGV_IS_UNUSED_I64(dummy)) {
538 tcg_temp_free_i64(dummy);
541 /* We now have cc in cc_op as constant */
542 set_cc_static(s);
/* Whether a direct goto_tb chain to DEST is permitted: the destination must
   lie on one of the (up to two) pages this TB covers, and neither
   single-stepping nor a pending-I/O TB may be in effect. */
545 static int use_goto_tb(DisasContext *s, uint64_t dest)
547 /* NOTE: we handle the case where the TB spans two pages here */
548 return (((dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK)
549 || (dest & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))
550 && !s->singlestep_enabled
551 && !(s->tb->cflags & CF_LAST_IO));
/* Statistics hook: a branch whose cc could NOT be folded inline. */
554 static void account_noninline_branch(DisasContext *s, int cc_op)
556 #ifdef DEBUG_INLINE_BRANCHES
557 inline_branch_miss[cc_op]++;
558 #endif
/* Statistics hook: a branch whose cc WAS folded inline. */
561 static void account_inline_branch(DisasContext *s, int cc_op)
563 #ifdef DEBUG_INLINE_BRANCHES
564 inline_branch_hit[cc_op]++;
565 #endif
568 /* Table of mask values to comparison codes, given a comparison as input.
569 For such, CC=3 should not be possible. */
/* Indexed by the 4-bit branch mask; entries come in pairs because bit 0
   (the CC=3 bit) is a don't-care for comparison results. */
570 static const TCGCond ltgt_cond[16] = {
571 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
572 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
573 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
574 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
575 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
576 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
577 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
578 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
581 /* Table of mask values to comparison codes, given a logic op as input.
582 For such, only CC=0 and CC=1 should be possible. */
583 static const TCGCond nz_cond[16] = {
584 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
585 TCG_COND_NEVER, TCG_COND_NEVER,
586 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
587 TCG_COND_NE, TCG_COND_NE,
588 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
589 TCG_COND_EQ, TCG_COND_EQ,
590 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
591 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
594 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
595 details required to generate a TCG comparison. */
/* Two phases: (1) map (cc_op, mask) to a TCG condition, falling back to
   do_dynamic (materialize the cc, then compare against the 4-bit mask)
   when no inline form exists; (2) load the comparison operands into C,
   marking globals with g1/g2 so free_compare() won't free them.
   NOTE(review): order-sensitive; kept verbatim apart from comments. */
596 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
598 TCGCond cond;
599 enum cc_op old_cc_op = s->cc_op;
/* mask 15 = always taken, mask 0 = never taken: trivial conditions that
   need no real operands (cc_op global used as a harmless placeholder). */
601 if (mask == 15 || mask == 0) {
602 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
603 c->u.s32.a = cc_op;
604 c->u.s32.b = cc_op;
605 c->g1 = c->g2 = true;
606 c->is_64 = false;
607 return;
610 /* Find the TCG condition for the mask + cc op. */
611 switch (old_cc_op) {
612 case CC_OP_LTGT0_32:
613 case CC_OP_LTGT0_64:
614 case CC_OP_LTGT_32:
615 case CC_OP_LTGT_64:
616 cond = ltgt_cond[mask];
617 if (cond == TCG_COND_NEVER) {
618 goto do_dynamic;
620 account_inline_branch(s, old_cc_op);
621 break;
623 case CC_OP_LTUGTU_32:
624 case CC_OP_LTUGTU_64:
/* Same table, converted to the unsigned flavor of each condition. */
625 cond = tcg_unsigned_cond(ltgt_cond[mask]);
626 if (cond == TCG_COND_NEVER) {
627 goto do_dynamic;
629 account_inline_branch(s, old_cc_op);
630 break;
632 case CC_OP_NZ:
633 cond = nz_cond[mask];
634 if (cond == TCG_COND_NEVER) {
635 goto do_dynamic;
637 account_inline_branch(s, old_cc_op);
638 break;
640 case CC_OP_TM_32:
641 case CC_OP_TM_64:
/* Test-under-mask: only all-zero (mask 8) and not-all-zero (mask 7)
   have simple inline forms. */
642 switch (mask) {
643 case 8:
644 cond = TCG_COND_EQ;
645 break;
646 case 4 | 2 | 1:
647 cond = TCG_COND_NE;
648 break;
649 default:
650 goto do_dynamic;
652 account_inline_branch(s, old_cc_op);
653 break;
655 case CC_OP_ICM:
656 switch (mask) {
657 case 8:
658 cond = TCG_COND_EQ;
659 break;
660 case 4 | 2 | 1:
661 case 4 | 2:
662 cond = TCG_COND_NE;
663 break;
664 default:
665 goto do_dynamic;
667 account_inline_branch(s, old_cc_op);
668 break;
670 case CC_OP_FLOGR:
671 switch (mask & 0xa) {
672 case 8: /* src == 0 -> no one bit found */
673 cond = TCG_COND_EQ;
674 break;
675 case 2: /* src != 0 -> one bit found */
676 cond = TCG_COND_NE;
677 break;
678 default:
679 goto do_dynamic;
681 account_inline_branch(s, old_cc_op);
682 break;
684 case CC_OP_ADDU_32:
685 case CC_OP_ADDU_64:
686 switch (mask) {
687 case 8 | 2: /* vr == 0 */
688 cond = TCG_COND_EQ;
689 break;
690 case 4 | 1: /* vr != 0 */
691 cond = TCG_COND_NE;
692 break;
693 case 8 | 4: /* no carry -> vr >= src */
694 cond = TCG_COND_GEU;
695 break;
696 case 2 | 1: /* carry -> vr < src */
697 cond = TCG_COND_LTU;
698 break;
699 default:
700 goto do_dynamic;
702 account_inline_branch(s, old_cc_op);
703 break;
705 case CC_OP_SUBU_32:
706 case CC_OP_SUBU_64:
707 /* Note that CC=0 is impossible; treat it as dont-care. */
708 switch (mask & 7) {
709 case 2: /* zero -> op1 == op2 */
710 cond = TCG_COND_EQ;
711 break;
712 case 4 | 1: /* !zero -> op1 != op2 */
713 cond = TCG_COND_NE;
714 break;
715 case 4: /* borrow (!carry) -> op1 < op2 */
716 cond = TCG_COND_LTU;
717 break;
718 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
719 cond = TCG_COND_GEU;
720 break;
721 default:
722 goto do_dynamic;
724 account_inline_branch(s, old_cc_op);
725 break;
727 default:
728 do_dynamic:
729 /* Calculate cc value. */
730 gen_op_calc_cc(s);
731 /* FALLTHRU */
733 case CC_OP_STATIC:
734 /* Jump based on CC. We'll load up the real cond below;
735 the assignment here merely avoids a compiler warning. */
736 account_noninline_branch(s, old_cc_op);
737 old_cc_op = CC_OP_STATIC;
738 cond = TCG_COND_NEVER;
739 break;
742 /* Load up the arguments of the comparison. */
743 c->is_64 = true;
744 c->g1 = c->g2 = false;
745 switch (old_cc_op) {
746 case CC_OP_LTGT0_32:
747 c->is_64 = false;
748 c->u.s32.a = tcg_temp_new_i32();
749 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
750 c->u.s32.b = tcg_const_i32(0);
751 break;
752 case CC_OP_LTGT_32:
753 case CC_OP_LTUGTU_32:
754 case CC_OP_SUBU_32:
755 c->is_64 = false;
756 c->u.s32.a = tcg_temp_new_i32();
757 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
758 c->u.s32.b = tcg_temp_new_i32();
759 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
760 break;
762 case CC_OP_LTGT0_64:
763 case CC_OP_NZ:
764 case CC_OP_FLOGR:
765 c->u.s64.a = cc_dst;
766 c->u.s64.b = tcg_const_i64(0);
767 c->g1 = true;
768 break;
769 case CC_OP_LTGT_64:
770 case CC_OP_LTUGTU_64:
771 case CC_OP_SUBU_64:
772 c->u.s64.a = cc_src;
773 c->u.s64.b = cc_dst;
774 c->g1 = c->g2 = true;
775 break;
777 case CC_OP_TM_32:
778 case CC_OP_TM_64:
779 case CC_OP_ICM:
/* Compare (value & mask) against zero. */
780 c->u.s64.a = tcg_temp_new_i64();
781 c->u.s64.b = tcg_const_i64(0);
782 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
783 break;
785 case CC_OP_ADDU_32:
786 c->is_64 = false;
787 c->u.s32.a = tcg_temp_new_i32();
788 c->u.s32.b = tcg_temp_new_i32();
789 tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
790 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
791 tcg_gen_movi_i32(c->u.s32.b, 0);
792 } else {
793 tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
795 break;
797 case CC_OP_ADDU_64:
798 c->u.s64.a = cc_vr;
799 c->g1 = true;
800 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
801 c->u.s64.b = tcg_const_i64(0);
802 } else {
803 c->u.s64.b = cc_src;
804 c->g2 = true;
806 break;
808 case CC_OP_STATIC:
/* cc already materialized in the cc_op global: pick the cheapest
   comparison against it for each of the 14 non-trivial masks. */
809 c->is_64 = false;
810 c->u.s32.a = cc_op;
811 c->g1 = true;
812 switch (mask) {
813 case 0x8 | 0x4 | 0x2: /* cc != 3 */
814 cond = TCG_COND_NE;
815 c->u.s32.b = tcg_const_i32(3);
816 break;
817 case 0x8 | 0x4 | 0x1: /* cc != 2 */
818 cond = TCG_COND_NE;
819 c->u.s32.b = tcg_const_i32(2);
820 break;
821 case 0x8 | 0x2 | 0x1: /* cc != 1 */
822 cond = TCG_COND_NE;
823 c->u.s32.b = tcg_const_i32(1);
824 break;
825 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
826 cond = TCG_COND_EQ;
827 c->g1 = false;
828 c->u.s32.a = tcg_temp_new_i32();
829 c->u.s32.b = tcg_const_i32(0);
830 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
831 break;
832 case 0x8 | 0x4: /* cc < 2 */
833 cond = TCG_COND_LTU;
834 c->u.s32.b = tcg_const_i32(2);
835 break;
836 case 0x8: /* cc == 0 */
837 cond = TCG_COND_EQ;
838 c->u.s32.b = tcg_const_i32(0);
839 break;
840 case 0x4 | 0x2 | 0x1: /* cc != 0 */
841 cond = TCG_COND_NE;
842 c->u.s32.b = tcg_const_i32(0);
843 break;
844 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
845 cond = TCG_COND_NE;
846 c->g1 = false;
847 c->u.s32.a = tcg_temp_new_i32();
848 c->u.s32.b = tcg_const_i32(0);
849 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
850 break;
851 case 0x4: /* cc == 1 */
852 cond = TCG_COND_EQ;
853 c->u.s32.b = tcg_const_i32(1);
854 break;
855 case 0x2 | 0x1: /* cc > 1 */
856 cond = TCG_COND_GTU;
857 c->u.s32.b = tcg_const_i32(1);
858 break;
859 case 0x2: /* cc == 2 */
860 cond = TCG_COND_EQ;
861 c->u.s32.b = tcg_const_i32(2);
862 break;
863 case 0x1: /* cc == 3 */
864 cond = TCG_COND_EQ;
865 c->u.s32.b = tcg_const_i32(3);
866 break;
867 default:
868 /* CC is masked by something else: (8 >> cc) & mask. */
869 cond = TCG_COND_NE;
870 c->g1 = false;
871 c->u.s32.a = tcg_const_i32(8);
872 c->u.s32.b = tcg_const_i32(0);
873 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
874 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
875 break;
877 break;
879 default:
880 abort();
882 c->cond = cond;
/* Release the temporaries held by a DisasCompare filled in by disas_jcc.
   Operands flagged g1/g2 are TCG globals and must not be freed. */
885 static void free_compare(DisasCompare *c)
887 if (!c->g1) {
888 if (c->is_64) {
889 tcg_temp_free_i64(c->u.s64.a);
890 } else {
891 tcg_temp_free_i32(c->u.s32.a);
894 if (!c->g2) {
895 if (c->is_64) {
896 tcg_temp_free_i64(c->u.s64.b);
897 } else {
898 tcg_temp_free_i32(c->u.s32.b);
903 /* ====================================================================== */
904 /* Define the insn format enumeration. */
905 #define F0(N) FMT_##N,
906 #define F1(N, X1) F0(N)
907 #define F2(N, X1, X2) F0(N)
908 #define F3(N, X1, X2, X3) F0(N)
909 #define F4(N, X1, X2, X3, X4) F0(N)
910 #define F5(N, X1, X2, X3, X4, X5) F0(N)
912 typedef enum {
913 #include "insn-format.def"
914 } DisasFormat;
916 #undef F0
917 #undef F1
918 #undef F2
919 #undef F3
920 #undef F4
921 #undef F5
923 /* Define a structure to hold the decoded fields. We'll store each inside
924 an array indexed by an enum. In order to conserve memory, we'll arrange
925 for fields that do not exist at the same time to overlap, thus the "C"
926 for compact. For checking purposes there is an "O" for original index
927 as well that will be applied to availability bitmaps. */
929 enum DisasFieldIndexO {
930 FLD_O_r1,
931 FLD_O_r2,
932 FLD_O_r3,
933 FLD_O_m1,
934 FLD_O_m3,
935 FLD_O_m4,
936 FLD_O_b1,
937 FLD_O_b2,
938 FLD_O_b4,
939 FLD_O_d1,
940 FLD_O_d2,
941 FLD_O_d4,
942 FLD_O_x2,
943 FLD_O_l1,
944 FLD_O_l2,
945 FLD_O_i1,
946 FLD_O_i2,
947 FLD_O_i3,
948 FLD_O_i4,
949 FLD_O_i5
952 enum DisasFieldIndexC {
953 FLD_C_r1 = 0,
954 FLD_C_m1 = 0,
955 FLD_C_b1 = 0,
956 FLD_C_i1 = 0,
958 FLD_C_r2 = 1,
959 FLD_C_b2 = 1,
960 FLD_C_i2 = 1,
962 FLD_C_r3 = 2,
963 FLD_C_m3 = 2,
964 FLD_C_i3 = 2,
966 FLD_C_m4 = 3,
967 FLD_C_b4 = 3,
968 FLD_C_i4 = 3,
969 FLD_C_l1 = 3,
971 FLD_C_i5 = 4,
972 FLD_C_d1 = 4,
974 FLD_C_d2 = 5,
976 FLD_C_d4 = 6,
977 FLD_C_x2 = 6,
978 FLD_C_l2 = 6,
980 NUM_C_FIELD = 7
/* Decoded operand fields of one instruction: the opcode byte(s), bitmaps of
   which fields are present (compact and original index flavors), and the
   compact value array. */
983 struct DisasFields {
984 unsigned op:8;                  /* primary opcode byte */
985 unsigned op2:8;                 /* secondary opcode byte, when the format has one */
986 unsigned presentC:16;           /* bitmap over DisasFieldIndexC slots */
987 unsigned int presentO;          /* bitmap over DisasFieldIndexO indices */
988 int c[NUM_C_FIELD];             /* field values, indexed by DisasFieldIndexC */
991 /* This is the way fields are to be accessed out of DisasFields. */
992 #define have_field(S, F) have_field1((S), FLD_O_##F)
993 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
/* Test whether original-index field C was decoded for this insn.
   Normally used via the have_field() macro. */
995 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
997 return (f->presentO >> c) & 1;
/* Fetch field value by compact index C, asserting that the original-index
   O says the field is actually present.  Used via the get_field() macro. */
1000 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1001 enum DisasFieldIndexC c)
1003 assert(have_field1(f, o));
1004 return f->c[c];
1007 /* Describe the layout of each field in each format. */
1008 typedef struct DisasField {
1009 unsigned int beg:8;
1010 unsigned int size:8;
1011 unsigned int type:2;
1012 unsigned int indexC:6;
1013 enum DisasFieldIndexO indexO:8;
1014 } DisasField;
1016 typedef struct DisasFormatInfo {
1017 DisasField op[NUM_C_FIELD];
1018 } DisasFormatInfo;
1020 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1021 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1022 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1023 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1024 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1025 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1026 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1027 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1028 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1029 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1030 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1031 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1032 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1033 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1035 #define F0(N) { { } },
1036 #define F1(N, X1) { { X1 } },
1037 #define F2(N, X1, X2) { { X1, X2 } },
1038 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1039 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1040 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1042 static const DisasFormatInfo format_info[] = {
1043 #include "insn-format.def"
1046 #undef F0
1047 #undef F1
1048 #undef F2
1049 #undef F3
1050 #undef F4
1051 #undef F5
1052 #undef R
1053 #undef M
1054 #undef BD
1055 #undef BXD
1056 #undef BDL
1057 #undef BXDL
1058 #undef I
1059 #undef L
1061 /* Generally, we'll extract operands into this structures, operate upon
1062 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1063 of routines below for more details. */
1064 typedef struct {
1065 bool g_out, g_out2, g_in1, g_in2;
1066 TCGv_i64 out, out2, in1, in2;
1067 TCGv_i64 addr1;
1068 } DisasOps;
1070 /* Instructions can place constraints on their operands, raising specification
1071 exceptions if they are violated. To make this easy to automate, each "in1",
1072 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1073 of the following, or 0. To make this easy to document, we'll put the
1074 SPEC_<name> defines next to <name>. */
1076 #define SPEC_r1_even 1
1077 #define SPEC_r2_even 2
1078 #define SPEC_r3_even 4
1079 #define SPEC_r1_f128 8
1080 #define SPEC_r2_f128 16
1082 /* Return values from translate_one, indicating the state of the TB. */
1083 typedef enum {
1084 /* Continue the TB. */
1085 NO_EXIT,
1086 /* We have emitted one or more goto_tb. No fixup required. */
1087 EXIT_GOTO_TB,
1088 /* We are not using a goto_tb (for whatever reason), but have updated
1089 the PC (for whatever reason), so there's no need to do it again on
1090 exiting the TB. */
1091 EXIT_PC_UPDATED,
1092 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1093 updated the PC for the next instruction to be executed. */
1094 EXIT_PC_STALE,
1095 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1096 No following code will be executed. */
1097 EXIT_NORETURN,
1098 } ExitStatus;
1100 typedef enum DisasFacility {
1101 FAC_Z, /* zarch (default) */
1102 FAC_CASS, /* compare and swap and store */
1103 FAC_CASS2, /* compare and swap and store 2*/
1104 FAC_DFP, /* decimal floating point */
1105 FAC_DFPR, /* decimal floating point rounding */
1106 FAC_DO, /* distinct operands */
1107 FAC_EE, /* execute extensions */
1108 FAC_EI, /* extended immediate */
1109 FAC_FPE, /* floating point extension */
1110 FAC_FPSSH, /* floating point support sign handling */
1111 FAC_FPRGR, /* FPR-GR transfer */
1112 FAC_GIE, /* general instructions extension */
1113 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1114 FAC_HW, /* high-word */
1115 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1116 FAC_LOC, /* load/store on condition */
1117 FAC_LD, /* long displacement */
1118 FAC_PC, /* population count */
1119 FAC_SCF, /* store clock fast */
1120 FAC_SFLE, /* store facility list extended */
1121 } DisasFacility;
1123 struct DisasInsn {
1124 unsigned opc:16;
1125 DisasFormat fmt:8;
1126 DisasFacility fac:8;
1127 unsigned spec:8;
1129 const char *name;
1131 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1132 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1133 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1134 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1135 void (*help_cout)(DisasContext *, DisasOps *);
1136 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1138 uint64_t data;
1141 /* ====================================================================== */
1142 /* Miscellaneous helpers, used by several operations. */
/* Load the in2 operand for shift-style insns: either the immediate
   displacement or the computed address, in both cases reduced by MASK
   (the architectural shift-count mask). */
1144 static void help_l2_shift(DisasContext *s, DisasFields *f,
1145 DisasOps *o, int mask)
1147 int b2 = get_field(f, b2);
1148 int d2 = get_field(f, d2);
1150 if (b2 == 0) {
1151 o->in2 = tcg_const_i64(d2 & mask);
1152 } else {
1153 o->in2 = get_address(s, 0, b2, d2);
1154 tcg_gen_andi_i64(o->in2, o->in2, mask);
/* Emit an unconditional branch to DEST: nothing for a branch-to-next,
   a chained goto_tb when permitted, otherwise a plain PSW update. */
1158 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1160 if (dest == s->next_pc) {
1161 return NO_EXIT;
1163 if (use_goto_tb(s, dest)) {
1164 update_cc_op(s);
1165 tcg_gen_goto_tb(0);
1166 tcg_gen_movi_i64(psw_addr, dest);
1167 tcg_gen_exit_tb((uintptr_t)s->tb);
1168 return EXIT_GOTO_TB;
1169 } else {
1170 tcg_gen_movi_i64(psw_addr, dest);
1171 return EXIT_PC_UPDATED;
/* Emit a conditional branch described by C.  IS_IMM selects a PC-relative
   target (s->pc + 2*IMM) versus a register/computed target in CDEST.
   Handles the degenerate always/never cases, then picks the best exit
   strategy: both edges via goto_tb, fallthru-only via goto_tb, or a
   branch-free movcond when goto_tb is unavailable.  Always consumes C. */
1175 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1176 bool is_imm, int imm, TCGv_i64 cdest)
1178 ExitStatus ret;
1179 uint64_t dest = s->pc + 2 * imm;    /* halfword-scaled relative target */
1180 TCGLabel *lab;
1182 /* Take care of the special cases first. */
1183 if (c->cond == TCG_COND_NEVER) {
1184 ret = NO_EXIT;
1185 goto egress;
1187 if (is_imm) {
1188 if (dest == s->next_pc) {
1189 /* Branch to next. */
1190 ret = NO_EXIT;
1191 goto egress;
1193 if (c->cond == TCG_COND_ALWAYS) {
1194 ret = help_goto_direct(s, dest);
1195 goto egress;
1197 } else {
1198 if (TCGV_IS_UNUSED_I64(cdest)) {
1199 /* E.g. bcr %r0 -> no branch. */
1200 ret = NO_EXIT;
1201 goto egress;
1203 if (c->cond == TCG_COND_ALWAYS) {
1204 tcg_gen_mov_i64(psw_addr, cdest);
1205 ret = EXIT_PC_UPDATED;
1206 goto egress;
1210 if (use_goto_tb(s, s->next_pc)) {
1211 if (is_imm && use_goto_tb(s, dest)) {
1212 /* Both exits can use goto_tb. */
1213 update_cc_op(s);
1215 lab = gen_new_label();
1216 if (c->is_64) {
1217 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1218 } else {
1219 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1222 /* Branch not taken. */
1223 tcg_gen_goto_tb(0);
1224 tcg_gen_movi_i64(psw_addr, s->next_pc);
1225 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1227 /* Branch taken. */
1228 gen_set_label(lab);
1229 tcg_gen_goto_tb(1);
1230 tcg_gen_movi_i64(psw_addr, dest);
1231 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1233 ret = EXIT_GOTO_TB;
1234 } else {
1235 /* Fallthru can use goto_tb, but taken branch cannot. */
1236 /* Store taken branch destination before the brcond. This
1237 avoids having to allocate a new local temp to hold it.
1238 We'll overwrite this in the not taken case anyway. */
1239 if (!is_imm) {
1240 tcg_gen_mov_i64(psw_addr, cdest);
1243 lab = gen_new_label();
1244 if (c->is_64) {
1245 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1246 } else {
1247 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1250 /* Branch not taken. */
1251 update_cc_op(s);
1252 tcg_gen_goto_tb(0);
1253 tcg_gen_movi_i64(psw_addr, s->next_pc);
1254 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1256 gen_set_label(lab);
1257 if (is_imm) {
1258 tcg_gen_movi_i64(psw_addr, dest);
1260 ret = EXIT_PC_UPDATED;
1262 } else {
1263 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1264 Most commonly we're single-stepping or some other condition that
1265 disables all use of goto_tb. Just update the PC and exit. */
1267 TCGv_i64 next = tcg_const_i64(s->next_pc);
1268 if (is_imm) {
1269 cdest = tcg_const_i64(dest);
/* Select the new PSW address without branching. */
1272 if (c->is_64) {
1273 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1274 cdest, next);
1275 } else {
/* 32-bit compare: widen the setcond result and select on != 0. */
1276 TCGv_i32 t0 = tcg_temp_new_i32();
1277 TCGv_i64 t1 = tcg_temp_new_i64();
1278 TCGv_i64 z = tcg_const_i64(0);
1279 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1280 tcg_gen_extu_i32_i64(t1, t0);
1281 tcg_temp_free_i32(t0);
1282 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1283 tcg_temp_free_i64(t1);
1284 tcg_temp_free_i64(z);
1287 if (is_imm) {
1288 tcg_temp_free_i64(cdest);
1290 tcg_temp_free_i64(next);
1292 ret = EXIT_PC_UPDATED;
1295 egress:
1296 free_compare(c);
1297 return ret;
/* ====================================================================== */
/* The operations.  These perform the bulk of the work for any insn,
   usually after the operands have been loaded and output initialized.  */
1304 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1306 gen_helper_abs_i64(o->out, o->in2);
1307 return NO_EXIT;
1310 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1312 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1313 return NO_EXIT;
1316 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1318 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1319 return NO_EXIT;
1322 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1324 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1325 tcg_gen_mov_i64(o->out2, o->in2);
1326 return NO_EXIT;
1329 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1331 tcg_gen_add_i64(o->out, o->in1, o->in2);
1332 return NO_EXIT;
1335 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1337 DisasCompare cmp;
1338 TCGv_i64 carry;
1340 tcg_gen_add_i64(o->out, o->in1, o->in2);
1342 /* The carry flag is the msb of CC, therefore the branch mask that would
1343 create that comparison is 3. Feeding the generated comparison to
1344 setcond produces the carry flag that we desire. */
1345 disas_jcc(s, &cmp, 3);
1346 carry = tcg_temp_new_i64();
1347 if (cmp.is_64) {
1348 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1349 } else {
1350 TCGv_i32 t = tcg_temp_new_i32();
1351 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1352 tcg_gen_extu_i32_i64(carry, t);
1353 tcg_temp_free_i32(t);
1355 free_compare(&cmp);
1357 tcg_gen_add_i64(o->out, o->out, carry);
1358 tcg_temp_free_i64(carry);
1359 return NO_EXIT;
1362 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1364 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1365 return NO_EXIT;
1368 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1370 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1371 return NO_EXIT;
1374 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1376 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1377 return_low128(o->out2);
1378 return NO_EXIT;
1381 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1383 tcg_gen_and_i64(o->out, o->in1, o->in2);
1384 return NO_EXIT;
1387 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1389 int shift = s->insn->data & 0xff;
1390 int size = s->insn->data >> 8;
1391 uint64_t mask = ((1ull << size) - 1) << shift;
1393 assert(!o->g_in2);
1394 tcg_gen_shli_i64(o->in2, o->in2, shift);
1395 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1396 tcg_gen_and_i64(o->out, o->in1, o->in2);
1398 /* Produce the CC from only the bits manipulated. */
1399 tcg_gen_andi_i64(cc_dst, o->out, mask);
1400 set_cc_nz_u64(s, cc_dst);
1401 return NO_EXIT;
1404 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1406 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1407 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1408 tcg_gen_mov_i64(psw_addr, o->in2);
1409 return EXIT_PC_UPDATED;
1410 } else {
1411 return NO_EXIT;
1415 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1417 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1418 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1421 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1423 int m1 = get_field(s->fields, m1);
1424 bool is_imm = have_field(s->fields, i2);
1425 int imm = is_imm ? get_field(s->fields, i2) : 0;
1426 DisasCompare c;
1428 disas_jcc(s, &c, m1);
1429 return help_branch(s, &c, is_imm, imm, o->in2);
1432 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1434 int r1 = get_field(s->fields, r1);
1435 bool is_imm = have_field(s->fields, i2);
1436 int imm = is_imm ? get_field(s->fields, i2) : 0;
1437 DisasCompare c;
1438 TCGv_i64 t;
1440 c.cond = TCG_COND_NE;
1441 c.is_64 = false;
1442 c.g1 = false;
1443 c.g2 = false;
1445 t = tcg_temp_new_i64();
1446 tcg_gen_subi_i64(t, regs[r1], 1);
1447 store_reg32_i64(r1, t);
1448 c.u.s32.a = tcg_temp_new_i32();
1449 c.u.s32.b = tcg_const_i32(0);
1450 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1451 tcg_temp_free_i64(t);
1453 return help_branch(s, &c, is_imm, imm, o->in2);
1456 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1458 int r1 = get_field(s->fields, r1);
1459 bool is_imm = have_field(s->fields, i2);
1460 int imm = is_imm ? get_field(s->fields, i2) : 0;
1461 DisasCompare c;
1463 c.cond = TCG_COND_NE;
1464 c.is_64 = true;
1465 c.g1 = true;
1466 c.g2 = false;
1468 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1469 c.u.s64.a = regs[r1];
1470 c.u.s64.b = tcg_const_i64(0);
1472 return help_branch(s, &c, is_imm, imm, o->in2);
1475 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1477 int r1 = get_field(s->fields, r1);
1478 int r3 = get_field(s->fields, r3);
1479 bool is_imm = have_field(s->fields, i2);
1480 int imm = is_imm ? get_field(s->fields, i2) : 0;
1481 DisasCompare c;
1482 TCGv_i64 t;
1484 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1485 c.is_64 = false;
1486 c.g1 = false;
1487 c.g2 = false;
1489 t = tcg_temp_new_i64();
1490 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1491 c.u.s32.a = tcg_temp_new_i32();
1492 c.u.s32.b = tcg_temp_new_i32();
1493 tcg_gen_trunc_i64_i32(c.u.s32.a, t);
1494 tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
1495 store_reg32_i64(r1, t);
1496 tcg_temp_free_i64(t);
1498 return help_branch(s, &c, is_imm, imm, o->in2);
1501 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1503 int r1 = get_field(s->fields, r1);
1504 int r3 = get_field(s->fields, r3);
1505 bool is_imm = have_field(s->fields, i2);
1506 int imm = is_imm ? get_field(s->fields, i2) : 0;
1507 DisasCompare c;
1509 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1510 c.is_64 = true;
1512 if (r1 == (r3 | 1)) {
1513 c.u.s64.b = load_reg(r3 | 1);
1514 c.g2 = false;
1515 } else {
1516 c.u.s64.b = regs[r3 | 1];
1517 c.g2 = true;
1520 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1521 c.u.s64.a = regs[r1];
1522 c.g1 = true;
1524 return help_branch(s, &c, is_imm, imm, o->in2);
1527 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1529 int imm, m3 = get_field(s->fields, m3);
1530 bool is_imm;
1531 DisasCompare c;
1533 c.cond = ltgt_cond[m3];
1534 if (s->insn->data) {
1535 c.cond = tcg_unsigned_cond(c.cond);
1537 c.is_64 = c.g1 = c.g2 = true;
1538 c.u.s64.a = o->in1;
1539 c.u.s64.b = o->in2;
1541 is_imm = have_field(s->fields, i4);
1542 if (is_imm) {
1543 imm = get_field(s->fields, i4);
1544 } else {
1545 imm = 0;
1546 o->out = get_address(s, 0, get_field(s->fields, b4),
1547 get_field(s->fields, d4));
1550 return help_branch(s, &c, is_imm, imm, o->out);
1553 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1555 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1556 set_cc_static(s);
1557 return NO_EXIT;
1560 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1562 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1563 set_cc_static(s);
1564 return NO_EXIT;
1567 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1569 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1570 set_cc_static(s);
1571 return NO_EXIT;
1574 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1576 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1577 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1578 tcg_temp_free_i32(m3);
1579 gen_set_cc_nz_f32(s, o->in2);
1580 return NO_EXIT;
1583 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1585 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1586 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1587 tcg_temp_free_i32(m3);
1588 gen_set_cc_nz_f64(s, o->in2);
1589 return NO_EXIT;
1592 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1594 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1595 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1596 tcg_temp_free_i32(m3);
1597 gen_set_cc_nz_f128(s, o->in1, o->in2);
1598 return NO_EXIT;
1601 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1603 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1604 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1605 tcg_temp_free_i32(m3);
1606 gen_set_cc_nz_f32(s, o->in2);
1607 return NO_EXIT;
1610 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1612 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1613 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1614 tcg_temp_free_i32(m3);
1615 gen_set_cc_nz_f64(s, o->in2);
1616 return NO_EXIT;
1619 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1621 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1622 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1623 tcg_temp_free_i32(m3);
1624 gen_set_cc_nz_f128(s, o->in1, o->in2);
1625 return NO_EXIT;
1628 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1630 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1631 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1632 tcg_temp_free_i32(m3);
1633 gen_set_cc_nz_f32(s, o->in2);
1634 return NO_EXIT;
1637 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1639 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1640 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1641 tcg_temp_free_i32(m3);
1642 gen_set_cc_nz_f64(s, o->in2);
1643 return NO_EXIT;
1646 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1648 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1649 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1650 tcg_temp_free_i32(m3);
1651 gen_set_cc_nz_f128(s, o->in1, o->in2);
1652 return NO_EXIT;
1655 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1657 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1658 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1659 tcg_temp_free_i32(m3);
1660 gen_set_cc_nz_f32(s, o->in2);
1661 return NO_EXIT;
1664 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1666 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1667 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1668 tcg_temp_free_i32(m3);
1669 gen_set_cc_nz_f64(s, o->in2);
1670 return NO_EXIT;
1673 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1675 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1676 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1677 tcg_temp_free_i32(m3);
1678 gen_set_cc_nz_f128(s, o->in1, o->in2);
1679 return NO_EXIT;
1682 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1684 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1685 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1686 tcg_temp_free_i32(m3);
1687 return NO_EXIT;
1690 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1692 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1693 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1694 tcg_temp_free_i32(m3);
1695 return NO_EXIT;
1698 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1700 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1701 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1702 tcg_temp_free_i32(m3);
1703 return_low128(o->out2);
1704 return NO_EXIT;
1707 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1709 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1710 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1711 tcg_temp_free_i32(m3);
1712 return NO_EXIT;
1715 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1717 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1718 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1719 tcg_temp_free_i32(m3);
1720 return NO_EXIT;
1723 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1725 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1726 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1727 tcg_temp_free_i32(m3);
1728 return_low128(o->out2);
1729 return NO_EXIT;
1732 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1734 int r2 = get_field(s->fields, r2);
1735 TCGv_i64 len = tcg_temp_new_i64();
1737 potential_page_fault(s);
1738 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1739 set_cc_static(s);
1740 return_low128(o->out);
1742 tcg_gen_add_i64(regs[r2], regs[r2], len);
1743 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1744 tcg_temp_free_i64(len);
1746 return NO_EXIT;
1749 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1751 int l = get_field(s->fields, l1);
1752 TCGv_i32 vl;
1754 switch (l + 1) {
1755 case 1:
1756 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1757 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1758 break;
1759 case 2:
1760 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1761 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1762 break;
1763 case 4:
1764 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1765 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1766 break;
1767 case 8:
1768 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1769 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1770 break;
1771 default:
1772 potential_page_fault(s);
1773 vl = tcg_const_i32(l);
1774 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1775 tcg_temp_free_i32(vl);
1776 set_cc_static(s);
1777 return NO_EXIT;
1779 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1780 return NO_EXIT;
1783 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1785 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1786 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
1787 potential_page_fault(s);
1788 gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
1789 tcg_temp_free_i32(r1);
1790 tcg_temp_free_i32(r3);
1791 set_cc_static(s);
1792 return NO_EXIT;
1795 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1797 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1798 TCGv_i32 t1 = tcg_temp_new_i32();
1799 tcg_gen_trunc_i64_i32(t1, o->in1);
1800 potential_page_fault(s);
1801 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1802 set_cc_static(s);
1803 tcg_temp_free_i32(t1);
1804 tcg_temp_free_i32(m3);
1805 return NO_EXIT;
1808 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1810 potential_page_fault(s);
1811 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1812 set_cc_static(s);
1813 return_low128(o->in2);
1814 return NO_EXIT;
1817 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1819 TCGv_i64 t = tcg_temp_new_i64();
1820 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1821 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1822 tcg_gen_or_i64(o->out, o->out, t);
1823 tcg_temp_free_i64(t);
1824 return NO_EXIT;
1827 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1829 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1830 int d2 = get_field(s->fields, d2);
1831 int b2 = get_field(s->fields, b2);
1832 int is_64 = s->insn->data;
1833 TCGv_i64 addr, mem, cc, z;
1835 /* Note that in1 = R3 (new value) and
1836 in2 = (zero-extended) R1 (expected value). */
1838 /* Load the memory into the (temporary) output. While the PoO only talks
1839 about moving the memory to R1 on inequality, if we include equality it
1840 means that R1 is equal to the memory in all conditions. */
1841 addr = get_address(s, 0, b2, d2);
1842 if (is_64) {
1843 tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
1844 } else {
1845 tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
1848 /* Are the memory and expected values (un)equal? Note that this setcond
1849 produces the output CC value, thus the NE sense of the test. */
1850 cc = tcg_temp_new_i64();
1851 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1853 /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
1854 Recall that we are allowed to unconditionally issue the store (and
1855 thus any possible write trap), so (re-)store the original contents
1856 of MEM in case of inequality. */
1857 z = tcg_const_i64(0);
1858 mem = tcg_temp_new_i64();
1859 tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
1860 if (is_64) {
1861 tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
1862 } else {
1863 tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
1865 tcg_temp_free_i64(z);
1866 tcg_temp_free_i64(mem);
1867 tcg_temp_free_i64(addr);
1869 /* Store CC back to cc_op. Wait until after the store so that any
1870 exception gets the old cc_op value. */
1871 tcg_gen_trunc_i64_i32(cc_op, cc);
1872 tcg_temp_free_i64(cc);
1873 set_cc_static(s);
1874 return NO_EXIT;
1877 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1879 /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
1880 int r1 = get_field(s->fields, r1);
1881 int r3 = get_field(s->fields, r3);
1882 int d2 = get_field(s->fields, d2);
1883 int b2 = get_field(s->fields, b2);
1884 TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
1886 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1888 addrh = get_address(s, 0, b2, d2);
1889 addrl = get_address(s, 0, b2, d2 + 8);
1890 outh = tcg_temp_new_i64();
1891 outl = tcg_temp_new_i64();
1893 tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
1894 tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
1896 /* Fold the double-word compare with arithmetic. */
1897 cc = tcg_temp_new_i64();
1898 z = tcg_temp_new_i64();
1899 tcg_gen_xor_i64(cc, outh, regs[r1]);
1900 tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
1901 tcg_gen_or_i64(cc, cc, z);
1902 tcg_gen_movi_i64(z, 0);
1903 tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
1905 memh = tcg_temp_new_i64();
1906 meml = tcg_temp_new_i64();
1907 tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
1908 tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
1909 tcg_temp_free_i64(z);
1911 tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
1912 tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
1913 tcg_temp_free_i64(memh);
1914 tcg_temp_free_i64(meml);
1915 tcg_temp_free_i64(addrh);
1916 tcg_temp_free_i64(addrl);
1918 /* Save back state now that we've passed all exceptions. */
1919 tcg_gen_mov_i64(regs[r1], outh);
1920 tcg_gen_mov_i64(regs[r1 + 1], outl);
1921 tcg_gen_trunc_i64_i32(cc_op, cc);
1922 tcg_temp_free_i64(outh);
1923 tcg_temp_free_i64(outl);
1924 tcg_temp_free_i64(cc);
1925 set_cc_static(s);
1926 return NO_EXIT;
1929 #ifndef CONFIG_USER_ONLY
1930 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1932 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
1933 check_privileged(s);
1934 gen_helper_csp(cc_op, cpu_env, r1, o->in2);
1935 tcg_temp_free_i32(r1);
1936 set_cc_static(s);
1937 return NO_EXIT;
1939 #endif
1941 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
1943 TCGv_i64 t1 = tcg_temp_new_i64();
1944 TCGv_i32 t2 = tcg_temp_new_i32();
1945 tcg_gen_trunc_i64_i32(t2, o->in1);
1946 gen_helper_cvd(t1, t2);
1947 tcg_temp_free_i32(t2);
1948 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
1949 tcg_temp_free_i64(t1);
1950 return NO_EXIT;
1953 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
1955 int m3 = get_field(s->fields, m3);
1956 TCGLabel *lab = gen_new_label();
1957 TCGv_i32 t;
1958 TCGCond c;
1960 c = tcg_invert_cond(ltgt_cond[m3]);
1961 if (s->insn->data) {
1962 c = tcg_unsigned_cond(c);
1964 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
1966 /* Set DXC to 0xff. */
1967 t = tcg_temp_new_i32();
1968 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1969 tcg_gen_ori_i32(t, t, 0xff00);
1970 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
1971 tcg_temp_free_i32(t);
1973 /* Trap. */
1974 gen_program_exception(s, PGM_DATA);
1976 gen_set_label(lab);
1977 return NO_EXIT;
1980 #ifndef CONFIG_USER_ONLY
1981 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
1983 TCGv_i32 tmp;
1985 check_privileged(s);
1986 potential_page_fault(s);
1988 /* We pretend the format is RX_a so that D2 is the field we want. */
1989 tmp = tcg_const_i32(get_field(s->fields, d2) & 0xfff);
1990 gen_helper_diag(regs[2], cpu_env, tmp, regs[2], regs[1]);
1991 tcg_temp_free_i32(tmp);
1992 return NO_EXIT;
1994 #endif
1996 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
1998 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
1999 return_low128(o->out);
2000 return NO_EXIT;
2003 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2005 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2006 return_low128(o->out);
2007 return NO_EXIT;
2010 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2012 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2013 return_low128(o->out);
2014 return NO_EXIT;
2017 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2019 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2020 return_low128(o->out);
2021 return NO_EXIT;
2024 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2026 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2027 return NO_EXIT;
2030 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2032 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2033 return NO_EXIT;
2036 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2038 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2039 return_low128(o->out2);
2040 return NO_EXIT;
2043 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2045 int r2 = get_field(s->fields, r2);
2046 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2047 return NO_EXIT;
2050 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2052 /* No cache information provided. */
2053 tcg_gen_movi_i64(o->out, -1);
2054 return NO_EXIT;
2057 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2059 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2060 return NO_EXIT;
2063 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2065 int r1 = get_field(s->fields, r1);
2066 int r2 = get_field(s->fields, r2);
2067 TCGv_i64 t = tcg_temp_new_i64();
2069 /* Note the "subsequently" in the PoO, which implies a defined result
2070 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2071 tcg_gen_shri_i64(t, psw_mask, 32);
2072 store_reg32_i64(r1, t);
2073 if (r2 != 0) {
2074 store_reg32_i64(r2, psw_mask);
2077 tcg_temp_free_i64(t);
2078 return NO_EXIT;
2081 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2083 /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
2084 tb->flags, (ab)use the tb->cs_base field as the address of
2085 the template in memory, and grab 8 bits of tb->flags/cflags for
2086 the contents of the register. We would then recognize all this
2087 in gen_intermediate_code_internal, generating code for exactly
2088 one instruction. This new TB then gets executed normally.
2090 On the other hand, this seems to be mostly used for modifying
2091 MVC inside of memcpy, which needs a helper call anyway. So
2092 perhaps this doesn't bear thinking about any further. */
2094 TCGv_i64 tmp;
2096 update_psw_addr(s);
2097 update_cc_op(s);
2099 tmp = tcg_const_i64(s->next_pc);
2100 gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
2101 tcg_temp_free_i64(tmp);
2103 set_cc_static(s);
2104 return NO_EXIT;
2107 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2109 /* We'll use the original input for cc computation, since we get to
2110 compare that against 0, which ought to be better than comparing
2111 the real output against 64. It also lets cc_dst be a convenient
2112 temporary during our computation. */
2113 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2115 /* R1 = IN ? CLZ(IN) : 64. */
2116 gen_helper_clz(o->out, o->in2);
2118 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2119 value by 64, which is undefined. But since the shift is 64 iff the
2120 input is zero, we still get the correct result after and'ing. */
2121 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2122 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2123 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2124 return NO_EXIT;
2127 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2129 int m3 = get_field(s->fields, m3);
2130 int pos, len, base = s->insn->data;
2131 TCGv_i64 tmp = tcg_temp_new_i64();
2132 uint64_t ccm;
2134 switch (m3) {
2135 case 0xf:
2136 /* Effectively a 32-bit load. */
2137 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2138 len = 32;
2139 goto one_insert;
2141 case 0xc:
2142 case 0x6:
2143 case 0x3:
2144 /* Effectively a 16-bit load. */
2145 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2146 len = 16;
2147 goto one_insert;
2149 case 0x8:
2150 case 0x4:
2151 case 0x2:
2152 case 0x1:
2153 /* Effectively an 8-bit load. */
2154 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2155 len = 8;
2156 goto one_insert;
2158 one_insert:
2159 pos = base + ctz32(m3) * 8;
2160 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2161 ccm = ((1ull << len) - 1) << pos;
2162 break;
2164 default:
2165 /* This is going to be a sequence of loads and inserts. */
2166 pos = base + 32 - 8;
2167 ccm = 0;
2168 while (m3) {
2169 if (m3 & 0x8) {
2170 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2171 tcg_gen_addi_i64(o->in2, o->in2, 1);
2172 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2173 ccm |= 0xff << pos;
2175 m3 = (m3 << 1) & 0xf;
2176 pos -= 8;
2178 break;
2181 tcg_gen_movi_i64(tmp, ccm);
2182 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2183 tcg_temp_free_i64(tmp);
2184 return NO_EXIT;
2187 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2189 int shift = s->insn->data & 0xff;
2190 int size = s->insn->data >> 8;
2191 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2192 return NO_EXIT;
2195 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2197 TCGv_i64 t1;
2199 gen_op_calc_cc(s);
2200 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2202 t1 = tcg_temp_new_i64();
2203 tcg_gen_shli_i64(t1, psw_mask, 20);
2204 tcg_gen_shri_i64(t1, t1, 36);
2205 tcg_gen_or_i64(o->out, o->out, t1);
2207 tcg_gen_extu_i32_i64(t1, cc_op);
2208 tcg_gen_shli_i64(t1, t1, 28);
2209 tcg_gen_or_i64(o->out, o->out, t1);
2210 tcg_temp_free_i64(t1);
2211 return NO_EXIT;
2214 #ifndef CONFIG_USER_ONLY
2215 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2217 check_privileged(s);
2218 gen_helper_ipte(cpu_env, o->in1, o->in2);
2219 return NO_EXIT;
2222 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2224 check_privileged(s);
2225 gen_helper_iske(o->out, cpu_env, o->in2);
2226 return NO_EXIT;
2228 #endif
2230 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2232 gen_helper_ldeb(o->out, cpu_env, o->in2);
2233 return NO_EXIT;
2236 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2238 gen_helper_ledb(o->out, cpu_env, o->in2);
2239 return NO_EXIT;
2242 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2244 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2245 return NO_EXIT;
2248 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2250 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2251 return NO_EXIT;
2254 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2256 gen_helper_lxdb(o->out, cpu_env, o->in2);
2257 return_low128(o->out2);
2258 return NO_EXIT;
2261 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2263 gen_helper_lxeb(o->out, cpu_env, o->in2);
2264 return_low128(o->out2);
2265 return NO_EXIT;
2268 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2270 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2271 return NO_EXIT;
2274 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2276 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2277 return NO_EXIT;
2280 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2282 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2283 return NO_EXIT;
2286 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2288 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2289 return NO_EXIT;
2292 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2294 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2295 return NO_EXIT;
2298 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2300 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2301 return NO_EXIT;
2304 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2306 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2307 return NO_EXIT;
2310 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2312 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2313 return NO_EXIT;
2316 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2318 DisasCompare c;
2320 disas_jcc(s, &c, get_field(s->fields, m3));
2322 if (c.is_64) {
2323 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2324 o->in2, o->in1);
2325 free_compare(&c);
2326 } else {
2327 TCGv_i32 t32 = tcg_temp_new_i32();
2328 TCGv_i64 t, z;
2330 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2331 free_compare(&c);
2333 t = tcg_temp_new_i64();
2334 tcg_gen_extu_i32_i64(t, t32);
2335 tcg_temp_free_i32(t32);
2337 z = tcg_const_i64(0);
2338 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2339 tcg_temp_free_i64(t);
2340 tcg_temp_free_i64(z);
2343 return NO_EXIT;
2346 #ifndef CONFIG_USER_ONLY
2347 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2349 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2350 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2351 check_privileged(s);
2352 potential_page_fault(s);
2353 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2354 tcg_temp_free_i32(r1);
2355 tcg_temp_free_i32(r3);
2356 return NO_EXIT;
2359 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2361 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2362 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2363 check_privileged(s);
2364 potential_page_fault(s);
2365 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2366 tcg_temp_free_i32(r1);
2367 tcg_temp_free_i32(r3);
2368 return NO_EXIT;
2370 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2372 check_privileged(s);
2373 potential_page_fault(s);
2374 gen_helper_lra(o->out, cpu_env, o->in2);
2375 set_cc_static(s);
2376 return NO_EXIT;
2379 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2381 TCGv_i64 t1, t2;
2383 check_privileged(s);
2385 t1 = tcg_temp_new_i64();
2386 t2 = tcg_temp_new_i64();
2387 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2388 tcg_gen_addi_i64(o->in2, o->in2, 4);
2389 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2390 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2391 tcg_gen_shli_i64(t1, t1, 32);
2392 gen_helper_load_psw(cpu_env, t1, t2);
2393 tcg_temp_free_i64(t1);
2394 tcg_temp_free_i64(t2);
2395 return EXIT_NORETURN;
2398 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2400 TCGv_i64 t1, t2;
2402 check_privileged(s);
2404 t1 = tcg_temp_new_i64();
2405 t2 = tcg_temp_new_i64();
2406 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2407 tcg_gen_addi_i64(o->in2, o->in2, 8);
2408 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2409 gen_helper_load_psw(cpu_env, t1, t2);
2410 tcg_temp_free_i64(t1);
2411 tcg_temp_free_i64(t2);
2412 return EXIT_NORETURN;
2414 #endif
2416 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2418 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2419 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2420 potential_page_fault(s);
2421 gen_helper_lam(cpu_env, r1, o->in2, r3);
2422 tcg_temp_free_i32(r1);
2423 tcg_temp_free_i32(r3);
2424 return NO_EXIT;
2427 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2429 int r1 = get_field(s->fields, r1);
2430 int r3 = get_field(s->fields, r3);
2431 TCGv_i64 t = tcg_temp_new_i64();
2432 TCGv_i64 t4 = tcg_const_i64(4);
2434 while (1) {
2435 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2436 store_reg32_i64(r1, t);
2437 if (r1 == r3) {
2438 break;
2440 tcg_gen_add_i64(o->in2, o->in2, t4);
2441 r1 = (r1 + 1) & 15;
2444 tcg_temp_free_i64(t);
2445 tcg_temp_free_i64(t4);
2446 return NO_EXIT;
2449 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2451 int r1 = get_field(s->fields, r1);
2452 int r3 = get_field(s->fields, r3);
2453 TCGv_i64 t = tcg_temp_new_i64();
2454 TCGv_i64 t4 = tcg_const_i64(4);
2456 while (1) {
2457 tcg_gen_qemu_ld32u(t, o->in2, get_mem_index(s));
2458 store_reg32h_i64(r1, t);
2459 if (r1 == r3) {
2460 break;
2462 tcg_gen_add_i64(o->in2, o->in2, t4);
2463 r1 = (r1 + 1) & 15;
2466 tcg_temp_free_i64(t);
2467 tcg_temp_free_i64(t4);
2468 return NO_EXIT;
2471 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2473 int r1 = get_field(s->fields, r1);
2474 int r3 = get_field(s->fields, r3);
2475 TCGv_i64 t8 = tcg_const_i64(8);
2477 while (1) {
2478 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2479 if (r1 == r3) {
2480 break;
2482 tcg_gen_add_i64(o->in2, o->in2, t8);
2483 r1 = (r1 + 1) & 15;
2486 tcg_temp_free_i64(t8);
2487 return NO_EXIT;
2490 #ifndef CONFIG_USER_ONLY
2491 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2493 check_privileged(s);
2494 potential_page_fault(s);
2495 gen_helper_lura(o->out, cpu_env, o->in2);
2496 return NO_EXIT;
2499 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2501 check_privileged(s);
2502 potential_page_fault(s);
2503 gen_helper_lurag(o->out, cpu_env, o->in2);
2504 return NO_EXIT;
2506 #endif
2508 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2510 o->out = o->in2;
2511 o->g_out = o->g_in2;
2512 TCGV_UNUSED_I64(o->in2);
2513 o->g_in2 = false;
2514 return NO_EXIT;
2517 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
2519 o->out = o->in1;
2520 o->out2 = o->in2;
2521 o->g_out = o->g_in1;
2522 o->g_out2 = o->g_in2;
2523 TCGV_UNUSED_I64(o->in1);
2524 TCGV_UNUSED_I64(o->in2);
2525 o->g_in1 = o->g_in2 = false;
2526 return NO_EXIT;
2529 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
2531 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2532 potential_page_fault(s);
2533 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
2534 tcg_temp_free_i32(l);
2535 return NO_EXIT;
2538 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
2540 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2541 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
2542 potential_page_fault(s);
2543 gen_helper_mvcl(cc_op, cpu_env, r1, r2);
2544 tcg_temp_free_i32(r1);
2545 tcg_temp_free_i32(r2);
2546 set_cc_static(s);
2547 return NO_EXIT;
2550 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
2552 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2553 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2554 potential_page_fault(s);
2555 gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
2556 tcg_temp_free_i32(r1);
2557 tcg_temp_free_i32(r3);
2558 set_cc_static(s);
2559 return NO_EXIT;
2562 #ifndef CONFIG_USER_ONLY
2563 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
2565 int r1 = get_field(s->fields, l1);
2566 check_privileged(s);
2567 potential_page_fault(s);
2568 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2569 set_cc_static(s);
2570 return NO_EXIT;
2573 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
2575 int r1 = get_field(s->fields, l1);
2576 check_privileged(s);
2577 potential_page_fault(s);
2578 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
2579 set_cc_static(s);
2580 return NO_EXIT;
2582 #endif
2584 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
2586 potential_page_fault(s);
2587 gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
2588 set_cc_static(s);
2589 return NO_EXIT;
2592 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
2594 potential_page_fault(s);
2595 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
2596 set_cc_static(s);
2597 return_low128(o->in2);
2598 return NO_EXIT;
2601 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
2603 tcg_gen_mul_i64(o->out, o->in1, o->in2);
2604 return NO_EXIT;
2607 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
2609 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
2610 return NO_EXIT;
2613 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
2615 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
2616 return NO_EXIT;
2619 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
2621 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
2622 return NO_EXIT;
2625 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
2627 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
2628 return NO_EXIT;
2631 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
2633 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2634 return_low128(o->out2);
2635 return NO_EXIT;
2638 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
2640 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
2641 return_low128(o->out2);
2642 return NO_EXIT;
2645 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
2647 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2648 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
2649 tcg_temp_free_i64(r3);
2650 return NO_EXIT;
2653 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
2655 int r3 = get_field(s->fields, r3);
2656 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2657 return NO_EXIT;
2660 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
2662 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
2663 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
2664 tcg_temp_free_i64(r3);
2665 return NO_EXIT;
2668 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
2670 int r3 = get_field(s->fields, r3);
2671 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
2672 return NO_EXIT;
2675 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
2677 gen_helper_nabs_i64(o->out, o->in2);
2678 return NO_EXIT;
2681 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
2683 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
2684 return NO_EXIT;
2687 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
2689 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
2690 return NO_EXIT;
2693 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
2695 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
2696 tcg_gen_mov_i64(o->out2, o->in2);
2697 return NO_EXIT;
2700 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
2702 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2703 potential_page_fault(s);
2704 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
2705 tcg_temp_free_i32(l);
2706 set_cc_static(s);
2707 return NO_EXIT;
2710 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
2712 tcg_gen_neg_i64(o->out, o->in2);
2713 return NO_EXIT;
2716 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
2718 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
2719 return NO_EXIT;
2722 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
2724 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
2725 return NO_EXIT;
2728 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
2730 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
2731 tcg_gen_mov_i64(o->out2, o->in2);
2732 return NO_EXIT;
2735 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
2737 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
2738 potential_page_fault(s);
2739 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
2740 tcg_temp_free_i32(l);
2741 set_cc_static(s);
2742 return NO_EXIT;
2745 static ExitStatus op_or(DisasContext *s, DisasOps *o)
2747 tcg_gen_or_i64(o->out, o->in1, o->in2);
2748 return NO_EXIT;
2751 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
2753 int shift = s->insn->data & 0xff;
2754 int size = s->insn->data >> 8;
2755 uint64_t mask = ((1ull << size) - 1) << shift;
2757 assert(!o->g_in2);
2758 tcg_gen_shli_i64(o->in2, o->in2, shift);
2759 tcg_gen_or_i64(o->out, o->in1, o->in2);
2761 /* Produce the CC from only the bits manipulated. */
2762 tcg_gen_andi_i64(cc_dst, o->out, mask);
2763 set_cc_nz_u64(s, cc_dst);
2764 return NO_EXIT;
2767 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
2769 gen_helper_popcnt(o->out, o->in2);
2770 return NO_EXIT;
2773 #ifndef CONFIG_USER_ONLY
2774 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
2776 check_privileged(s);
2777 gen_helper_ptlb(cpu_env);
2778 return NO_EXIT;
2780 #endif
2782 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
2784 int i3 = get_field(s->fields, i3);
2785 int i4 = get_field(s->fields, i4);
2786 int i5 = get_field(s->fields, i5);
2787 int do_zero = i4 & 0x80;
2788 uint64_t mask, imask, pmask;
2789 int pos, len, rot;
2791 /* Adjust the arguments for the specific insn. */
2792 switch (s->fields->op2) {
2793 case 0x55: /* risbg */
2794 i3 &= 63;
2795 i4 &= 63;
2796 pmask = ~0;
2797 break;
2798 case 0x5d: /* risbhg */
2799 i3 &= 31;
2800 i4 &= 31;
2801 pmask = 0xffffffff00000000ull;
2802 break;
2803 case 0x51: /* risblg */
2804 i3 &= 31;
2805 i4 &= 31;
2806 pmask = 0x00000000ffffffffull;
2807 break;
2808 default:
2809 abort();
2812 /* MASK is the set of bits to be inserted from R2.
2813 Take care for I3/I4 wraparound. */
2814 mask = pmask >> i3;
2815 if (i3 <= i4) {
2816 mask ^= pmask >> i4 >> 1;
2817 } else {
2818 mask |= ~(pmask >> i4 >> 1);
2820 mask &= pmask;
2822 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
2823 insns, we need to keep the other half of the register. */
2824 imask = ~mask | ~pmask;
2825 if (do_zero) {
2826 if (s->fields->op2 == 0x55) {
2827 imask = 0;
2828 } else {
2829 imask = ~pmask;
2833 /* In some cases we can implement this with deposit, which can be more
2834 efficient on some hosts. */
2835 if (~mask == imask && i3 <= i4) {
2836 if (s->fields->op2 == 0x5d) {
2837 i3 += 32, i4 += 32;
2839 /* Note that we rotate the bits to be inserted to the lsb, not to
2840 the position as described in the PoO. */
2841 len = i4 - i3 + 1;
2842 pos = 63 - i4;
2843 rot = (i5 - pos) & 63;
2844 } else {
2845 pos = len = -1;
2846 rot = i5 & 63;
2849 /* Rotate the input as necessary. */
2850 tcg_gen_rotli_i64(o->in2, o->in2, rot);
2852 /* Insert the selected bits into the output. */
2853 if (pos >= 0) {
2854 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
2855 } else if (imask == 0) {
2856 tcg_gen_andi_i64(o->out, o->in2, mask);
2857 } else {
2858 tcg_gen_andi_i64(o->in2, o->in2, mask);
2859 tcg_gen_andi_i64(o->out, o->out, imask);
2860 tcg_gen_or_i64(o->out, o->out, o->in2);
2862 return NO_EXIT;
2865 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
2867 int i3 = get_field(s->fields, i3);
2868 int i4 = get_field(s->fields, i4);
2869 int i5 = get_field(s->fields, i5);
2870 uint64_t mask;
2872 /* If this is a test-only form, arrange to discard the result. */
2873 if (i3 & 0x80) {
2874 o->out = tcg_temp_new_i64();
2875 o->g_out = false;
2878 i3 &= 63;
2879 i4 &= 63;
2880 i5 &= 63;
2882 /* MASK is the set of bits to be operated on from R2.
2883 Take care for I3/I4 wraparound. */
2884 mask = ~0ull >> i3;
2885 if (i3 <= i4) {
2886 mask ^= ~0ull >> i4 >> 1;
2887 } else {
2888 mask |= ~(~0ull >> i4 >> 1);
2891 /* Rotate the input as necessary. */
2892 tcg_gen_rotli_i64(o->in2, o->in2, i5);
2894 /* Operate. */
2895 switch (s->fields->op2) {
2896 case 0x55: /* AND */
2897 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
2898 tcg_gen_and_i64(o->out, o->out, o->in2);
2899 break;
2900 case 0x56: /* OR */
2901 tcg_gen_andi_i64(o->in2, o->in2, mask);
2902 tcg_gen_or_i64(o->out, o->out, o->in2);
2903 break;
2904 case 0x57: /* XOR */
2905 tcg_gen_andi_i64(o->in2, o->in2, mask);
2906 tcg_gen_xor_i64(o->out, o->out, o->in2);
2907 break;
2908 default:
2909 abort();
2912 /* Set the CC. */
2913 tcg_gen_andi_i64(cc_dst, o->out, mask);
2914 set_cc_nz_u64(s, cc_dst);
2915 return NO_EXIT;
2918 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
2920 tcg_gen_bswap16_i64(o->out, o->in2);
2921 return NO_EXIT;
2924 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
2926 tcg_gen_bswap32_i64(o->out, o->in2);
2927 return NO_EXIT;
2930 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
2932 tcg_gen_bswap64_i64(o->out, o->in2);
2933 return NO_EXIT;
2936 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
2938 TCGv_i32 t1 = tcg_temp_new_i32();
2939 TCGv_i32 t2 = tcg_temp_new_i32();
2940 TCGv_i32 to = tcg_temp_new_i32();
2941 tcg_gen_trunc_i64_i32(t1, o->in1);
2942 tcg_gen_trunc_i64_i32(t2, o->in2);
2943 tcg_gen_rotl_i32(to, t1, t2);
2944 tcg_gen_extu_i32_i64(o->out, to);
2945 tcg_temp_free_i32(t1);
2946 tcg_temp_free_i32(t2);
2947 tcg_temp_free_i32(to);
2948 return NO_EXIT;
2951 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
2953 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
2954 return NO_EXIT;
2957 #ifndef CONFIG_USER_ONLY
2958 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
2960 check_privileged(s);
2961 gen_helper_rrbe(cc_op, cpu_env, o->in2);
2962 set_cc_static(s);
2963 return NO_EXIT;
2966 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
2968 check_privileged(s);
2969 gen_helper_sacf(cpu_env, o->in2);
2970 /* Addressing mode has changed, so end the block. */
2971 return EXIT_PC_STALE;
2973 #endif
2975 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
2977 int sam = s->insn->data;
2978 TCGv_i64 tsam;
2979 uint64_t mask;
2981 switch (sam) {
2982 case 0:
2983 mask = 0xffffff;
2984 break;
2985 case 1:
2986 mask = 0x7fffffff;
2987 break;
2988 default:
2989 mask = -1;
2990 break;
2993 /* Bizzare but true, we check the address of the current insn for the
2994 specification exception, not the next to be executed. Thus the PoO
2995 documents that Bad Things Happen two bytes before the end. */
2996 if (s->pc & ~mask) {
2997 gen_program_exception(s, PGM_SPECIFICATION);
2998 return EXIT_NORETURN;
3000 s->next_pc &= mask;
3002 tsam = tcg_const_i64(sam);
3003 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3004 tcg_temp_free_i64(tsam);
3006 /* Always exit the TB, since we (may have) changed execution mode. */
3007 return EXIT_PC_STALE;
3010 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3012 int r1 = get_field(s->fields, r1);
3013 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3014 return NO_EXIT;
3017 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3019 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3020 return NO_EXIT;
3023 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3025 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3026 return NO_EXIT;
3029 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3031 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3032 return_low128(o->out2);
3033 return NO_EXIT;
3036 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3038 gen_helper_sqeb(o->out, cpu_env, o->in2);
3039 return NO_EXIT;
3042 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3044 gen_helper_sqdb(o->out, cpu_env, o->in2);
3045 return NO_EXIT;
3048 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3050 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3051 return_low128(o->out2);
3052 return NO_EXIT;
3055 #ifndef CONFIG_USER_ONLY
3056 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3058 check_privileged(s);
3059 potential_page_fault(s);
3060 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3061 set_cc_static(s);
3062 return NO_EXIT;
3065 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3067 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3068 check_privileged(s);
3069 potential_page_fault(s);
3070 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3071 tcg_temp_free_i32(r1);
3072 return NO_EXIT;
3074 #endif
3076 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3078 DisasCompare c;
3079 TCGv_i64 a;
3080 TCGLabel *lab;
3081 int r1;
3083 disas_jcc(s, &c, get_field(s->fields, m3));
3085 lab = gen_new_label();
3086 if (c.is_64) {
3087 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3088 } else {
3089 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3091 free_compare(&c);
3093 r1 = get_field(s->fields, r1);
3094 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3095 if (s->insn->data) {
3096 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3097 } else {
3098 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3100 tcg_temp_free_i64(a);
3102 gen_set_label(lab);
3103 return NO_EXIT;
3106 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3108 uint64_t sign = 1ull << s->insn->data;
3109 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3110 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3111 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3112 /* The arithmetic left shift is curious in that it does not affect
3113 the sign bit. Copy that over from the source unchanged. */
3114 tcg_gen_andi_i64(o->out, o->out, ~sign);
3115 tcg_gen_andi_i64(o->in1, o->in1, sign);
3116 tcg_gen_or_i64(o->out, o->out, o->in1);
3117 return NO_EXIT;
3120 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3122 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3123 return NO_EXIT;
3126 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3128 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3129 return NO_EXIT;
3132 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3134 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3135 return NO_EXIT;
3138 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3140 gen_helper_sfpc(cpu_env, o->in2);
3141 return NO_EXIT;
3144 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3146 gen_helper_sfas(cpu_env, o->in2);
3147 return NO_EXIT;
3150 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3152 int b2 = get_field(s->fields, b2);
3153 int d2 = get_field(s->fields, d2);
3154 TCGv_i64 t1 = tcg_temp_new_i64();
3155 TCGv_i64 t2 = tcg_temp_new_i64();
3156 int mask, pos, len;
3158 switch (s->fields->op2) {
3159 case 0x99: /* SRNM */
3160 pos = 0, len = 2;
3161 break;
3162 case 0xb8: /* SRNMB */
3163 pos = 0, len = 3;
3164 break;
3165 case 0xb9: /* SRNMT */
3166 pos = 4, len = 3;
3167 break;
3168 default:
3169 tcg_abort();
3171 mask = (1 << len) - 1;
3173 /* Insert the value into the appropriate field of the FPC. */
3174 if (b2 == 0) {
3175 tcg_gen_movi_i64(t1, d2 & mask);
3176 } else {
3177 tcg_gen_addi_i64(t1, regs[b2], d2);
3178 tcg_gen_andi_i64(t1, t1, mask);
3180 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3181 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3182 tcg_temp_free_i64(t1);
3184 /* Then install the new FPC to set the rounding mode in fpu_status. */
3185 gen_helper_sfpc(cpu_env, t2);
3186 tcg_temp_free_i64(t2);
3187 return NO_EXIT;
3190 #ifndef CONFIG_USER_ONLY
3191 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3193 check_privileged(s);
3194 tcg_gen_shri_i64(o->in2, o->in2, 4);
3195 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
3196 return NO_EXIT;
3199 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3201 check_privileged(s);
3202 gen_helper_sske(cpu_env, o->in1, o->in2);
3203 return NO_EXIT;
3206 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3208 check_privileged(s);
3209 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3210 return NO_EXIT;
3213 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3215 check_privileged(s);
3216 /* ??? Surely cpu address != cpu number. In any case the previous
3217 version of this stored more than the required half-word, so it
3218 is unlikely this has ever been tested. */
3219 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3220 return NO_EXIT;
3223 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3225 gen_helper_stck(o->out, cpu_env);
3226 /* ??? We don't implement clock states. */
3227 gen_op_movi_cc(s, 0);
3228 return NO_EXIT;
3231 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3233 TCGv_i64 c1 = tcg_temp_new_i64();
3234 TCGv_i64 c2 = tcg_temp_new_i64();
3235 gen_helper_stck(c1, cpu_env);
3236 /* Shift the 64-bit value into its place as a zero-extended
3237 104-bit value. Note that "bit positions 64-103 are always
3238 non-zero so that they compare differently to STCK"; we set
3239 the least significant bit to 1. */
3240 tcg_gen_shli_i64(c2, c1, 56);
3241 tcg_gen_shri_i64(c1, c1, 8);
3242 tcg_gen_ori_i64(c2, c2, 0x10000);
3243 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3244 tcg_gen_addi_i64(o->in2, o->in2, 8);
3245 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3246 tcg_temp_free_i64(c1);
3247 tcg_temp_free_i64(c2);
3248 /* ??? We don't implement clock states. */
3249 gen_op_movi_cc(s, 0);
3250 return NO_EXIT;
3253 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3255 check_privileged(s);
3256 gen_helper_sckc(cpu_env, o->in2);
3257 return NO_EXIT;
3260 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3262 check_privileged(s);
3263 gen_helper_stckc(o->out, cpu_env);
3264 return NO_EXIT;
3267 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3269 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3270 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3271 check_privileged(s);
3272 potential_page_fault(s);
3273 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3274 tcg_temp_free_i32(r1);
3275 tcg_temp_free_i32(r3);
3276 return NO_EXIT;
3279 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3281 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3282 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3283 check_privileged(s);
3284 potential_page_fault(s);
3285 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3286 tcg_temp_free_i32(r1);
3287 tcg_temp_free_i32(r3);
3288 return NO_EXIT;
3291 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3293 TCGv_i64 t1 = tcg_temp_new_i64();
3295 check_privileged(s);
3296 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
3297 tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
3298 tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
3299 tcg_temp_free_i64(t1);
3301 return NO_EXIT;
3304 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3306 check_privileged(s);
3307 gen_helper_spt(cpu_env, o->in2);
3308 return NO_EXIT;
3311 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3313 TCGv_i64 f, a;
3314 /* We really ought to have more complete indication of facilities
3315 that we implement. Address this when STFLE is implemented. */
3316 check_privileged(s);
3317 f = tcg_const_i64(0xc0000000);
3318 a = tcg_const_i64(200);
3319 tcg_gen_qemu_st32(f, a, get_mem_index(s));
3320 tcg_temp_free_i64(f);
3321 tcg_temp_free_i64(a);
3322 return NO_EXIT;
3325 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3327 check_privileged(s);
3328 gen_helper_stpt(o->out, cpu_env);
3329 return NO_EXIT;
3332 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3334 check_privileged(s);
3335 potential_page_fault(s);
3336 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3337 set_cc_static(s);
3338 return NO_EXIT;
3341 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3343 check_privileged(s);
3344 gen_helper_spx(cpu_env, o->in2);
3345 return NO_EXIT;
3348 static ExitStatus op_subchannel(DisasContext *s, DisasOps *o)
3350 check_privileged(s);
3351 /* Not operational. */
3352 gen_op_movi_cc(s, 3);
3353 return NO_EXIT;
3356 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
3358 check_privileged(s);
3359 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
3360 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
3361 return NO_EXIT;
3364 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
3366 uint64_t i2 = get_field(s->fields, i2);
3367 TCGv_i64 t;
3369 check_privileged(s);
3371 /* It is important to do what the instruction name says: STORE THEN.
3372 If we let the output hook perform the store then if we fault and
3373 restart, we'll have the wrong SYSTEM MASK in place. */
3374 t = tcg_temp_new_i64();
3375 tcg_gen_shri_i64(t, psw_mask, 56);
3376 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
3377 tcg_temp_free_i64(t);
3379 if (s->fields->op == 0xac) {
3380 tcg_gen_andi_i64(psw_mask, psw_mask,
3381 (i2 << 56) | 0x00ffffffffffffffull);
3382 } else {
3383 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
3385 return NO_EXIT;
3388 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
3390 check_privileged(s);
3391 potential_page_fault(s);
3392 gen_helper_stura(cpu_env, o->in2, o->in1);
3393 return NO_EXIT;
3396 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
3398 check_privileged(s);
3399 potential_page_fault(s);
3400 gen_helper_sturg(cpu_env, o->in2, o->in1);
3401 return NO_EXIT;
3403 #endif
3405 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
3407 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
3408 return NO_EXIT;
3411 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
3413 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
3414 return NO_EXIT;
3417 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
3419 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
3420 return NO_EXIT;
3423 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
3425 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
3426 return NO_EXIT;
3429 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
3431 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3432 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3433 potential_page_fault(s);
3434 gen_helper_stam(cpu_env, r1, o->in2, r3);
3435 tcg_temp_free_i32(r1);
3436 tcg_temp_free_i32(r3);
3437 return NO_EXIT;
3440 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
3442 int m3 = get_field(s->fields, m3);
3443 int pos, base = s->insn->data;
3444 TCGv_i64 tmp = tcg_temp_new_i64();
3446 pos = base + ctz32(m3) * 8;
3447 switch (m3) {
3448 case 0xf:
3449 /* Effectively a 32-bit store. */
3450 tcg_gen_shri_i64(tmp, o->in1, pos);
3451 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
3452 break;
3454 case 0xc:
3455 case 0x6:
3456 case 0x3:
3457 /* Effectively a 16-bit store. */
3458 tcg_gen_shri_i64(tmp, o->in1, pos);
3459 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
3460 break;
3462 case 0x8:
3463 case 0x4:
3464 case 0x2:
3465 case 0x1:
3466 /* Effectively an 8-bit store. */
3467 tcg_gen_shri_i64(tmp, o->in1, pos);
3468 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3469 break;
3471 default:
3472 /* This is going to be a sequence of shifts and stores. */
3473 pos = base + 32 - 8;
3474 while (m3) {
3475 if (m3 & 0x8) {
3476 tcg_gen_shri_i64(tmp, o->in1, pos);
3477 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
3478 tcg_gen_addi_i64(o->in2, o->in2, 1);
3480 m3 = (m3 << 1) & 0xf;
3481 pos -= 8;
3483 break;
3485 tcg_temp_free_i64(tmp);
3486 return NO_EXIT;
3489 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
3491 int r1 = get_field(s->fields, r1);
3492 int r3 = get_field(s->fields, r3);
3493 int size = s->insn->data;
3494 TCGv_i64 tsize = tcg_const_i64(size);
3496 while (1) {
3497 if (size == 8) {
3498 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
3499 } else {
3500 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
3502 if (r1 == r3) {
3503 break;
3505 tcg_gen_add_i64(o->in2, o->in2, tsize);
3506 r1 = (r1 + 1) & 15;
3509 tcg_temp_free_i64(tsize);
3510 return NO_EXIT;
3513 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3515 int r1 = get_field(s->fields, r1);
3516 int r3 = get_field(s->fields, r3);
3517 TCGv_i64 t = tcg_temp_new_i64();
3518 TCGv_i64 t4 = tcg_const_i64(4);
3519 TCGv_i64 t32 = tcg_const_i64(32);
3521 while (1) {
3522 tcg_gen_shl_i64(t, regs[r1], t32);
3523 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3524 if (r1 == r3) {
3525 break;
3527 tcg_gen_add_i64(o->in2, o->in2, t4);
3528 r1 = (r1 + 1) & 15;
3531 tcg_temp_free_i64(t);
3532 tcg_temp_free_i64(t4);
3533 tcg_temp_free_i64(t32);
3534 return NO_EXIT;
3537 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
3539 potential_page_fault(s);
3540 gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3541 set_cc_static(s);
3542 return_low128(o->in2);
3543 return NO_EXIT;
3546 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
3548 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3549 return NO_EXIT;
3552 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
3554 DisasCompare cmp;
3555 TCGv_i64 borrow;
3557 tcg_gen_sub_i64(o->out, o->in1, o->in2);
3559 /* The !borrow flag is the msb of CC. Since we want the inverse of
3560 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
3561 disas_jcc(s, &cmp, 8 | 4);
3562 borrow = tcg_temp_new_i64();
3563 if (cmp.is_64) {
3564 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
3565 } else {
3566 TCGv_i32 t = tcg_temp_new_i32();
3567 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
3568 tcg_gen_extu_i32_i64(borrow, t);
3569 tcg_temp_free_i32(t);
3571 free_compare(&cmp);
3573 tcg_gen_sub_i64(o->out, o->out, borrow);
3574 tcg_temp_free_i64(borrow);
3575 return NO_EXIT;
3578 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
3580 TCGv_i32 t;
3582 update_psw_addr(s);
3583 update_cc_op(s);
3585 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
3586 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
3587 tcg_temp_free_i32(t);
3589 t = tcg_const_i32(s->next_pc - s->pc);
3590 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
3591 tcg_temp_free_i32(t);
3593 gen_exception(EXCP_SVC);
3594 return EXIT_NORETURN;
3597 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
3599 gen_helper_tceb(cc_op, o->in1, o->in2);
3600 set_cc_static(s);
3601 return NO_EXIT;
3604 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
3606 gen_helper_tcdb(cc_op, o->in1, o->in2);
3607 set_cc_static(s);
3608 return NO_EXIT;
3611 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
3613 gen_helper_tcxb(cc_op, o->out, o->out2, o->in2);
3614 set_cc_static(s);
3615 return NO_EXIT;
3618 #ifndef CONFIG_USER_ONLY
3619 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
3621 potential_page_fault(s);
3622 gen_helper_tprot(cc_op, o->addr1, o->in2);
3623 set_cc_static(s);
3624 return NO_EXIT;
3626 #endif
3628 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
3630 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3631 potential_page_fault(s);
3632 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
3633 tcg_temp_free_i32(l);
3634 set_cc_static(s);
3635 return NO_EXIT;
3638 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
3640 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3641 potential_page_fault(s);
3642 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
3643 tcg_temp_free_i32(l);
3644 return NO_EXIT;
/* EXCLUSIVE OR (XC): storage-to-storage XOR of L1+1 bytes.
   Because x ^ x == 0, the common "clear storage" idiom (both operands
   name the same location) is inlined below as plain stores of zero;
   every other case is deferred to the helper. */
3647 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
3649 int d1 = get_field(s->fields, d1);
3650 int d2 = get_field(s->fields, d2);
3651 int b1 = get_field(s->fields, b1);
3652 int b2 = get_field(s->fields, b2);
3653 int l = get_field(s->fields, l1);
3654 TCGv_i32 t32;
3656 o->addr1 = get_address(s, 0, b1, d1);
3658 /* If the addresses are identical, this is a store/memset of zero. */
/* Only inline up to 32 bytes; L is the encoded length minus one. */
3659 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
3660 o->in2 = tcg_const_i64(0);
/* Normalize l from "length - 1" encoding to an actual byte count. */
3662 l++;
/* Emit the widest stores first, bumping the address between pieces;
   worst case is four 8-byte stores plus the 4/2/1-byte tail. */
3663 while (l >= 8) {
3664 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
3665 l -= 8;
3666 if (l > 0) {
3667 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
3670 if (l >= 4) {
3671 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
3672 l -= 4;
3673 if (l > 0) {
3674 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
3677 if (l >= 2) {
3678 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
3679 l -= 2;
3680 if (l > 0) {
3681 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
3684 if (l) {
3685 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
/* x ^ x is zero, so the condition code is statically 0. */
3687 gen_op_movi_cc(s, 0);
3688 return NO_EXIT;
3691 /* But in general we'll defer to a helper. */
3692 o->in2 = get_address(s, 0, b2, d2);
3693 t32 = tcg_const_i32(l);
3694 potential_page_fault(s);
3695 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
3696 tcg_temp_free_i32(t32);
3697 set_cc_static(s);
3698 return NO_EXIT;
3701 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
3703 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3704 return NO_EXIT;
/* XOR IMMEDIATE (XIHF/XILF family): in2 holds the immediate, and
   s->insn->data packs (field_size << 8) | shift_amount, selecting
   which 16/32-bit slice of the register is XORed. */
3707 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
3709 int shift = s->insn->data & 0xff;
3710 int size = s->insn->data >> 8;
3711 uint64_t mask = ((1ull << size) - 1) << shift;
/* in2 must be a private temporary so it can be shifted in place. */
3713 assert(!o->g_in2);
3714 tcg_gen_shli_i64(o->in2, o->in2, shift);
3715 tcg_gen_xor_i64(o->out, o->in1, o->in2);
3717 /* Produce the CC from only the bits manipulated. */
3718 tcg_gen_andi_i64(cc_dst, o->out, mask);
3719 set_cc_nz_u64(s, cc_dst);
3720 return NO_EXIT;
3723 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
3725 o->out = tcg_const_i64(0);
3726 return NO_EXIT;
3729 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
3731 o->out = tcg_const_i64(0);
3732 o->out2 = o->out;
3733 o->g_out2 = true;
3734 return NO_EXIT;
3737 /* ====================================================================== */
3738 /* The "Cc OUTput" generators. Given the generated output (and in some cases
3739 the original inputs), update the various cc data structures in order to
3740 be able to compute the new condition code. */
/* Each generator only records a CC_OP_* tag plus the relevant values in
   cc_src/cc_dst/cc_vr; the actual CC is computed lazily elsewhere. */
3742 static void cout_abs32(DisasContext *s, DisasOps *o)
3744 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
3747 static void cout_abs64(DisasContext *s, DisasOps *o)
3749 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
3752 static void cout_adds32(DisasContext *s, DisasOps *o)
3754 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
3757 static void cout_adds64(DisasContext *s, DisasOps *o)
3759 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
3762 static void cout_addu32(DisasContext *s, DisasOps *o)
3764 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
3767 static void cout_addu64(DisasContext *s, DisasOps *o)
3769 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
3772 static void cout_addc32(DisasContext *s, DisasOps *o)
3774 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
3777 static void cout_addc64(DisasContext *s, DisasOps *o)
3779 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
3782 static void cout_cmps32(DisasContext *s, DisasOps *o)
3784 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
3787 static void cout_cmps64(DisasContext *s, DisasOps *o)
3789 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
3792 static void cout_cmpu32(DisasContext *s, DisasOps *o)
3794 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
3797 static void cout_cmpu64(DisasContext *s, DisasOps *o)
3799 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
3802 static void cout_f32(DisasContext *s, DisasOps *o)
3804 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
3807 static void cout_f64(DisasContext *s, DisasOps *o)
3809 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
3812 static void cout_f128(DisasContext *s, DisasOps *o)
3814 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
3817 static void cout_nabs32(DisasContext *s, DisasOps *o)
3819 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
3822 static void cout_nabs64(DisasContext *s, DisasOps *o)
3824 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
3827 static void cout_neg32(DisasContext *s, DisasOps *o)
3829 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
3832 static void cout_neg64(DisasContext *s, DisasOps *o)
3834 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
/* 32-bit NZ test: mask off the high half first so stale upper bits in
   the 64-bit output cannot leak into the CC computation. */
3837 static void cout_nz32(DisasContext *s, DisasOps *o)
3839 tcg_gen_ext32u_i64(cc_dst, o->out);
3840 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
3843 static void cout_nz64(DisasContext *s, DisasOps *o)
3845 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
3848 static void cout_s32(DisasContext *s, DisasOps *o)
3850 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
3853 static void cout_s64(DisasContext *s, DisasOps *o)
3855 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
3858 static void cout_subs32(DisasContext *s, DisasOps *o)
3860 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
3863 static void cout_subs64(DisasContext *s, DisasOps *o)
3865 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
3868 static void cout_subu32(DisasContext *s, DisasOps *o)
3870 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
3873 static void cout_subu64(DisasContext *s, DisasOps *o)
3875 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
3878 static void cout_subb32(DisasContext *s, DisasOps *o)
3880 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
3883 static void cout_subb64(DisasContext *s, DisasOps *o)
3885 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
3888 static void cout_tm32(DisasContext *s, DisasOps *o)
3890 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
3893 static void cout_tm64(DisasContext *s, DisasOps *o)
3895 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
3898 /* ====================================================================== */
3899 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
3900 with the TCG register to which we will write. Used in combination with
3901 the "wout" generators, in some cases we need a new temporary, and in
3902 some cases we can write to a TCG global. */
/* The SPEC_prep_* macros feed the per-insn specification-exception mask. */
3904 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
3906 o->out = tcg_temp_new_i64();
3908 #define SPEC_prep_new 0
3910 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
3912 o->out = tcg_temp_new_i64();
3913 o->out2 = tcg_temp_new_i64();
3915 #define SPEC_prep_new_P 0
/* Write directly into the r1 GPR global; g_out prevents freeing it. */
3917 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3919 o->out = regs[get_field(f, r1)];
3920 o->g_out = true;
3922 #define SPEC_prep_r1 0
/* Even/odd GPR pair r1:r1+1; requires an even r1 (spec exception). */
3924 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
3926 int r1 = get_field(f, r1);
3927 o->out = regs[r1];
3928 o->out2 = regs[r1 + 1];
3929 o->g_out = o->g_out2 = true;
3931 #define SPEC_prep_r1_P SPEC_r1_even
3933 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
3935 o->out = fregs[get_field(f, r1)];
3936 o->g_out = true;
3938 #define SPEC_prep_f1 0
/* 128-bit FP value lives in FPR pair r1:r1+2; r1 must be a valid
   extended-format register (<= 13), hence SPEC_r1_f128. */
3940 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
3942 int r1 = get_field(f, r1);
3943 o->out = fregs[r1];
3944 o->out2 = fregs[r1 + 2];
3945 o->g_out = o->g_out2 = true;
3947 #define SPEC_prep_x1 SPEC_r1_f128
3949 /* ====================================================================== */
3950 /* The "Write OUTput" generators. These generally perform some non-trivial
3951 copy of data to TCG globals, or to main memory. The trivial cases are
3952 generally handled by having a "prep" generator install the TCG global
3953 as the destination of the operation. */
3955 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
3957 store_reg(get_field(f, r1), o->out);
3959 #define SPEC_wout_r1 0
/* Insert only the low 8 bits of the result into r1 (e.g. IC). */
3961 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
3963 int r1 = get_field(f, r1);
3964 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
3966 #define SPEC_wout_r1_8 0
3968 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
3970 int r1 = get_field(f, r1);
3971 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
3973 #define SPEC_wout_r1_16 0
3975 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
3977 store_reg32_i64(get_field(f, r1), o->out);
3979 #define SPEC_wout_r1_32 0
/* Two 32-bit results into the even/odd pair r1:r1+1. */
3981 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
3983 int r1 = get_field(f, r1);
3984 store_reg32_i64(r1, o->out);
3985 store_reg32_i64(r1 + 1, o->out2);
3987 #define SPEC_wout_r1_P32 SPEC_r1_even
/* Split a 64-bit result across the pair: low half to r1+1, high half
   to r1.  Note o->out is shifted in place (it is consumed here). */
3989 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
3991 int r1 = get_field(f, r1);
3992 store_reg32_i64(r1 + 1, o->out);
3993 tcg_gen_shri_i64(o->out, o->out, 32);
3994 store_reg32_i64(r1, o->out);
3996 #define SPEC_wout_r1_D32 SPEC_r1_even
3998 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4000 store_freg32_i64(get_field(f, r1), o->out);
4002 #define SPEC_wout_e1 0
4004 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4006 store_freg(get_field(f, r1), o->out);
4008 #define SPEC_wout_f1 0
4010 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4012 int f1 = get_field(s->fields, r1);
4013 store_freg(f1, o->out);
4014 store_freg(f1 + 2, o->out2);
4016 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional writers: only store when r1 and r2 differ, used by
   insns whose result is a no-op when both operands name one register. */
4018 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4020 if (get_field(f, r1) != get_field(f, r2)) {
4021 store_reg32_i64(get_field(f, r1), o->out);
4024 #define SPEC_wout_cond_r1r2_32 0
4026 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4028 if (get_field(f, r1) != get_field(f, r2)) {
4029 store_freg32_i64(get_field(f, r1), o->out);
4032 #define SPEC_wout_cond_e1e2 0
/* Memory writers: store the result at the effective address addr1
   (or, for m2_32, at the address computed into in2). */
4034 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4036 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4038 #define SPEC_wout_m1_8 0
4040 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4042 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4044 #define SPEC_wout_m1_16 0
4046 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4048 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4050 #define SPEC_wout_m1_32 0
4052 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4054 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4056 #define SPEC_wout_m1_64 0
4058 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4060 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4062 #define SPEC_wout_m2_32 0
4064 /* ====================================================================== */
4065 /* The "INput 1" generators. These load the first operand to an insn. */
/* "_o" variants alias a TCG global directly (g_in1 set, not freed);
   plain variants copy into a fresh temporary. */
4067 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4069 o->in1 = load_reg(get_field(f, r1));
4071 #define SPEC_in1_r1 0
4073 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4075 o->in1 = regs[get_field(f, r1)];
4076 o->g_in1 = true;
4078 #define SPEC_in1_r1_o 0
4080 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4082 o->in1 = tcg_temp_new_i64();
4083 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
4085 #define SPEC_in1_r1_32s 0
4087 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4089 o->in1 = tcg_temp_new_i64();
4090 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
4092 #define SPEC_in1_r1_32u 0
/* High word of r1 shifted down (high-word facility insns). */
4094 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
4096 o->in1 = tcg_temp_new_i64();
4097 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
4099 #define SPEC_in1_r1_sr32 0
/* r1p1 variants read the odd register of the even/odd pair. */
4101 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
4103 o->in1 = load_reg(get_field(f, r1) + 1);
4105 #define SPEC_in1_r1p1 SPEC_r1_even
4107 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4109 o->in1 = tcg_temp_new_i64();
4110 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
4112 #define SPEC_in1_r1p1_32s SPEC_r1_even
4114 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4116 o->in1 = tcg_temp_new_i64();
4117 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
4119 #define SPEC_in1_r1p1_32u SPEC_r1_even
/* D32: concatenate the pair's two 32-bit halves into one 64-bit value
   (r1 supplies the high half, r1+1 the low half). */
4121 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4123 int r1 = get_field(f, r1);
4124 o->in1 = tcg_temp_new_i64();
4125 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
4127 #define SPEC_in1_r1_D32 SPEC_r1_even
4129 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4131 o->in1 = load_reg(get_field(f, r2));
4133 #define SPEC_in1_r2 0
4135 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4137 o->in1 = load_reg(get_field(f, r3));
4139 #define SPEC_in1_r3 0
4141 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4143 o->in1 = regs[get_field(f, r3)];
4144 o->g_in1 = true;
4146 #define SPEC_in1_r3_o 0
4148 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4150 o->in1 = tcg_temp_new_i64();
4151 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
4153 #define SPEC_in1_r3_32s 0
4155 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4157 o->in1 = tcg_temp_new_i64();
4158 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
4160 #define SPEC_in1_r3_32u 0
4162 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4164 int r3 = get_field(f, r3);
4165 o->in1 = tcg_temp_new_i64();
4166 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
4168 #define SPEC_in1_r3_D32 SPEC_r3_even
4170 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4172 o->in1 = load_freg32_i64(get_field(f, r1));
4174 #define SPEC_in1_e1 0
4176 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4178 o->in1 = fregs[get_field(f, r1)];
4179 o->g_in1 = true;
4181 #define SPEC_in1_f1_o 0
/* NOTE(review): this writes o->out/o->out2 rather than o->in1/in2 —
   presumably so 128-bit ops see the x1 FPR pair as an in-place
   destination; confirm against the f128 insn users before changing. */
4183 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4185 int r1 = get_field(f, r1);
4186 o->out = fregs[r1];
4187 o->out2 = fregs[r1 + 2];
4188 o->g_out = o->g_out2 = true;
4190 #define SPEC_in1_x1_o SPEC_r1_f128
4192 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
4194 o->in1 = fregs[get_field(f, r3)];
4195 o->g_in1 = true;
4197 #define SPEC_in1_f3_o 0
/* Effective-address generators: compute base+displacement(+index). */
4199 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
4201 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
4203 #define SPEC_in1_la1 0
4205 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
4207 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4208 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4210 #define SPEC_in1_la2 0
/* Memory loaders: compute addr1, then load with the given width/sign. */
4212 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4214 in1_la1(s, f, o);
4215 o->in1 = tcg_temp_new_i64();
4216 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
4218 #define SPEC_in1_m1_8u 0
4220 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4222 in1_la1(s, f, o);
4223 o->in1 = tcg_temp_new_i64();
4224 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
4226 #define SPEC_in1_m1_16s 0
4228 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4230 in1_la1(s, f, o);
4231 o->in1 = tcg_temp_new_i64();
4232 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
4234 #define SPEC_in1_m1_16u 0
4236 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4238 in1_la1(s, f, o);
4239 o->in1 = tcg_temp_new_i64();
4240 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
4242 #define SPEC_in1_m1_32s 0
4244 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4246 in1_la1(s, f, o);
4247 o->in1 = tcg_temp_new_i64();
4248 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
4250 #define SPEC_in1_m1_32u 0
4252 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4254 in1_la1(s, f, o);
4255 o->in1 = tcg_temp_new_i64();
4256 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
4258 #define SPEC_in1_m1_64 0
4260 /* ====================================================================== */
4261 /* The "INput 2" generators. These load the second operand to an insn. */
/* Same conventions as in1_*: "_o" aliases a global, extension suffixes
   name the width and signedness of the value loaded into in2. */
4263 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
4265 o->in2 = regs[get_field(f, r1)];
4266 o->g_in2 = true;
4268 #define SPEC_in2_r1_o 0
4270 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4272 o->in2 = tcg_temp_new_i64();
4273 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
4275 #define SPEC_in2_r1_16u 0
4277 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4279 o->in2 = tcg_temp_new_i64();
4280 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
4282 #define SPEC_in2_r1_32u 0
4284 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4286 int r1 = get_field(f, r1);
4287 o->in2 = tcg_temp_new_i64();
4288 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
4290 #define SPEC_in2_r1_D32 SPEC_r1_even
4292 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
4294 o->in2 = load_reg(get_field(f, r2));
4296 #define SPEC_in2_r2 0
4298 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4300 o->in2 = regs[get_field(f, r2)];
4301 o->g_in2 = true;
4303 #define SPEC_in2_r2_o 0
/* r2 == 0 means "no operand" here; in2 is deliberately left unset. */
4305 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
4307 int r2 = get_field(f, r2);
4308 if (r2 != 0) {
4309 o->in2 = load_reg(r2);
4312 #define SPEC_in2_r2_nz 0
4314 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
4316 o->in2 = tcg_temp_new_i64();
4317 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
4319 #define SPEC_in2_r2_8s 0
4321 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4323 o->in2 = tcg_temp_new_i64();
4324 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
4326 #define SPEC_in2_r2_8u 0
4328 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4330 o->in2 = tcg_temp_new_i64();
4331 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
4333 #define SPEC_in2_r2_16s 0
4335 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4337 o->in2 = tcg_temp_new_i64();
4338 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
4340 #define SPEC_in2_r2_16u 0
4342 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
4344 o->in2 = load_reg(get_field(f, r3));
4346 #define SPEC_in2_r3 0
4348 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4350 o->in2 = tcg_temp_new_i64();
4351 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
4353 #define SPEC_in2_r2_32s 0
4355 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4357 o->in2 = tcg_temp_new_i64();
4358 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
4360 #define SPEC_in2_r2_32u 0
4362 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
4364 o->in2 = load_freg32_i64(get_field(f, r2));
4366 #define SPEC_in2_e2 0
4368 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4370 o->in2 = fregs[get_field(f, r2)];
4371 o->g_in2 = true;
4373 #define SPEC_in2_f2_o 0
/* 128-bit FP source x2: the pair r2:r2+2 fills BOTH in1 and in2. */
4375 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
4377 int r2 = get_field(f, r2);
4378 o->in1 = fregs[r2];
4379 o->in2 = fregs[r2 + 2];
4380 o->g_in1 = o->g_in2 = true;
4382 #define SPEC_in2_x2_o SPEC_r2_f128
/* ra2: r2 treated as a base register for an address (e.g. BASR). */
4384 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
4386 o->in2 = get_address(s, 0, get_field(f, r2), 0);
4388 #define SPEC_in2_ra2 0
4390 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
4392 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
4393 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
4395 #define SPEC_in2_a2 0
/* ri2: PC-relative address; the immediate is in halfword units. */
4397 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
4399 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
4401 #define SPEC_in2_ri2 0
4403 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
4405 help_l2_shift(s, f, o, 31);
4407 #define SPEC_in2_sh32 0
4409 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
4411 help_l2_shift(s, f, o, 63);
4413 #define SPEC_in2_sh64 0
/* m2_* loaders reuse in2 as both the address and the loaded value. */
4415 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4417 in2_a2(s, f, o);
4418 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
4420 #define SPEC_in2_m2_8u 0
4422 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
4424 in2_a2(s, f, o);
4425 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
4427 #define SPEC_in2_m2_16s 0
4429 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4431 in2_a2(s, f, o);
4432 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4434 #define SPEC_in2_m2_16u 0
4436 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4438 in2_a2(s, f, o);
4439 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4441 #define SPEC_in2_m2_32s 0
4443 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4445 in2_a2(s, f, o);
4446 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4448 #define SPEC_in2_m2_32u 0
4450 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4452 in2_a2(s, f, o);
4453 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4455 #define SPEC_in2_m2_64 0
/* mri2_* loaders: same, but through a PC-relative address. */
4457 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4459 in2_ri2(s, f, o);
4460 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4462 #define SPEC_in2_mri2_16u 0
4464 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4466 in2_ri2(s, f, o);
4467 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4469 #define SPEC_in2_mri2_32s 0
4471 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4473 in2_ri2(s, f, o);
4474 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4476 #define SPEC_in2_mri2_32u 0
4478 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4480 in2_ri2(s, f, o);
4481 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4483 #define SPEC_in2_mri2_64 0
/* Immediate generators: materialize i2 with the stated truncation. */
4485 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
4487 o->in2 = tcg_const_i64(get_field(f, i2));
4489 #define SPEC_in2_i2 0
4491 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
4493 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
4495 #define SPEC_in2_i2_8u 0
4497 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4499 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
4501 #define SPEC_in2_i2_16u 0
4503 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4505 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
4507 #define SPEC_in2_i2_32u 0
/* _shl variants pre-shift the immediate by the amount encoded in
   s->insn->data (e.g. for the IIHH/IIHL style insert insns). */
4509 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4511 uint64_t i2 = (uint16_t)get_field(f, i2);
4512 o->in2 = tcg_const_i64(i2 << s->insn->data);
4514 #define SPEC_in2_i2_16u_shl 0
4516 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
4518 uint64_t i2 = (uint32_t)get_field(f, i2);
4519 o->in2 = tcg_const_i64(i2 << s->insn->data);
4521 #define SPEC_in2_i2_32u_shl 0
4523 /* ====================================================================== */
4525 /* Find opc within the table of insns. This is formulated as a switch
4526 statement so that (1) we get compile-time notice of cut-paste errors
4527 for duplicated opcodes, and (2) the compiler generates the binary
4528 search tree, rather than us having to post-process the table. */
/* insn-data.def is included three times below with D() redefined each
   time: first to build the enum, then the info table, then the switch. */
4530 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
4531 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
4533 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
4535 enum DisasInsnEnum {
4536 #include "insn-data.def"
4539 #undef D
4540 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
4541 .opc = OPC, \
4542 .fmt = FMT_##FT, \
4543 .fac = FAC_##FC, \
4544 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
4545 .name = #NM, \
4546 .help_in1 = in1_##I1, \
4547 .help_in2 = in2_##I2, \
4548 .help_prep = prep_##P, \
4549 .help_wout = wout_##W, \
4550 .help_cout = cout_##CC, \
4551 .help_op = op_##OP, \
4552 .data = D \
4555 /* Allow 0 to be used for NULL in the table below. */
4556 #define in1_0 NULL
4557 #define in2_0 NULL
4558 #define prep_0 NULL
4559 #define wout_0 NULL
4560 #define cout_0 NULL
4561 #define op_0 NULL
4563 #define SPEC_in1_0 0
4564 #define SPEC_in2_0 0
4565 #define SPEC_prep_0 0
4566 #define SPEC_wout_0 0
4568 static const DisasInsn insn_info[] = {
4569 #include "insn-data.def"
4572 #undef D
4573 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
4574 case OPC: return &insn_info[insn_ ## NM];
/* Map a 16-bit (op << 8 | op2) key to its table entry, NULL if absent. */
4576 static const DisasInsn *lookup_opc(uint16_t opc)
4578 switch (opc) {
4579 #include "insn-data.def"
4580 default:
4581 return NULL;
4585 #undef D
4586 #undef C
4588 /* Extract a field from the insn. The INSN should be left-aligned in
4589 the uint64_t so that we can more easily utilize the big-bit-endian
4590 definitions we extract from the Principles of Operation. */
4592 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
4594 uint32_t r, m;
/* size == 0 marks an unused field slot in the format descriptor. */
4596 if (f->size == 0) {
4597 return;
4600 /* Zero extract the field from the insn. */
4601 r = (insn << f->beg) >> (64 - f->size);
4603 /* Sign-extend, or un-swap the field as necessary. */
4604 switch (f->type) {
4605 case 0: /* unsigned */
4606 break;
4607 case 1: /* signed */
4608 assert(f->size <= 32);
/* Classic branch-free sign extension: XOR with the sign bit, then
   subtract it back out. */
4609 m = 1u << (f->size - 1);
4610 r = (r ^ m) - m;
4611 break;
4612 case 2: /* dl+dh split, signed 20 bit. */
/* The 20-bit displacement arrives as DH:DL; the int8_t cast both
   repositions DH above DL and sign-extends it. */
4613 r = ((int8_t)r << 12) | (r >> 8);
4614 break;
4615 default:
4616 abort();
4619 /* Validate that the "compressed" encoding we selected above is valid.
4620 I.e. we haven't made two different original fields overlap. */
4621 assert(((o->presentC >> f->indexC) & 1) == 0);
4622 o->presentC |= 1 << f->indexC;
4623 o->presentO |= 1 << f->indexO;
4625 o->c[f->indexC] = r;
4628 /* Lookup the insn at the current PC, extracting the operands into O and
4629 returning the info struct for the insn. Returns NULL for invalid insn. */
4631 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
4632 DisasFields *f)
4634 uint64_t insn, pc = s->pc;
4635 int op, op2, ilen;
4636 const DisasInsn *info;
/* The first halfword's top byte determines the instruction length. */
4638 insn = ld_code2(env, pc);
4639 op = (insn >> 8) & 0xff;
4640 ilen = get_ilen(op);
4641 s->next_pc = s->pc + ilen;
/* Left-align the full encoding in the uint64_t so field offsets match
   the big-bit-endian numbering used by extract_field(). */
4643 switch (ilen) {
4644 case 2:
4645 insn = insn << 48;
4646 break;
4647 case 4:
4648 insn = ld_code4(env, pc) << 32;
4649 break;
4650 case 6:
4651 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
4652 break;
4653 default:
4654 abort();
4657 /* We can't actually determine the insn format until we've looked up
4658 the full insn opcode. Which we can't do without locating the
4659 secondary opcode. Assume by default that OP2 is at bit 40; for
4660 those smaller insns that don't actually have a secondary opcode
4661 this will correctly result in OP2 = 0. */
4662 switch (op) {
4663 case 0x01: /* E */
4664 case 0x80: /* S */
4665 case 0x82: /* S */
4666 case 0x93: /* S */
4667 case 0xb2: /* S, RRF, RRE */
4668 case 0xb3: /* RRE, RRD, RRF */
4669 case 0xb9: /* RRE, RRF */
4670 case 0xe5: /* SSE, SIL */
4671 op2 = (insn << 8) >> 56;
4672 break;
4673 case 0xa5: /* RI */
4674 case 0xa7: /* RI */
4675 case 0xc0: /* RIL */
4676 case 0xc2: /* RIL */
4677 case 0xc4: /* RIL */
4678 case 0xc6: /* RIL */
4679 case 0xc8: /* SSF */
4680 case 0xcc: /* RIL */
4681 op2 = (insn << 12) >> 60;
4682 break;
4683 case 0xd0 ... 0xdf: /* SS */
4684 case 0xe1: /* SS */
4685 case 0xe2: /* SS */
4686 case 0xe8: /* SS */
4687 case 0xe9: /* SS */
4688 case 0xea: /* SS */
4689 case 0xee ... 0xf3: /* SS */
4690 case 0xf8 ... 0xfd: /* SS */
4691 op2 = 0;
4692 break;
4693 default:
4694 op2 = (insn << 40) >> 56;
4695 break;
4698 memset(f, 0, sizeof(*f));
4699 f->op = op;
4700 f->op2 = op2;
4702 /* Lookup the instruction. */
4703 info = lookup_opc(op << 8 | op2);
4705 /* If we found it, extract the operands. */
4706 if (info != NULL) {
4707 DisasFormat fmt = info->fmt;
4708 int i;
4710 for (i = 0; i < NUM_C_FIELD; ++i) {
4711 extract_field(f, &format_info[fmt].op[i], insn);
4714 return info;
/* Decode and translate a single instruction at s->pc, driving the
   in1/in2/prep/op/wout/cout generator pipeline, then advance s->pc.
   Returns the exit disposition for the TB loop. */
4717 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
4719 const DisasInsn *insn;
4720 ExitStatus ret = NO_EXIT;
4721 DisasFields f;
4722 DisasOps o;
4724 /* Search for the insn in the table. */
4725 insn = extract_insn(env, s, &f);
4727 /* Not found means unimplemented/illegal opcode. */
4728 if (insn == NULL) {
4729 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
4730 f.op, f.op2);
4731 gen_illegal_opcode(s);
4732 return EXIT_NORETURN;
4735 /* Check for insn specification exceptions. */
/* The spec mask was assembled at build time from the SPEC_* macros of
   the generators this insn uses; a violation raises PGM_SPECIFICATION. */
4736 if (insn->spec) {
4737 int spec = insn->spec, excp = 0, r;
4739 if (spec & SPEC_r1_even) {
4740 r = get_field(&f, r1);
4741 if (r & 1) {
4742 excp = PGM_SPECIFICATION;
4745 if (spec & SPEC_r2_even) {
4746 r = get_field(&f, r2);
4747 if (r & 1) {
4748 excp = PGM_SPECIFICATION;
4751 if (spec & SPEC_r3_even) {
4752 r = get_field(&f, r3);
4753 if (r & 1) {
4754 excp = PGM_SPECIFICATION;
4757 if (spec & SPEC_r1_f128) {
4758 r = get_field(&f, r1);
4759 if (r > 13) {
4760 excp = PGM_SPECIFICATION;
4763 if (spec & SPEC_r2_f128) {
4764 r = get_field(&f, r2);
4765 if (r > 13) {
4766 excp = PGM_SPECIFICATION;
4769 if (excp) {
4770 gen_program_exception(s, excp);
4771 return EXIT_NORETURN;
4775 /* Set up the structures we use to communicate with the helpers. */
4776 s->insn = insn;
4777 s->fields = &f;
4778 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
4779 TCGV_UNUSED_I64(o.out);
4780 TCGV_UNUSED_I64(o.out2);
4781 TCGV_UNUSED_I64(o.in1);
4782 TCGV_UNUSED_I64(o.in2);
4783 TCGV_UNUSED_I64(o.addr1);
4785 /* Implement the instruction. */
/* Fixed pipeline order: inputs, output prep, operation, writeback, CC. */
4786 if (insn->help_in1) {
4787 insn->help_in1(s, &f, &o);
4789 if (insn->help_in2) {
4790 insn->help_in2(s, &f, &o);
4792 if (insn->help_prep) {
4793 insn->help_prep(s, &f, &o);
4795 if (insn->help_op) {
4796 ret = insn->help_op(s, &o);
4798 if (insn->help_wout) {
4799 insn->help_wout(s, &f, &o);
4801 if (insn->help_cout) {
4802 insn->help_cout(s, &o);
4805 /* Free any temporaries created by the helpers. */
/* g_* flags mark fields that alias TCG globals and must not be freed. */
4806 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
4807 tcg_temp_free_i64(o.out);
4809 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
4810 tcg_temp_free_i64(o.out2);
4812 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
4813 tcg_temp_free_i64(o.in1);
4815 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
4816 tcg_temp_free_i64(o.in2);
4818 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
4819 tcg_temp_free_i64(o.addr1);
4822 /* Advance to the next instruction. */
4823 s->pc = s->next_pc;
4824 return ret;
/* Translate a basic block starting at tb->pc into TCG ops.  With
   search_pc set, additionally record the per-op PC/cc_op/icount tables
   used later by restore_state_to_opc(). */
4827 static inline void gen_intermediate_code_internal(S390CPU *cpu,
4828 TranslationBlock *tb,
4829 bool search_pc)
4831 CPUState *cs = CPU(cpu);
4832 CPUS390XState *env = &cpu->env;
4833 DisasContext dc;
4834 target_ulong pc_start;
4835 uint64_t next_page_start;
4836 int j, lj = -1;
4837 int num_insns, max_insns;
4838 CPUBreakpoint *bp;
4839 ExitStatus status;
4840 bool do_debug;
4842 pc_start = tb->pc;
4844 /* 31-bit mode */
4845 if (!(tb->flags & FLAG_MASK_64)) {
4846 pc_start &= 0x7fffffff;
4849 dc.tb = tb;
4850 dc.pc = pc_start;
4851 dc.cc_op = CC_OP_DYNAMIC;
4852 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
4854 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
4856 num_insns = 0;
4857 max_insns = tb->cflags & CF_COUNT_MASK;
4858 if (max_insns == 0) {
4859 max_insns = CF_COUNT_MASK;
4862 gen_tb_start(tb);
4864 do {
/* Record the restore tables: zero-fill any ops emitted since the last
   recorded slot, then mark this op as an instruction start. */
4865 if (search_pc) {
4866 j = tcg_op_buf_count();
4867 if (lj < j) {
4868 lj++;
4869 while (lj < j) {
4870 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4873 tcg_ctx.gen_opc_pc[lj] = dc.pc;
4874 gen_opc_cc_op[lj] = dc.cc_op;
4875 tcg_ctx.gen_opc_instr_start[lj] = 1;
4876 tcg_ctx.gen_opc_icount[lj] = num_insns;
4878 if (++num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
4879 gen_io_start();
4882 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4883 tcg_gen_debug_insn_start(dc.pc);
4886 status = NO_EXIT;
/* A breakpoint at this PC ends the TB before translating the insn. */
4887 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
4888 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
4889 if (bp->pc == dc.pc) {
4890 status = EXIT_PC_STALE;
4891 do_debug = true;
4892 break;
4896 if (status == NO_EXIT) {
4897 status = translate_one(env, &dc);
4900 /* If we reach a page boundary, are single stepping,
4901 or exhaust instruction count, stop generation. */
4902 if (status == NO_EXIT
4903 && (dc.pc >= next_page_start
4904 || tcg_op_buf_full()
4905 || num_insns >= max_insns
4906 || singlestep
4907 || cs->singlestep_enabled)) {
4908 status = EXIT_PC_STALE;
4910 } while (status == NO_EXIT);
4912 if (tb->cflags & CF_LAST_IO) {
4913 gen_io_end();
4916 switch (status) {
4917 case EXIT_GOTO_TB:
4918 case EXIT_NORETURN:
4919 break;
4920 case EXIT_PC_STALE:
4921 update_psw_addr(&dc);
4922 /* FALLTHRU */
4923 case EXIT_PC_UPDATED:
4924 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
4925 cc op type is in env */
4926 update_cc_op(&dc);
4927 /* Exit the TB, either by raising a debug exception or by return. */
4928 if (do_debug) {
4929 gen_exception(EXCP_DEBUG);
4930 } else {
4931 tcg_gen_exit_tb(0);
4933 break;
4934 default:
4935 abort();
4938 gen_tb_end(tb, num_insns);
4940 if (search_pc) {
/* Pad out the restore tables for the trailing ops of the TB. */
4941 j = tcg_op_buf_count();
4942 lj++;
4943 while (lj <= j) {
4944 tcg_ctx.gen_opc_instr_start[lj++] = 0;
4946 } else {
4947 tb->size = dc.pc - pc_start;
4948 tb->icount = num_insns;
4951 #if defined(S390X_DEBUG_DISAS)
4952 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
4953 qemu_log("IN: %s\n", lookup_symbol(pc_start));
4954 log_target_disas(env, pc_start, dc.pc - pc_start, 1);
4955 qemu_log("\n");
4957 #endif
4960 void gen_intermediate_code (CPUS390XState *env, struct TranslationBlock *tb)
4962 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, false);
4965 void gen_intermediate_code_pc (CPUS390XState *env, struct TranslationBlock *tb)
4967 gen_intermediate_code_internal(s390_env_get_cpu(env), tb, true);
/* Restore env->psw.addr (and cc_op, when meaningful) from the tables
   recorded by the search_pc translation pass, after an exception was
   taken in the middle of a TB. */
4970 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, int pc_pos)
4972 int cc_op;
4973 env->psw.addr = tcg_ctx.gen_opc_pc[pc_pos];
4974 cc_op = gen_opc_cc_op[pc_pos];
/* DYNAMIC/STATIC are placeholder cc_op states, not real values; in
   those cases env->cc_op already holds the truth and is left alone. */
4975 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
4976 env->cc_op = cc_op;