monitor: fix object_del for command-line-created objects
[qemu/ar7.git] / target / s390x / translate.c
blob4c48c593cd8a6710ad7260c500cd9a5381382936
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
56 struct DisasContext {
57 struct TranslationBlock *tb;
58 const DisasInsn *insn;
59 DisasFields *fields;
60 uint64_t pc, next_pc;
61 enum cc_op cc_op;
62 bool singlestep_enabled;
65 /* Information carried about a condition to be evaluated. */
66 typedef struct {
67 TCGCond cond:8;
68 bool is_64;
69 bool g1;
70 bool g2;
71 union {
72 struct { TCGv_i64 a, b; } s64;
73 struct { TCGv_i32 a, b; } s32;
74 } u;
75 } DisasCompare;
77 #define DISAS_EXCP 4
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit[CC_OP_MAX];
81 static uint64_t inline_branch_miss[CC_OP_MAX];
82 #endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
94 void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
95 int flags)
97 S390CPU *cpu = S390_CPU(cs);
98 CPUS390XState *env = &cpu->env;
99 int i;
101 if (env->cc_op > 3) {
102 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
103 env->psw.mask, env->psw.addr, cc_name(env->cc_op));
104 } else {
105 cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
106 env->psw.mask, env->psw.addr, env->cc_op);
109 for (i = 0; i < 16; i++) {
110 cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
111 if ((i % 4) == 3) {
112 cpu_fprintf(f, "\n");
113 } else {
114 cpu_fprintf(f, " ");
118 for (i = 0; i < 16; i++) {
119 cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
120 if ((i % 4) == 3) {
121 cpu_fprintf(f, "\n");
122 } else {
123 cpu_fprintf(f, " ");
127 for (i = 0; i < 32; i++) {
128 cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
129 env->vregs[i][0].ll, env->vregs[i][1].ll);
130 cpu_fprintf(f, (i % 2) ? "\n" : " ");
133 #ifndef CONFIG_USER_ONLY
134 for (i = 0; i < 16; i++) {
135 cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
136 if ((i % 4) == 3) {
137 cpu_fprintf(f, "\n");
138 } else {
139 cpu_fprintf(f, " ");
142 #endif
144 #ifdef DEBUG_INLINE_BRANCHES
145 for (i = 0; i < CC_OP_MAX; i++) {
146 cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
147 inline_branch_miss[i], inline_branch_hit[i]);
149 #endif
151 cpu_fprintf(f, "\n");
154 static TCGv_i64 psw_addr;
155 static TCGv_i64 psw_mask;
156 static TCGv_i64 gbea;
158 static TCGv_i32 cc_op;
159 static TCGv_i64 cc_src;
160 static TCGv_i64 cc_dst;
161 static TCGv_i64 cc_vr;
163 static char cpu_reg_names[32][4];
164 static TCGv_i64 regs[16];
165 static TCGv_i64 fregs[16];
167 void s390x_translate_init(void)
169 int i;
171 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
172 tcg_ctx.tcg_env = cpu_env;
173 psw_addr = tcg_global_mem_new_i64(cpu_env,
174 offsetof(CPUS390XState, psw.addr),
175 "psw_addr");
176 psw_mask = tcg_global_mem_new_i64(cpu_env,
177 offsetof(CPUS390XState, psw.mask),
178 "psw_mask");
179 gbea = tcg_global_mem_new_i64(cpu_env,
180 offsetof(CPUS390XState, gbea),
181 "gbea");
183 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
184 "cc_op");
185 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
186 "cc_src");
187 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
188 "cc_dst");
189 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
190 "cc_vr");
192 for (i = 0; i < 16; i++) {
193 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
194 regs[i] = tcg_global_mem_new(cpu_env,
195 offsetof(CPUS390XState, regs[i]),
196 cpu_reg_names[i]);
199 for (i = 0; i < 16; i++) {
200 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
201 fregs[i] = tcg_global_mem_new(cpu_env,
202 offsetof(CPUS390XState, vregs[i][0].d),
203 cpu_reg_names[i + 16]);
207 static TCGv_i64 load_reg(int reg)
209 TCGv_i64 r = tcg_temp_new_i64();
210 tcg_gen_mov_i64(r, regs[reg]);
211 return r;
214 static TCGv_i64 load_freg32_i64(int reg)
216 TCGv_i64 r = tcg_temp_new_i64();
217 tcg_gen_shri_i64(r, fregs[reg], 32);
218 return r;
221 static void store_reg(int reg, TCGv_i64 v)
223 tcg_gen_mov_i64(regs[reg], v);
226 static void store_freg(int reg, TCGv_i64 v)
228 tcg_gen_mov_i64(fregs[reg], v);
231 static void store_reg32_i64(int reg, TCGv_i64 v)
233 /* 32 bit register writes keep the upper half */
234 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
237 static void store_reg32h_i64(int reg, TCGv_i64 v)
239 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
242 static void store_freg32_i64(int reg, TCGv_i64 v)
244 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
247 static void return_low128(TCGv_i64 dest)
249 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
252 static void update_psw_addr(DisasContext *s)
254 /* psw.addr */
255 tcg_gen_movi_i64(psw_addr, s->pc);
258 static void per_branch(DisasContext *s, bool to_next)
260 #ifndef CONFIG_USER_ONLY
261 tcg_gen_movi_i64(gbea, s->pc);
263 if (s->tb->flags & FLAG_MASK_PER) {
264 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
265 gen_helper_per_branch(cpu_env, gbea, next_pc);
266 if (to_next) {
267 tcg_temp_free_i64(next_pc);
270 #endif
273 static void per_branch_cond(DisasContext *s, TCGCond cond,
274 TCGv_i64 arg1, TCGv_i64 arg2)
276 #ifndef CONFIG_USER_ONLY
277 if (s->tb->flags & FLAG_MASK_PER) {
278 TCGLabel *lab = gen_new_label();
279 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
281 tcg_gen_movi_i64(gbea, s->pc);
282 gen_helper_per_branch(cpu_env, gbea, psw_addr);
284 gen_set_label(lab);
285 } else {
286 TCGv_i64 pc = tcg_const_i64(s->pc);
287 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
288 tcg_temp_free_i64(pc);
290 #endif
293 static void per_breaking_event(DisasContext *s)
295 tcg_gen_movi_i64(gbea, s->pc);
298 static void update_cc_op(DisasContext *s)
300 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
301 tcg_gen_movi_i32(cc_op, s->cc_op);
305 static void potential_page_fault(DisasContext *s)
307 update_psw_addr(s);
308 update_cc_op(s);
311 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
313 return (uint64_t)cpu_lduw_code(env, pc);
316 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
318 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
321 static int get_mem_index(DisasContext *s)
323 switch (s->tb->flags & FLAG_MASK_ASC) {
324 case PSW_ASC_PRIMARY >> 32:
325 return 0;
326 case PSW_ASC_SECONDARY >> 32:
327 return 1;
328 case PSW_ASC_HOME >> 32:
329 return 2;
330 default:
331 tcg_abort();
332 break;
336 static void gen_exception(int excp)
338 TCGv_i32 tmp = tcg_const_i32(excp);
339 gen_helper_exception(cpu_env, tmp);
340 tcg_temp_free_i32(tmp);
343 static void gen_program_exception(DisasContext *s, int code)
345 TCGv_i32 tmp;
347 /* Remember what pgm exeption this was. */
348 tmp = tcg_const_i32(code);
349 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
350 tcg_temp_free_i32(tmp);
352 tmp = tcg_const_i32(s->next_pc - s->pc);
353 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
354 tcg_temp_free_i32(tmp);
356 /* Advance past instruction. */
357 s->pc = s->next_pc;
358 update_psw_addr(s);
360 /* Save off cc. */
361 update_cc_op(s);
363 /* Trigger exception. */
364 gen_exception(EXCP_PGM);
367 static inline void gen_illegal_opcode(DisasContext *s)
369 gen_program_exception(s, PGM_OPERATION);
372 static inline void gen_trap(DisasContext *s)
374 TCGv_i32 t;
376 /* Set DXC to 0xff. */
377 t = tcg_temp_new_i32();
378 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
379 tcg_gen_ori_i32(t, t, 0xff00);
380 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
381 tcg_temp_free_i32(t);
383 gen_program_exception(s, PGM_DATA);
386 #ifndef CONFIG_USER_ONLY
387 static void check_privileged(DisasContext *s)
389 if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
390 gen_program_exception(s, PGM_PRIVILEGED);
393 #endif
395 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
397 TCGv_i64 tmp = tcg_temp_new_i64();
398 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
400 /* Note that d2 is limited to 20 bits, signed. If we crop negative
401 displacements early we create larger immedate addends. */
403 /* Note that addi optimizes the imm==0 case. */
404 if (b2 && x2) {
405 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
406 tcg_gen_addi_i64(tmp, tmp, d2);
407 } else if (b2) {
408 tcg_gen_addi_i64(tmp, regs[b2], d2);
409 } else if (x2) {
410 tcg_gen_addi_i64(tmp, regs[x2], d2);
411 } else {
412 if (need_31) {
413 d2 &= 0x7fffffff;
414 need_31 = false;
416 tcg_gen_movi_i64(tmp, d2);
418 if (need_31) {
419 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
422 return tmp;
425 static inline bool live_cc_data(DisasContext *s)
427 return (s->cc_op != CC_OP_DYNAMIC
428 && s->cc_op != CC_OP_STATIC
429 && s->cc_op > 3);
432 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
434 if (live_cc_data(s)) {
435 tcg_gen_discard_i64(cc_src);
436 tcg_gen_discard_i64(cc_dst);
437 tcg_gen_discard_i64(cc_vr);
439 s->cc_op = CC_OP_CONST0 + val;
442 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
444 if (live_cc_data(s)) {
445 tcg_gen_discard_i64(cc_src);
446 tcg_gen_discard_i64(cc_vr);
448 tcg_gen_mov_i64(cc_dst, dst);
449 s->cc_op = op;
452 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
453 TCGv_i64 dst)
455 if (live_cc_data(s)) {
456 tcg_gen_discard_i64(cc_vr);
458 tcg_gen_mov_i64(cc_src, src);
459 tcg_gen_mov_i64(cc_dst, dst);
460 s->cc_op = op;
463 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
464 TCGv_i64 dst, TCGv_i64 vr)
466 tcg_gen_mov_i64(cc_src, src);
467 tcg_gen_mov_i64(cc_dst, dst);
468 tcg_gen_mov_i64(cc_vr, vr);
469 s->cc_op = op;
472 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
474 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
477 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
479 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
482 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
484 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
487 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
489 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
492 /* CC value is in env->cc_op */
493 static void set_cc_static(DisasContext *s)
495 if (live_cc_data(s)) {
496 tcg_gen_discard_i64(cc_src);
497 tcg_gen_discard_i64(cc_dst);
498 tcg_gen_discard_i64(cc_vr);
500 s->cc_op = CC_OP_STATIC;
503 /* calculates cc into cc_op */
504 static void gen_op_calc_cc(DisasContext *s)
506 TCGv_i32 local_cc_op;
507 TCGv_i64 dummy;
509 TCGV_UNUSED_I32(local_cc_op);
510 TCGV_UNUSED_I64(dummy);
511 switch (s->cc_op) {
512 default:
513 dummy = tcg_const_i64(0);
514 /* FALLTHRU */
515 case CC_OP_ADD_64:
516 case CC_OP_ADDU_64:
517 case CC_OP_ADDC_64:
518 case CC_OP_SUB_64:
519 case CC_OP_SUBU_64:
520 case CC_OP_SUBB_64:
521 case CC_OP_ADD_32:
522 case CC_OP_ADDU_32:
523 case CC_OP_ADDC_32:
524 case CC_OP_SUB_32:
525 case CC_OP_SUBU_32:
526 case CC_OP_SUBB_32:
527 local_cc_op = tcg_const_i32(s->cc_op);
528 break;
529 case CC_OP_CONST0:
530 case CC_OP_CONST1:
531 case CC_OP_CONST2:
532 case CC_OP_CONST3:
533 case CC_OP_STATIC:
534 case CC_OP_DYNAMIC:
535 break;
538 switch (s->cc_op) {
539 case CC_OP_CONST0:
540 case CC_OP_CONST1:
541 case CC_OP_CONST2:
542 case CC_OP_CONST3:
543 /* s->cc_op is the cc value */
544 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
545 break;
546 case CC_OP_STATIC:
547 /* env->cc_op already is the cc value */
548 break;
549 case CC_OP_NZ:
550 case CC_OP_ABS_64:
551 case CC_OP_NABS_64:
552 case CC_OP_ABS_32:
553 case CC_OP_NABS_32:
554 case CC_OP_LTGT0_32:
555 case CC_OP_LTGT0_64:
556 case CC_OP_COMP_32:
557 case CC_OP_COMP_64:
558 case CC_OP_NZ_F32:
559 case CC_OP_NZ_F64:
560 case CC_OP_FLOGR:
561 /* 1 argument */
562 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
563 break;
564 case CC_OP_ICM:
565 case CC_OP_LTGT_32:
566 case CC_OP_LTGT_64:
567 case CC_OP_LTUGTU_32:
568 case CC_OP_LTUGTU_64:
569 case CC_OP_TM_32:
570 case CC_OP_TM_64:
571 case CC_OP_SLA_32:
572 case CC_OP_SLA_64:
573 case CC_OP_NZ_F128:
574 /* 2 arguments */
575 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
576 break;
577 case CC_OP_ADD_64:
578 case CC_OP_ADDU_64:
579 case CC_OP_ADDC_64:
580 case CC_OP_SUB_64:
581 case CC_OP_SUBU_64:
582 case CC_OP_SUBB_64:
583 case CC_OP_ADD_32:
584 case CC_OP_ADDU_32:
585 case CC_OP_ADDC_32:
586 case CC_OP_SUB_32:
587 case CC_OP_SUBU_32:
588 case CC_OP_SUBB_32:
589 /* 3 arguments */
590 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
591 break;
592 case CC_OP_DYNAMIC:
593 /* unknown operation - assume 3 arguments and cc_op in env */
594 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
595 break;
596 default:
597 tcg_abort();
600 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
601 tcg_temp_free_i32(local_cc_op);
603 if (!TCGV_IS_UNUSED_I64(dummy)) {
604 tcg_temp_free_i64(dummy);
607 /* We now have cc in cc_op as constant */
608 set_cc_static(s);
611 static int use_goto_tb(DisasContext *s, uint64_t dest)
613 if (unlikely(s->singlestep_enabled) ||
614 (s->tb->cflags & CF_LAST_IO) ||
615 (s->tb->flags & FLAG_MASK_PER)) {
616 return false;
618 #ifndef CONFIG_USER_ONLY
619 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
620 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
621 #else
622 return true;
623 #endif
626 static void account_noninline_branch(DisasContext *s, int cc_op)
628 #ifdef DEBUG_INLINE_BRANCHES
629 inline_branch_miss[cc_op]++;
630 #endif
633 static void account_inline_branch(DisasContext *s, int cc_op)
635 #ifdef DEBUG_INLINE_BRANCHES
636 inline_branch_hit[cc_op]++;
637 #endif
640 /* Table of mask values to comparison codes, given a comparison as input.
641 For such, CC=3 should not be possible. */
642 static const TCGCond ltgt_cond[16] = {
643 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
644 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
645 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
646 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
647 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
648 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
649 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
650 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
653 /* Table of mask values to comparison codes, given a logic op as input.
654 For such, only CC=0 and CC=1 should be possible. */
655 static const TCGCond nz_cond[16] = {
656 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
657 TCG_COND_NEVER, TCG_COND_NEVER,
658 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
659 TCG_COND_NE, TCG_COND_NE,
660 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
661 TCG_COND_EQ, TCG_COND_EQ,
662 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
663 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
666 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
667 details required to generate a TCG comparison. */
668 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
670 TCGCond cond;
671 enum cc_op old_cc_op = s->cc_op;
673 if (mask == 15 || mask == 0) {
674 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
675 c->u.s32.a = cc_op;
676 c->u.s32.b = cc_op;
677 c->g1 = c->g2 = true;
678 c->is_64 = false;
679 return;
682 /* Find the TCG condition for the mask + cc op. */
683 switch (old_cc_op) {
684 case CC_OP_LTGT0_32:
685 case CC_OP_LTGT0_64:
686 case CC_OP_LTGT_32:
687 case CC_OP_LTGT_64:
688 cond = ltgt_cond[mask];
689 if (cond == TCG_COND_NEVER) {
690 goto do_dynamic;
692 account_inline_branch(s, old_cc_op);
693 break;
695 case CC_OP_LTUGTU_32:
696 case CC_OP_LTUGTU_64:
697 cond = tcg_unsigned_cond(ltgt_cond[mask]);
698 if (cond == TCG_COND_NEVER) {
699 goto do_dynamic;
701 account_inline_branch(s, old_cc_op);
702 break;
704 case CC_OP_NZ:
705 cond = nz_cond[mask];
706 if (cond == TCG_COND_NEVER) {
707 goto do_dynamic;
709 account_inline_branch(s, old_cc_op);
710 break;
712 case CC_OP_TM_32:
713 case CC_OP_TM_64:
714 switch (mask) {
715 case 8:
716 cond = TCG_COND_EQ;
717 break;
718 case 4 | 2 | 1:
719 cond = TCG_COND_NE;
720 break;
721 default:
722 goto do_dynamic;
724 account_inline_branch(s, old_cc_op);
725 break;
727 case CC_OP_ICM:
728 switch (mask) {
729 case 8:
730 cond = TCG_COND_EQ;
731 break;
732 case 4 | 2 | 1:
733 case 4 | 2:
734 cond = TCG_COND_NE;
735 break;
736 default:
737 goto do_dynamic;
739 account_inline_branch(s, old_cc_op);
740 break;
742 case CC_OP_FLOGR:
743 switch (mask & 0xa) {
744 case 8: /* src == 0 -> no one bit found */
745 cond = TCG_COND_EQ;
746 break;
747 case 2: /* src != 0 -> one bit found */
748 cond = TCG_COND_NE;
749 break;
750 default:
751 goto do_dynamic;
753 account_inline_branch(s, old_cc_op);
754 break;
756 case CC_OP_ADDU_32:
757 case CC_OP_ADDU_64:
758 switch (mask) {
759 case 8 | 2: /* vr == 0 */
760 cond = TCG_COND_EQ;
761 break;
762 case 4 | 1: /* vr != 0 */
763 cond = TCG_COND_NE;
764 break;
765 case 8 | 4: /* no carry -> vr >= src */
766 cond = TCG_COND_GEU;
767 break;
768 case 2 | 1: /* carry -> vr < src */
769 cond = TCG_COND_LTU;
770 break;
771 default:
772 goto do_dynamic;
774 account_inline_branch(s, old_cc_op);
775 break;
777 case CC_OP_SUBU_32:
778 case CC_OP_SUBU_64:
779 /* Note that CC=0 is impossible; treat it as dont-care. */
780 switch (mask & 7) {
781 case 2: /* zero -> op1 == op2 */
782 cond = TCG_COND_EQ;
783 break;
784 case 4 | 1: /* !zero -> op1 != op2 */
785 cond = TCG_COND_NE;
786 break;
787 case 4: /* borrow (!carry) -> op1 < op2 */
788 cond = TCG_COND_LTU;
789 break;
790 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
791 cond = TCG_COND_GEU;
792 break;
793 default:
794 goto do_dynamic;
796 account_inline_branch(s, old_cc_op);
797 break;
799 default:
800 do_dynamic:
801 /* Calculate cc value. */
802 gen_op_calc_cc(s);
803 /* FALLTHRU */
805 case CC_OP_STATIC:
806 /* Jump based on CC. We'll load up the real cond below;
807 the assignment here merely avoids a compiler warning. */
808 account_noninline_branch(s, old_cc_op);
809 old_cc_op = CC_OP_STATIC;
810 cond = TCG_COND_NEVER;
811 break;
814 /* Load up the arguments of the comparison. */
815 c->is_64 = true;
816 c->g1 = c->g2 = false;
817 switch (old_cc_op) {
818 case CC_OP_LTGT0_32:
819 c->is_64 = false;
820 c->u.s32.a = tcg_temp_new_i32();
821 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
822 c->u.s32.b = tcg_const_i32(0);
823 break;
824 case CC_OP_LTGT_32:
825 case CC_OP_LTUGTU_32:
826 case CC_OP_SUBU_32:
827 c->is_64 = false;
828 c->u.s32.a = tcg_temp_new_i32();
829 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
830 c->u.s32.b = tcg_temp_new_i32();
831 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
832 break;
834 case CC_OP_LTGT0_64:
835 case CC_OP_NZ:
836 case CC_OP_FLOGR:
837 c->u.s64.a = cc_dst;
838 c->u.s64.b = tcg_const_i64(0);
839 c->g1 = true;
840 break;
841 case CC_OP_LTGT_64:
842 case CC_OP_LTUGTU_64:
843 case CC_OP_SUBU_64:
844 c->u.s64.a = cc_src;
845 c->u.s64.b = cc_dst;
846 c->g1 = c->g2 = true;
847 break;
849 case CC_OP_TM_32:
850 case CC_OP_TM_64:
851 case CC_OP_ICM:
852 c->u.s64.a = tcg_temp_new_i64();
853 c->u.s64.b = tcg_const_i64(0);
854 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
855 break;
857 case CC_OP_ADDU_32:
858 c->is_64 = false;
859 c->u.s32.a = tcg_temp_new_i32();
860 c->u.s32.b = tcg_temp_new_i32();
861 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
862 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
863 tcg_gen_movi_i32(c->u.s32.b, 0);
864 } else {
865 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
867 break;
869 case CC_OP_ADDU_64:
870 c->u.s64.a = cc_vr;
871 c->g1 = true;
872 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
873 c->u.s64.b = tcg_const_i64(0);
874 } else {
875 c->u.s64.b = cc_src;
876 c->g2 = true;
878 break;
880 case CC_OP_STATIC:
881 c->is_64 = false;
882 c->u.s32.a = cc_op;
883 c->g1 = true;
884 switch (mask) {
885 case 0x8 | 0x4 | 0x2: /* cc != 3 */
886 cond = TCG_COND_NE;
887 c->u.s32.b = tcg_const_i32(3);
888 break;
889 case 0x8 | 0x4 | 0x1: /* cc != 2 */
890 cond = TCG_COND_NE;
891 c->u.s32.b = tcg_const_i32(2);
892 break;
893 case 0x8 | 0x2 | 0x1: /* cc != 1 */
894 cond = TCG_COND_NE;
895 c->u.s32.b = tcg_const_i32(1);
896 break;
897 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
898 cond = TCG_COND_EQ;
899 c->g1 = false;
900 c->u.s32.a = tcg_temp_new_i32();
901 c->u.s32.b = tcg_const_i32(0);
902 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
903 break;
904 case 0x8 | 0x4: /* cc < 2 */
905 cond = TCG_COND_LTU;
906 c->u.s32.b = tcg_const_i32(2);
907 break;
908 case 0x8: /* cc == 0 */
909 cond = TCG_COND_EQ;
910 c->u.s32.b = tcg_const_i32(0);
911 break;
912 case 0x4 | 0x2 | 0x1: /* cc != 0 */
913 cond = TCG_COND_NE;
914 c->u.s32.b = tcg_const_i32(0);
915 break;
916 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
917 cond = TCG_COND_NE;
918 c->g1 = false;
919 c->u.s32.a = tcg_temp_new_i32();
920 c->u.s32.b = tcg_const_i32(0);
921 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
922 break;
923 case 0x4: /* cc == 1 */
924 cond = TCG_COND_EQ;
925 c->u.s32.b = tcg_const_i32(1);
926 break;
927 case 0x2 | 0x1: /* cc > 1 */
928 cond = TCG_COND_GTU;
929 c->u.s32.b = tcg_const_i32(1);
930 break;
931 case 0x2: /* cc == 2 */
932 cond = TCG_COND_EQ;
933 c->u.s32.b = tcg_const_i32(2);
934 break;
935 case 0x1: /* cc == 3 */
936 cond = TCG_COND_EQ;
937 c->u.s32.b = tcg_const_i32(3);
938 break;
939 default:
940 /* CC is masked by something else: (8 >> cc) & mask. */
941 cond = TCG_COND_NE;
942 c->g1 = false;
943 c->u.s32.a = tcg_const_i32(8);
944 c->u.s32.b = tcg_const_i32(0);
945 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
946 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
947 break;
949 break;
951 default:
952 abort();
954 c->cond = cond;
957 static void free_compare(DisasCompare *c)
959 if (!c->g1) {
960 if (c->is_64) {
961 tcg_temp_free_i64(c->u.s64.a);
962 } else {
963 tcg_temp_free_i32(c->u.s32.a);
966 if (!c->g2) {
967 if (c->is_64) {
968 tcg_temp_free_i64(c->u.s64.b);
969 } else {
970 tcg_temp_free_i32(c->u.s32.b);
975 /* ====================================================================== */
976 /* Define the insn format enumeration. */
977 #define F0(N) FMT_##N,
978 #define F1(N, X1) F0(N)
979 #define F2(N, X1, X2) F0(N)
980 #define F3(N, X1, X2, X3) F0(N)
981 #define F4(N, X1, X2, X3, X4) F0(N)
982 #define F5(N, X1, X2, X3, X4, X5) F0(N)
984 typedef enum {
985 #include "insn-format.def"
986 } DisasFormat;
988 #undef F0
989 #undef F1
990 #undef F2
991 #undef F3
992 #undef F4
993 #undef F5
995 /* Define a structure to hold the decoded fields. We'll store each inside
996 an array indexed by an enum. In order to conserve memory, we'll arrange
997 for fields that do not exist at the same time to overlap, thus the "C"
998 for compact. For checking purposes there is an "O" for original index
999 as well that will be applied to availability bitmaps. */
1001 enum DisasFieldIndexO {
1002 FLD_O_r1,
1003 FLD_O_r2,
1004 FLD_O_r3,
1005 FLD_O_m1,
1006 FLD_O_m3,
1007 FLD_O_m4,
1008 FLD_O_b1,
1009 FLD_O_b2,
1010 FLD_O_b4,
1011 FLD_O_d1,
1012 FLD_O_d2,
1013 FLD_O_d4,
1014 FLD_O_x2,
1015 FLD_O_l1,
1016 FLD_O_l2,
1017 FLD_O_i1,
1018 FLD_O_i2,
1019 FLD_O_i3,
1020 FLD_O_i4,
1021 FLD_O_i5
1024 enum DisasFieldIndexC {
1025 FLD_C_r1 = 0,
1026 FLD_C_m1 = 0,
1027 FLD_C_b1 = 0,
1028 FLD_C_i1 = 0,
1030 FLD_C_r2 = 1,
1031 FLD_C_b2 = 1,
1032 FLD_C_i2 = 1,
1034 FLD_C_r3 = 2,
1035 FLD_C_m3 = 2,
1036 FLD_C_i3 = 2,
1038 FLD_C_m4 = 3,
1039 FLD_C_b4 = 3,
1040 FLD_C_i4 = 3,
1041 FLD_C_l1 = 3,
1043 FLD_C_i5 = 4,
1044 FLD_C_d1 = 4,
1046 FLD_C_d2 = 5,
1048 FLD_C_d4 = 6,
1049 FLD_C_x2 = 6,
1050 FLD_C_l2 = 6,
1052 NUM_C_FIELD = 7
1055 struct DisasFields {
1056 uint64_t raw_insn;
1057 unsigned op:8;
1058 unsigned op2:8;
1059 unsigned presentC:16;
1060 unsigned int presentO;
1061 int c[NUM_C_FIELD];
1064 /* This is the way fields are to be accessed out of DisasFields. */
1065 #define have_field(S, F) have_field1((S), FLD_O_##F)
1066 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1068 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1070 return (f->presentO >> c) & 1;
1073 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1074 enum DisasFieldIndexC c)
1076 assert(have_field1(f, o));
1077 return f->c[c];
1080 /* Describe the layout of each field in each format. */
1081 typedef struct DisasField {
1082 unsigned int beg:8;
1083 unsigned int size:8;
1084 unsigned int type:2;
1085 unsigned int indexC:6;
1086 enum DisasFieldIndexO indexO:8;
1087 } DisasField;
1089 typedef struct DisasFormatInfo {
1090 DisasField op[NUM_C_FIELD];
1091 } DisasFormatInfo;
1093 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1094 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1095 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1096 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1097 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1098 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1099 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1100 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1101 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1102 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1103 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1104 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1105 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1106 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1108 #define F0(N) { { } },
1109 #define F1(N, X1) { { X1 } },
1110 #define F2(N, X1, X2) { { X1, X2 } },
1111 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1112 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1113 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1115 static const DisasFormatInfo format_info[] = {
1116 #include "insn-format.def"
1119 #undef F0
1120 #undef F1
1121 #undef F2
1122 #undef F3
1123 #undef F4
1124 #undef F5
1125 #undef R
1126 #undef M
1127 #undef BD
1128 #undef BXD
1129 #undef BDL
1130 #undef BXDL
1131 #undef I
1132 #undef L
1134 /* Generally, we'll extract operands into this structures, operate upon
1135 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1136 of routines below for more details. */
1137 typedef struct {
1138 bool g_out, g_out2, g_in1, g_in2;
1139 TCGv_i64 out, out2, in1, in2;
1140 TCGv_i64 addr1;
1141 } DisasOps;
1143 /* Instructions can place constraints on their operands, raising specification
1144 exceptions if they are violated. To make this easy to automate, each "in1",
1145 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1146 of the following, or 0. To make this easy to document, we'll put the
1147 SPEC_<name> defines next to <name>. */
1149 #define SPEC_r1_even 1
1150 #define SPEC_r2_even 2
1151 #define SPEC_r3_even 4
1152 #define SPEC_r1_f128 8
1153 #define SPEC_r2_f128 16
1155 /* Return values from translate_one, indicating the state of the TB. */
1156 typedef enum {
1157 /* Continue the TB. */
1158 NO_EXIT,
1159 /* We have emitted one or more goto_tb. No fixup required. */
1160 EXIT_GOTO_TB,
1161 /* We are not using a goto_tb (for whatever reason), but have updated
1162 the PC (for whatever reason), so there's no need to do it again on
1163 exiting the TB. */
1164 EXIT_PC_UPDATED,
1165 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1166 updated the PC for the next instruction to be executed. */
1167 EXIT_PC_STALE,
1168 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1169 No following code will be executed. */
1170 EXIT_NORETURN,
1171 } ExitStatus;
1173 typedef enum DisasFacility {
1174 FAC_Z, /* zarch (default) */
1175 FAC_CASS, /* compare and swap and store */
1176 FAC_CASS2, /* compare and swap and store 2*/
1177 FAC_DFP, /* decimal floating point */
1178 FAC_DFPR, /* decimal floating point rounding */
1179 FAC_DO, /* distinct operands */
1180 FAC_EE, /* execute extensions */
1181 FAC_EI, /* extended immediate */
1182 FAC_FPE, /* floating point extension */
1183 FAC_FPSSH, /* floating point support sign handling */
1184 FAC_FPRGR, /* FPR-GR transfer */
1185 FAC_GIE, /* general instructions extension */
1186 FAC_HFP_MA, /* HFP multiply-and-add/subtract */
1187 FAC_HW, /* high-word */
1188 FAC_IEEEE_SIM, /* IEEE exception sumilation */
1189 FAC_MIE, /* miscellaneous-instruction-extensions */
1190 FAC_LAT, /* load-and-trap */
1191 FAC_LOC, /* load/store on condition */
1192 FAC_LD, /* long displacement */
1193 FAC_PC, /* population count */
1194 FAC_SCF, /* store clock fast */
1195 FAC_SFLE, /* store facility list extended */
1196 FAC_ILA, /* interlocked access facility 1 */
1197 FAC_LPP, /* load-program-parameter */
1198 } DisasFacility;
1200 struct DisasInsn {
1201 unsigned opc:16;
1202 DisasFormat fmt:8;
1203 DisasFacility fac:8;
1204 unsigned spec:8;
1206 const char *name;
1208 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1209 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1210 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1211 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1212 void (*help_cout)(DisasContext *, DisasOps *);
1213 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1215 uint64_t data;
1218 /* ====================================================================== */
1219 /* Miscellaneous helpers, used by several operations. */
1221 static void help_l2_shift(DisasContext *s, DisasFields *f,
1222 DisasOps *o, int mask)
1224 int b2 = get_field(f, b2);
1225 int d2 = get_field(f, d2);
1227 if (b2 == 0) {
1228 o->in2 = tcg_const_i64(d2 & mask);
1229 } else {
1230 o->in2 = get_address(s, 0, b2, d2);
1231 tcg_gen_andi_i64(o->in2, o->in2, mask);
1235 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1237 if (dest == s->next_pc) {
1238 per_branch(s, true);
1239 return NO_EXIT;
1241 if (use_goto_tb(s, dest)) {
1242 update_cc_op(s);
1243 per_breaking_event(s);
1244 tcg_gen_goto_tb(0);
1245 tcg_gen_movi_i64(psw_addr, dest);
1246 tcg_gen_exit_tb((uintptr_t)s->tb);
1247 return EXIT_GOTO_TB;
1248 } else {
1249 tcg_gen_movi_i64(psw_addr, dest);
1250 per_branch(s, false);
1251 return EXIT_PC_UPDATED;
/* Emit a conditional branch described by C.  The destination is either
   PC-relative (IS_IMM, offset IMM in halfwords) or the register value
   CDEST.  Handles the never/always cases specially, then picks among
   three code shapes depending on which exits may use goto_tb.
   Always consumes (frees) the compare C.  */
static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
                              bool is_imm, int imm, TCGv_i64 cdest)
{
    ExitStatus ret;
    uint64_t dest = s->pc + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first.  */
    if (c->cond == TCG_COND_NEVER) {
        ret = NO_EXIT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->next_pc) {
            /* Branch to next.  */
            per_branch(s, true);
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (TCGV_IS_UNUSED_I64(cdest)) {
            /* E.g. bcr %r0 -> no branch.  */
            ret = NO_EXIT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = EXIT_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->next_pc)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb.  */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken.  */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb((uintptr_t)s->tb + 1);

            ret = EXIT_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot.  */
            /* Store taken branch destination before the brcond.  This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway.  */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken.  */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->next_pc);
            tcg_gen_exit_tb((uintptr_t)s->tb + 0);

            /* Branch taken: psw_addr already holds cdest in the
               register case; store the immediate destination here.  */
            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = EXIT_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb.  This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb.  Just update the PC and exit.  */

        TCGv_i64 next = tcg_const_i64(s->next_pc);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison to 64 bits for movcond.  */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = EXIT_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1386 /* ====================================================================== */
1387 /* The operations. These perform the bulk of the work for any insn,
1388 usually after the operands have been loaded and output initialized. */
1390 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1392 TCGv_i64 z, n;
1393 z = tcg_const_i64(0);
1394 n = tcg_temp_new_i64();
1395 tcg_gen_neg_i64(n, o->in2);
1396 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1397 tcg_temp_free_i64(n);
1398 tcg_temp_free_i64(z);
1399 return NO_EXIT;
/* Absolute value of a 32-bit float: clear the sign bit (bit 31).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
/* Absolute value of a 64-bit float: clear the sign bit (bit 63).  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
/* Absolute value of a 128-bit float: clear the sign bit in the high
   half (in1), pass the low half (in2) through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* Integer addition: out = in1 + in2.  CC is computed by the cout hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* ADD WITH CARRY: out = in1 + in2 + carry, where the carry is
   extracted from the current condition code.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: setcond then zero-extend to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* Short-BFP add: delegate to the aeb helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Long-BFP add: delegate to the adb helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Extended-BFP add: 128-bit operands are passed as high/low pairs;
   the helper returns the high half and the low half via return_low128.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Bitwise AND: out = in1 & in2.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND IMMEDIATE into a sub-field of the register: the field position
   (shift) and width (size) are packed into s->insn->data.  Bits
   outside the field are preserved by OR-ing ~mask into the operand.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* BRANCH AND SAVE: store the link information for the next instruction
   in R1, then branch to the address in in2 (no branch if in2 unused,
   i.e. the R2 field was 0).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* Branch-and-save with an immediate (halfword) offset: save the link
   info, then branch relative to the current PC.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* BRANCH ON CONDITION (BC/BCR/BRC): evaluate the mask m1 against the
   CC and branch.  BCR with R2 = 0 only performs the serialization
   side effects of masks 14/15.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT (32-bit): decrement the low 32 bits of R1 and
   branch if the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    /* Compare the decremented low 32 bits against zero.  */
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON COUNT HIGH: decrement the high 32 bits of R1 and branch
   (always PC-relative) if the result is non-zero.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Extract, decrement, and write back the high word of R1.  */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* BRANCH ON COUNT (64-bit): decrement R1 in place and branch if
   non-zero.  The comparison uses the global register directly (g1).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (32-bit, BXH/BXLE): add R3 to R1, compare the low
   32 bits of the sum against R3|1 (the limit), and branch.  insn->data
   selects LE (BXLE) vs GT (BXH).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is the odd register of the R3 pair.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* BRANCH ON INDEX (64-bit, BXHG/BXLEG): as op_bx32 but on full
   64-bit registers.  When R1 aliases the comparand register R3|1,
   the comparand must be copied before R1 is updated.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* R1 is about to be clobbered by the add; snapshot the limit.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* COMPARE AND BRANCH/TRAP family: compare in1 against in2 using the
   relation encoded in m3 (unsigned variant when insn->data is set)
   and branch either PC-relative (i4) or to the address b4+d4.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: the branch target is the b4+d4 address.  */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* Short-BFP compare via helper; the helper's result becomes the CC.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Long-BFP compare via helper; the helper's result becomes the CC.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Extended-BFP compare via helper (128-bit operands as pairs).  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Convert short BFP to 32-bit fixed; m3 carries the rounding mode
   field; CC is derived from the f32 source.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert long BFP to 32-bit fixed; CC from the f64 source.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert extended BFP to 32-bit fixed; CC from the f128 source.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert short BFP to 64-bit fixed; CC from the f32 source.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert long BFP to 64-bit fixed; CC from the f64 source.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert extended BFP to 64-bit fixed; CC from the f128 source.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert short BFP to 32-bit logical (unsigned); CC from the source.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert long BFP to 32-bit logical (unsigned); CC from the source.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert extended BFP to 32-bit logical; CC from the f128 source.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert short BFP to 64-bit logical; CC from the f32 source.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* Convert long BFP to 64-bit logical; CC from the f64 source.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* Convert extended BFP to 64-bit logical; CC from the f128 source.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Convert 64-bit fixed to short BFP (no CC change).  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert 64-bit fixed to long BFP (no CC change).  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert 64-bit fixed to extended BFP; low 64 bits come back via
   return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Convert 64-bit logical (unsigned) to short BFP.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert 64-bit logical (unsigned) to long BFP.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Convert 64-bit logical (unsigned) to extended BFP; low half via
   return_low128.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CHECKSUM: the helper computes the checksum and the number of bytes
   processed (LEN); advance the R2 address/length pair accordingly.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    potential_page_fault(s);
    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* COMPARE LOGICAL (character): operand length is l1+1 bytes.  Power-
   of-two lengths are inlined as a pair of loads and an unsigned
   compare; anything else goes through the clc helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper does the byte-wise compare and sets CC.  */
        potential_page_fault(s);
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* COMPARE LOGICAL LONG EXTENDED: delegate to the clcle helper, which
   also produces the CC.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
/* COMPARE LOGICAL CHARACTERS UNDER MASK: the helper compares the
   bytes of in1 selected by m3 against memory at in2 and sets CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    potential_page_fault(s);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* COMPARE LOGICAL STRING: helper compares strings terminated by the
   byte in regs[0]; it returns the updated first address and the
   second via return_low128, and sets CC.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
1949 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1951 TCGv_i64 t = tcg_temp_new_i64();
1952 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1953 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1954 tcg_gen_or_i64(o->out, o->out, t);
1955 tcg_temp_free_i64(t);
1956 return NO_EXIT;
/* COMPARE AND SWAP: atomic cmpxchg at b2+d2.  CC is 0 when the memory
   value equalled the expected value (swap performed), 1 otherwise.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* COMPARE DOUBLE AND SWAP (64-bit pairs): delegate the 128-bit
   compare-and-swap to the cdsg helper, which produces the CC.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
2006 #ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE (privileged): delegate to the csp
   helper, which sets the CC.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    gen_helper_csp(cc_op, cpu_env, r1, o->in2);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return NO_EXIT;
}
2016 #endif
/* CONVERT TO DECIMAL: the helper converts the low 32 bits of in1 to
   the packed-decimal form, which is stored as 8 bytes at in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* COMPARE AND TRAP: trap when the relation selected by m3 holds
   between in1 and in2 (unsigned variant when insn->data is set).
   The brcond uses the inverted condition to skip the trap.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
2049 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): synchronize PSW address and CC, then hand
   the function code to the diag helper.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2067 #endif
/* Signed 32-bit divide via helper; remainder comes back through
   return_low128 into o->out.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Unsigned 32-bit divide via helper; see op_divs32.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Signed 64-bit divide via helper; see op_divs32.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Unsigned 128/64 divide via helper: the 128-bit dividend is passed
   as the out/out2 pair; remainder via return_low128.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* Short-BFP divide: delegate to the deb helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Long-BFP divide: delegate to the ddb helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Extended-BFP divide (128-bit pairs); low half via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EXTRACT ACCESS REGISTER: load access register r2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* EXTRACT CACHE ATTRIBUTE: not emulated; return all-ones.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* EXTRACT FPC: read the floating-point control register from env.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EXTRACT PSW: store the high half of the PSW mask into R1 and, when
   R2 is non-zero, the low half into R2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: synchronize PSW address and CC, then let the ex helper run
   the modified target instruction.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
       tb->flags, (ab)use the tb->cs_base field as the address of
       the template in memory, and grab 8 bits of tb->flags/cflags for
       the contents of the register.  We would then recognize all this
       in gen_intermediate_code_internal, generating code for exactly
       one instruction.  This new TB then gets executed normally.

       On the other hand, this seems to be mostly used for modifying
       MVC inside of memcpy, which needs a helper call anyway.  So
       perhaps this doesn't bear thinking about any further.  */

    TCGv_i64 tmp;

    update_psw_addr(s);
    gen_op_calc_cc(s);

    tmp = tcg_const_i64(s->next_pc);
    gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
    tcg_temp_free_i64(tmp);

    return NO_EXIT;
}
/* LOAD FP INTEGER (short BFP): round to integer via helper; m3 is
   the rounding-mode field.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (long BFP); see op_fieb.  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* LOAD FP INTEGER (extended BFP); low half via return_low128.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIND LEFTMOST ONE: R1 = count of leading zeros (64 when the input
   is zero), R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2224 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2226 int m3 = get_field(s->fields, m3);
2227 int pos, len, base = s->insn->data;
2228 TCGv_i64 tmp = tcg_temp_new_i64();
2229 uint64_t ccm;
2231 switch (m3) {
2232 case 0xf:
2233 /* Effectively a 32-bit load. */
2234 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2235 len = 32;
2236 goto one_insert;
2238 case 0xc:
2239 case 0x6:
2240 case 0x3:
2241 /* Effectively a 16-bit load. */
2242 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2243 len = 16;
2244 goto one_insert;
2246 case 0x8:
2247 case 0x4:
2248 case 0x2:
2249 case 0x1:
2250 /* Effectively an 8-bit load. */
2251 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2252 len = 8;
2253 goto one_insert;
2255 one_insert:
2256 pos = base + ctz32(m3) * 8;
2257 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2258 ccm = ((1ull << len) - 1) << pos;
2259 break;
2261 default:
2262 /* This is going to be a sequence of loads and inserts. */
2263 pos = base + 32 - 8;
2264 ccm = 0;
2265 while (m3) {
2266 if (m3 & 0x8) {
2267 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2268 tcg_gen_addi_i64(o->in2, o->in2, 1);
2269 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2270 ccm |= 0xff << pos;
2272 m3 = (m3 << 1) & 0xf;
2273 pos -= 8;
2275 break;
2278 tcg_gen_movi_i64(tmp, ccm);
2279 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2280 tcg_temp_free_i64(tmp);
2281 return NO_EXIT;
/* Insert in2 into a bit-field of in1; position and width are packed
   into s->insn->data as (size << 8) | shift.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* INSERT PROGRAM MASK: compose bits 24-31 of the output from the
   program mask (taken from psw_mask) and the current CC.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    /* Clear the destination byte first.  */
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program mask: psw_mask bits shifted into position 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Condition code into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2311 #ifndef CONFIG_USER_ONLY
/* INVALIDATE PAGE TABLE ENTRY (privileged): delegate to helper.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ipte(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* INSERT STORAGE KEY EXTENDED (privileged): delegate to helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2325 #endif
/* LOAD AND ADD: atomic fetch-and-add; in2 receives the old memory
   value, and the sum is recomputed into out for the CC hook.  */
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND AND: atomic fetch-and-and; see op_laa for the CC scheme.  */
static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND OR: atomic fetch-and-or; see op_laa for the CC scheme.  */
static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD AND EXCLUSIVE OR: atomic fetch-and-xor; see op_laa.  */
static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory;
       recompute the addition for the computation of CC.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Lengthen short BFP to long via helper.  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* Round long BFP to short via helper.  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
/* Round extended BFP to long via helper.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Round extended BFP to short via helper.  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* Lengthen long BFP to extended; low half via return_low128.  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Lengthen short BFP to extended; low half via return_low128.  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 0-30 of in2.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}
/* Load a sign-extended byte from the address in in2.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Load a zero-extended byte from the address in in2.  */
static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Load a sign-extended halfword from the address in in2.  */
static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* Load a zero-extended halfword from the address in in2.  */
static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    /* Sign-extending 32-bit guest load from the address in in2.  */
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    /* Zero-extending 32-bit guest load from the address in in2.  */
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    /* 64-bit guest load from the address in in2.  */
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    /* Load-and-trap: in2 already holds the loaded value; store it to the
       low half of r1, then trap if that value is zero.  */
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    /* 64-bit load-and-trap: load from in2 into out, trap if zero.  */
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    /* High-half load-and-trap: in2 already holds the loaded value; store it
       to the high 32 bits of r1, then trap if that value is zero.  */
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    /* Zero-extended 32-bit load-and-trap: trap if the loaded value is zero. */
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    /* 31-bit load-and-trap: mask in2 to 31 bits, trap if the result is zero. */
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_loc(DisasContext *s, DisasOps *o)
{
    /* Load-on-condition: out = cond(m3) ? in2 : in1, via movcond.  */
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        /* 32-bit compare: materialize the condition as a 0/1 value,
           widen it, and select against zero.  */
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return NO_EXIT;
}
2542 #ifndef CONFIG_USER_ONLY
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    /* Privileged; the register range r1..r3 is loaded from the address
       in in2 by the lctl helper.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    /* Privileged; 64-bit variant of op_lctl, handled by the lctlg helper.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    /* Privileged; lra helper computes out from in2 and sets cc_op,
       which we latch with set_cc_static.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    /* Privileged; store in2 directly into env->pp.  */
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    /* Privileged; load a short (two 32-bit word) PSW from in2 and install
       it via the load_psw helper.  Control flow is gone, so the TB ends.  */
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK.  */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    /* Privileged; load an extended (two 64-bit word) PSW from in2 and
       install it via the load_psw helper.  The TB ends here.  */
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2621 #endif
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    /* Access registers r1..r3 are loaded from in2 by the lam helper.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
{
    /* Load multiple 32-bit: fill the low halves of regs r1..r3
       (register numbers wrap modulo 16) from memory at in2.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32_i64(r1, t1);
    store_reg32_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
{
    /* Load multiple high: same structure as op_lm32, but the loaded
       words go to the HIGH halves of regs r1..r3 (store_reg32h_i64).  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    t1 = tcg_temp_new_i64();
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
    store_reg32h_i64(r1, t1);
    store_reg32h_i64(r3, t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t2, 4);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t2);
        tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
        store_reg32h_i64(r1, t1);
    }
    tcg_temp_free(t2);
    tcg_temp_free(t1);

    return NO_EXIT;
}
static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
{
    /* Load multiple 64-bit: load full regs r1..r3 (wrapping mod 16)
       from memory at in2, loading directly into the register TCG vars.  */
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 t1, t2;

    /* Only one register to read. */
    if (unlikely(r1 == r3)) {
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
        return NO_EXIT;
    }

    /* First load the values of the first and last registers to trigger
       possible page faults. */
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
    tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
    /* r1 is loaded into a temp first so a fault on the r3 load
       leaves r1 unmodified.  */
    tcg_gen_mov_i64(regs[r1], t1);
    tcg_temp_free(t2);

    /* Only two registers to read. */
    if (((r1 + 1) & 15) == r3) {
        tcg_temp_free(t1);
        return NO_EXIT;
    }

    /* Then load the remaining registers. Page fault can't occur. */
    r3 = (r3 - 1) & 15;
    tcg_gen_movi_i64(t1, 8);
    while (r1 != r3) {
        r1 = (r1 + 1) & 15;
        tcg_gen_add_i64(o->in2, o->in2, t1);
        tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
    }
    tcg_temp_free(t1);

    return NO_EXIT;
}
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    /* Load pair from disjoint addresses (b1+d1, b2+d2) into out/out2;
       operand size comes from insn->data (a TCGMemOp).  */
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step.  */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
2792 #ifndef CONFIG_USER_ONLY
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    /* Privileged; real-address load done by the lura helper: in2 -> out.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    /* Privileged; 64-bit variant of op_lura via the lurag helper.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2808 #endif
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    /* Move by stealing: alias out to in2 (no copy emitted) and mark in2
       unused so the generic cleanup does not free it twice.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    /* Like op_mov2, but additionally set access register 1 according to
       the current address-space-control mode from tb->flags.  */
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> 32:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> 32:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> 32:
        /* In access-register mode via secondary space, copy AR[b2]
           when a base register is present, else 0.  */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> 32:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    /* Pair move by stealing: alias out/out2 to in1/in2 without emitting
       copies, then mark the inputs unused for cleanup.  */
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    /* Byte copy of length l1, from in2 to addr1, done in the mvc helper.  */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    /* mvcl helper operates on the register pairs named by r1/r2 and
       returns the condition code in cc_op.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    potential_page_fault(s);
    gen_helper_mvcl(cc_op, cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    /* mvcle helper: register pairs r1/r3 plus the in2 operand; the
       condition code comes back in cc_op.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return NO_EXIT;
}
2899 #ifndef CONFIG_USER_ONLY
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    /* Privileged; note the length register index comes from the l1 field.  */
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    /* Privileged; mirror of op_mvcp using the mvcs helper.  */
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
2919 #endif
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    /* Page move via the mvpg helper; r0 carries the extra operand.  */
    potential_page_fault(s);
    gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    /* String move: helper overwrites in1 with its result; the second
       result is fetched into in2 via return_low128.  */
    potential_page_fault(s);
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    /* out = in1 * in2 (low 64 bits).  */
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    /* Full 64x64->128 unsigned multiply: high half in out, low in out2.  */
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    /* FP multiply done in the meeb helper: (in1, in2) -> out.  */
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    /* FP multiply done in the mdeb helper: (in1, in2) -> out.  */
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    /* FP multiply done in the mdb helper: (in1, in2) -> out.  */
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    /* 128-bit FP multiply; the low half of the result is fetched via
       return_low128.  */
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    /* Widening FP multiply; low half of the 128-bit result via
       return_low128.  */
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    /* Fused multiply-add; the third operand is FP register r3.  */
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    /* Fused multiply-add; fregs[r3] is passed directly (64-bit operand).  */
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    /* Fused multiply-subtract; the third operand is FP register r3.  */
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    /* Fused multiply-subtract; fregs[r3] is passed directly.  */
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    /* Negative absolute value: out = (in2 >= 0 ? -in2 : in2), i.e. -|in2|. */
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    /* Force the sign bit (bit 31) on: negative absolute, 32-bit float.  */
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    /* Force the sign bit (bit 63) on: negative absolute, 64-bit float.  */
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    /* 128-bit: set the sign bit in the high word, pass the low through.  */
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    /* Storage-to-storage AND of length l1, in the nc helper; CC in cc_op.  */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    /* out = -in2 (two's complement).  */
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    /* Flip the sign bit (bit 31): 32-bit float negate.  */
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    /* Flip the sign bit (bit 63): 64-bit float negate.  */
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    /* 128-bit: flip the sign bit in the high word, pass the low through.  */
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    /* Storage-to-storage OR of length l1, in the oc helper; CC in cc_op.  */
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    /* out = in1 | in2.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    /* OR immediate into a sub-field: insn->data encodes the field's
       shift (low byte) and bit-width (next byte).  */
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is clobbered below, so it must not be a global.  */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    /* Population count computed in the popcnt helper: in2 -> out.  */
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3116 #ifndef CONFIG_USER_ONLY
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    /* Privileged; TLB purge handled entirely by the ptlb helper.  */
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3123 #endif
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    /* Rotate-then-insert-selected-bits family (RISBG/RISBHG/RISBLG,
       distinguished by op2): rotate in2 left by i5, then merge bits
       i3..i4 into out, optionally zeroing the rest (i4 bit 0x80).  */
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn.  PMASK limits the
       operation to the whole / high / low part of the register.  */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound.  */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1.  In the case of the high/low
       insns, we need to keep the other half of the register.  */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract.  */
    if (imask == 0 && pos == 0 && len > 0 && rot + len <= 64) {
        tcg_gen_extract_i64(o->out, o->in2, rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit.  */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO.  */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output.  */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask both halves and merge.  */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    /* Rotate-then-{AND,OR,XOR}-selected-bits (RNSBG/ROSBG/RXSBG,
       distinguished by op2): rotate in2 by i5, combine bits i3..i4
       into out, and set CC from the selected bits.  */
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result.  */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound.  */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary.  */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are neutralized (forced to the
       identity element of each operation) before combining.  */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 16 bits of in2 into out.  */
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    /* Byte-swap the low 32 bits of in2 into out.  */
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    /* Byte-swap all 64 bits of in2 into out.  */
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
{
    /* 32-bit rotate left: narrow both inputs, rotate, zero-extend back.  */
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 to = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    tcg_gen_extrl_i64_i32(t2, o->in2);
    tcg_gen_rotl_i32(to, t1, t2);
    tcg_gen_extu_i32_i64(o->out, to);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(to);
    return NO_EXIT;
}
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    /* 64-bit rotate left: out = rotl(in1, in2).  */
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3310 #ifndef CONFIG_USER_ONLY
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    /* Privileged; rrbe helper operates on in2 and yields the CC.  */
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    /* Privileged; sacf helper updates the address-space control.  */
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
3326 #endif
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    /* Set addressing mode; insn->data selects 24-bit (0), 31-bit (1)
       or 64-bit (other) and is deposited into PSW mask bits 31..32.  */
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode.  */
    return EXIT_PC_STALE;
}
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    /* Store in2 into access register r1.  */
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    /* FP subtract done in the seb helper: (in1, in2) -> out.  */
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    /* FP subtract done in the sdb helper: (in1, in2) -> out.  */
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    /* 128-bit FP subtract; low half of the result via return_low128.  */
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    /* FP square root done in the sqeb helper: in2 -> out.  */
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    /* FP square root done in the sqdb helper: in2 -> out.  */
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    /* 128-bit FP square root; low half of the result via return_low128.  */
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3408 #ifndef CONFIG_USER_ONLY
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    /* Privileged; service-call helper takes (in2, in1), CC in cc_op.  */
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    /* Privileged; sigp helper takes (in2, r1, in1) and yields the CC.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3428 #endif
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    /* Store-on-condition: store r1 (32 or 64 bits per insn->data) to
       b2+d2 only when condition m3 holds.  */
    DisasCompare c;
    TCGv_i64 a;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    if (s->insn->data) {
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
    } else {
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    /* Shift left arithmetic; insn->data gives the sign-bit position
       (31 or 63) and thereby selects the CC computation.  */
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    /* out = in1 << in2 (logical).  */
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    /* out = in1 >> in2 (arithmetic).  */
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    /* out = in1 >> in2 (logical).  */
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    /* FPC update done in the sfpc helper, from in2.  */
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    /* FPC-and-signal update done in the sfas helper, from in2.  */
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    /* SRNM/SRNMB/SRNMT (distinguished by op2): compute a rounding-mode
       value from b2+d2 and deposit it into the proper FPC bit-field,
       then reinstall the FPC so fpu_status picks up the new mode.  */
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* No base register: the field value is the immediate d2.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
3548 #ifndef CONFIG_USER_ONLY
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    /* Privileged; deposit bits 7..4 of in2 into the PSW key field.  */
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
    return NO_EXIT;
}
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    /* Privileged; storage-key update done in the sske helper.  */
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    /* Privileged; replace the top 8 bits of the PSW mask with in2.  */
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    return NO_EXIT;
}
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    /* Privileged; emit env->cpu_num as the result.  */
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested.  */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    /* Clock value produced by the stck helper; CC forced to 0.  */
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    /* Extended store-clock: widen the 64-bit clock into the 128-bit
       format and store both halves at in2/in2+8.  */
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    /* Privileged; clock-comparator set by the sckc helper from in2.  */
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    /* Privileged; clock-comparator read by the stckc helper into out.  */
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    /* Privileged; store control registers r1..r3 (64-bit) at in2.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    /* Privileged; store control registers r1..r3 (32-bit) at in2.  */
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    /* Privileged; build the CPU id word: cpu_num in the low half,
       machine_type deposited into the high half.  */
    TCGv_i64 t1 = tcg_temp_new_i64();

    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
    tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
    tcg_temp_free_i64(t1);

    return NO_EXIT;
}
3662 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3664 check_privileged(s);
3665 gen_helper_spt(cpu_env, o->in2);
3666 return NO_EXIT;
3669 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3671 check_privileged(s);
3672 gen_helper_stfl(cpu_env);
3673 return NO_EXIT;
3676 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3678 check_privileged(s);
3679 gen_helper_stpt(o->out, cpu_env);
3680 return NO_EXIT;
3683 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3685 check_privileged(s);
3686 potential_page_fault(s);
3687 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3688 set_cc_static(s);
3689 return NO_EXIT;
3692 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3694 check_privileged(s);
3695 gen_helper_spx(cpu_env, o->in2);
3696 return NO_EXIT;
/* Channel-subsystem instructions.  All are privileged; each defers to a
   helper (taking the subchannel id from r1 where applicable) and sets
   the condition code from the helper's result.  */

static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: read the prefix register from env, masked to its
   architected alignment.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}

/* STORE THEN AND/OR SYSTEM MASK: store the current system mask byte,
   then AND (opcode 0xac) or OR (otherwise) the i2 immediate into the
   top byte of the PSW mask.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* AND variant: clear system-mask bits not set in i2.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* OR variant: set the system-mask bits given in i2.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }
    return NO_EXIT;
}
/* STORE USING REAL ADDRESS (32-bit).  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit).  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
3836 #endif
/* STORE FACILITY LIST EXTENDED; CC comes from the helper.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Plain stores of o->in1 to the address in o->in2, by width.  */

static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: access registers r1..r3 to memory at o->in2.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    potential_page_fault(s);
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of o->in1 selected by
   the m3 mask to successive bytes at o->in2.  insn->data gives the bit
   offset of the register half being accessed (0 for the high half,
   32 for the low half).  Contiguous masks become a single store.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Shift amount that brings the lowest selected byte into bits 0-7.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE: store registers r1..r3 (wrapping 15 -> 0) to
   successive locations at o->in2.  insn->data selects the element
   size (4 or 8 bytes).  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
3954 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
3956 int r1 = get_field(s->fields, r1);
3957 int r3 = get_field(s->fields, r3);
3958 TCGv_i64 t = tcg_temp_new_i64();
3959 TCGv_i64 t4 = tcg_const_i64(4);
3960 TCGv_i64 t32 = tcg_const_i64(32);
3962 while (1) {
3963 tcg_gen_shl_i64(t, regs[r1], t32);
3964 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
3965 if (r1 == r3) {
3966 break;
3968 tcg_gen_add_i64(o->in2, o->in2, t4);
3969 r1 = (r1 + 1) & 15;
3972 tcg_temp_free_i64(t);
3973 tcg_temp_free_i64(t4);
3974 tcg_temp_free_i64(t32);
3975 return NO_EXIT;
/* SEARCH STRING: search for the byte in r0 between the addresses in
   in1 and in2; results come back in the register pair.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* SUBTRACT: out = in1 - in2.  CC is handled by the cout hook.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   derived from the current (incoming) condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the SVC number and instruction length in env,
   then raise the SVC exception.  Ends the TB.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    /* Low byte of i1 is the SVC interruption code.  */
    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    /* Instruction length, needed to compute the old PSW on delivery.  */
    t = tcg_const_i32(s->next_pc - s->pc);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
/* TEST DATA CLASS, 32/64/128-bit BFP; CC comes from the helper.  */

static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    /* The 128-bit operand lives in the out/out2 pair (see in1_x1_o).  */
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4059 #ifndef CONFIG_USER_ONLY
/* TEST PROTECTION; CC comes from the helper.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4067 #endif
/* TRANSLATE: translate l1+1 bytes at addr1 using the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED; updated addresses come back via out/out2.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST; CC comes from the helper.  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK: convert packed decimal at in2 to zoned at addr1.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    potential_page_fault(s);
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character): XOR l1+1 bytes at the second operand into
   the first.  The common XC of an area with itself (a memset to zero)
   is inlined for short lengths; everything else goes to the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* Emit the widest stores first, then narrower tail stores.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* x XOR x is zero, so CC is always 0 here.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    potential_page_fault(s);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR: out = in1 ^ in2.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* XOR IMMEDIATE: insn->data packs the field width (high byte) and the
   shift (low byte) selecting which part of the register is affected.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Produce a zero output (single register).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a zero output pair; both halves share one temp.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4197 /* ====================================================================== */
4198 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4199 the original inputs), update the various cc data structures in order to
4200 be able to compute the new condition code. */
/* Each cout hook records a CC_OP and the values it depends on, so the
   condition code can be computed lazily when actually needed.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits participate in the non-zero test.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
4358 /* ====================================================================== */
4359 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4360 with the TCG register to which we will write. Used in combination with
4361 the "wout" generators, in some cases we need a new temporary, and in
4362 some cases we can write to a TCG global. */
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temporary pair for a 128-bit output.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into GPR r1; g_out marks it as a global (not freed).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write into the even/odd GPR pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into FPR r1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write into the f128 FPR pair r1/r1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
4409 /* ====================================================================== */
4410 /* The "Write OUTput" generators. These generally perform some non-trivial
4411 copy of data to TCG globals, or to main memory. The trivial cases are
4412 generally handled by having a "prep" generator install the TCG global
4413 as the destination of the operation. */
/* Store the full output into GPR r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the output into GPR r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the output into GPR r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the output into GPR r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the output into the high 32 bits of GPR r1.  */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store a 32-bit pair into the even/odd GPR pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit doubleword across the even/odd GPR pair; the low
   half goes to r1+1 and the high half to r1.  Note this clobbers
   o->out with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store a 32-bit pair into the even/odd GPR pair r3/r3+1.  */
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* Store a 64-bit pair into the even/odd GPR pair r3/r3+1.  */
static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store a 32-bit FP value into FPR r1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* Store a 64-bit FP value into FPR r1.  */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4492 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4494 int f1 = get_field(s->fields, r1);
4495 store_freg(f1, o->out);
4496 store_freg(f1 + 2, o->out2);
4498 #define SPEC_wout_x1 SPEC_r1_f128
/* Store the low 32 bits into GPR r1, but only when r1 != r2.  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Store a 32-bit FP value into FPR r1, but only when r1 != r2.  */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Memory stores of the output at the first-operand address, by width.  */

static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* 32-bit store at the second-operand address (held in in2).  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Copy the (possibly updated) second input into GPR r1.  */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
4558 /* ====================================================================== */
4559 /* The "INput 1" generators. These load the first operand to an insn. */
/* Each in1 generator loads the first operand; the matching SPEC_in1_*
   macro names the register-number constraint the decoder enforces.
   The _o variants alias a TCG global directly (g_in1 prevents it from
   being freed); the others copy into a fresh temporary.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High word of r1 shifted into the low 32 bits.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair r1/r1+1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit doubleword assembled from the 32-bit pair r1 (high), r1+1 (low).  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_f1_o 0

/* Note: the 128-bit FP pair is placed in out/out2, not in1/in2;
   f128 helpers consume the operand there (see e.g. op_tcxb).  */
static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_in1_x1_o SPEC_r1_f128

static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = fregs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_f3_o 0

/* Effective address of the first operand (b1 + d1).  */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* Effective address of the second operand (x2 + b2 + d2) into addr1.  */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory loads of the first operand, by width and extension.  */

static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
4761 /* ====================================================================== */
4762 /* The "INput 2" generators. These load the second operand to an insn. */
/* Each in2 generator loads the second operand; the _o variants alias a
   TCG global directly (g_in2 prevents it from being freed).  */

static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r1)];
    o->g_in2 = true;
}
#define SPEC_in2_r1_o 0

static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_16u 0

static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
}
#define SPEC_in2_r1_32u 0

/* 64-bit doubleword assembled from the 32-bit pair r1 (high), r1+1 (low).  */
static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
}
#define SPEC_in2_r1_D32 SPEC_r1_even

static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r2));
}
#define SPEC_in2_r2 0

static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = regs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_r2_o 0

/* Load r2 only when it is non-zero; otherwise leave in2 unset.  */
static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    if (r2 != 0) {
        o->in2 = load_reg(r2);
    }
}
#define SPEC_in2_r2_nz 0

static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8s 0

static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_8u 0

static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16s 0

static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_16u 0

static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_reg(get_field(f, r3));
}
#define SPEC_in2_r3 0

/* High word of r3 shifted into the low 32 bits.  */
static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
}
#define SPEC_in2_r3_sr32 0

static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32s 0

static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
}
#define SPEC_in2_r2_32u 0

static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
}
#define SPEC_in2_r2_sr32 0

static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = load_freg32_i64(get_field(f, r2));
}
#define SPEC_in2_e2 0

static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = fregs[get_field(f, r2)];
    o->g_in2 = true;
}
#define SPEC_in2_f2_o 0

/* 128-bit FP operand: the pair r2/r2+2 fills both in1 and in2.  */
static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r2 = get_field(f, r2);
    o->in1 = fregs[r2];
    o->in2 = fregs[r2 + 2];
    o->g_in1 = o->g_in2 = true;
}
#define SPEC_in2_x2_o SPEC_r2_f128

/* Address formed from register r2 alone (used as a base).  */
static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = get_address(s, 0, get_field(f, r2), 0);
}
#define SPEC_in2_ra2 0

/* Effective address x2 + b2 + d2.  */
static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in2_a2 0

/* PC-relative address: pc + 2 * i2 (i2 counts halfwords).  */
static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
}
#define SPEC_in2_ri2 0

/* Shift counts, masked to the width-appropriate number of bits.  */

static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 31);
}
#define SPEC_in2_sh32 0

static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    help_l2_shift(s, f, o, 63);
}
#define SPEC_in2_sh64 0

/* Memory loads of the second operand; in2 first holds the address,
   then is overwritten in place with the loaded value.  */

static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_8u 0

static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16s 0

static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_16u 0

static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32s 0

static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_a2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_m2_32u 0
4965 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4967 in2_a2(s, f, o);
4968 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4970 #define SPEC_in2_m2_64 0
4972 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
4974 in2_ri2(s, f, o);
4975 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
4977 #define SPEC_in2_mri2_16u 0
4979 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
4981 in2_ri2(s, f, o);
4982 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
4984 #define SPEC_in2_mri2_32s 0
4986 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
4988 in2_ri2(s, f, o);
4989 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
4991 #define SPEC_in2_mri2_32u 0
4993 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
4995 in2_ri2(s, f, o);
4996 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
4998 #define SPEC_in2_mri2_64 0
5000 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5002 o->in2 = tcg_const_i64(get_field(f, i2));
5004 #define SPEC_in2_i2 0
5006 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5008 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5010 #define SPEC_in2_i2_8u 0
5012 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5014 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5016 #define SPEC_in2_i2_16u 0
5018 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5020 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5022 #define SPEC_in2_i2_32u 0
5024 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5026 uint64_t i2 = (uint16_t)get_field(f, i2);
5027 o->in2 = tcg_const_i64(i2 << s->insn->data);
5029 #define SPEC_in2_i2_16u_shl 0
5031 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5033 uint64_t i2 = (uint32_t)get_field(f, i2);
5034 o->in2 = tcg_const_i64(i2 << s->insn->data);
5036 #define SPEC_in2_i2_32u_shl 0
5038 #ifndef CONFIG_USER_ONLY
5039 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5041 o->in2 = tcg_const_i64(s->fields->raw_insn);
5043 #define SPEC_in2_insn 0
5044 #endif
/* ====================================================================== */

/* Find opc within the table of insns.  This is formulated as a switch
   statement so that (1) we get compile-time notice of cut-paste errors
   for duplicated opcodes, and (2) the compiler generates the binary
   search tree, rather than us having to post-process the table.  */
5053 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5054 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5056 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5058 enum DisasInsnEnum {
5059 #include "insn-data.def"
5062 #undef D
5063 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5064 .opc = OPC, \
5065 .fmt = FMT_##FT, \
5066 .fac = FAC_##FC, \
5067 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5068 .name = #NM, \
5069 .help_in1 = in1_##I1, \
5070 .help_in2 = in2_##I2, \
5071 .help_prep = prep_##P, \
5072 .help_wout = wout_##W, \
5073 .help_cout = cout_##CC, \
5074 .help_op = op_##OP, \
5075 .data = D \
5078 /* Allow 0 to be used for NULL in the table below. */
5079 #define in1_0 NULL
5080 #define in2_0 NULL
5081 #define prep_0 NULL
5082 #define wout_0 NULL
5083 #define cout_0 NULL
5084 #define op_0 NULL
5086 #define SPEC_in1_0 0
5087 #define SPEC_in2_0 0
5088 #define SPEC_prep_0 0
5089 #define SPEC_wout_0 0
5091 static const DisasInsn insn_info[] = {
5092 #include "insn-data.def"
5095 #undef D
5096 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5097 case OPC: return &insn_info[insn_ ## NM];
5099 static const DisasInsn *lookup_opc(uint16_t opc)
5101 switch (opc) {
5102 #include "insn-data.def"
5103 default:
5104 return NULL;
5108 #undef D
5109 #undef C
/* Extract a field from the insn.  The INSN should be left-aligned in
   the uint64_t so that we can more easily utilize the big-bit-endian
   definitions we extract from the Principles of Operation.  */
5115 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5117 uint32_t r, m;
5119 if (f->size == 0) {
5120 return;
5123 /* Zero extract the field from the insn. */
5124 r = (insn << f->beg) >> (64 - f->size);
5126 /* Sign-extend, or un-swap the field as necessary. */
5127 switch (f->type) {
5128 case 0: /* unsigned */
5129 break;
5130 case 1: /* signed */
5131 assert(f->size <= 32);
5132 m = 1u << (f->size - 1);
5133 r = (r ^ m) - m;
5134 break;
5135 case 2: /* dl+dh split, signed 20 bit. */
5136 r = ((int8_t)r << 12) | (r >> 8);
5137 break;
5138 default:
5139 abort();
5142 /* Validate that the "compressed" encoding we selected above is valid.
5143 I.e. we havn't make two different original fields overlap. */
5144 assert(((o->presentC >> f->indexC) & 1) == 0);
5145 o->presentC |= 1 << f->indexC;
5146 o->presentO |= 1 << f->indexO;
5148 o->c[f->indexC] = r;
/* Lookup the insn at the current PC, extracting the operands into O and
   returning the info struct for the insn.  Returns NULL for invalid insn.  */
5154 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5155 DisasFields *f)
5157 uint64_t insn, pc = s->pc;
5158 int op, op2, ilen;
5159 const DisasInsn *info;
5161 insn = ld_code2(env, pc);
5162 op = (insn >> 8) & 0xff;
5163 ilen = get_ilen(op);
5164 s->next_pc = s->pc + ilen;
5166 switch (ilen) {
5167 case 2:
5168 insn = insn << 48;
5169 break;
5170 case 4:
5171 insn = ld_code4(env, pc) << 32;
5172 break;
5173 case 6:
5174 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5175 break;
5176 default:
5177 abort();
5180 /* We can't actually determine the insn format until we've looked up
5181 the full insn opcode. Which we can't do without locating the
5182 secondary opcode. Assume by default that OP2 is at bit 40; for
5183 those smaller insns that don't actually have a secondary opcode
5184 this will correctly result in OP2 = 0. */
5185 switch (op) {
5186 case 0x01: /* E */
5187 case 0x80: /* S */
5188 case 0x82: /* S */
5189 case 0x93: /* S */
5190 case 0xb2: /* S, RRF, RRE */
5191 case 0xb3: /* RRE, RRD, RRF */
5192 case 0xb9: /* RRE, RRF */
5193 case 0xe5: /* SSE, SIL */
5194 op2 = (insn << 8) >> 56;
5195 break;
5196 case 0xa5: /* RI */
5197 case 0xa7: /* RI */
5198 case 0xc0: /* RIL */
5199 case 0xc2: /* RIL */
5200 case 0xc4: /* RIL */
5201 case 0xc6: /* RIL */
5202 case 0xc8: /* SSF */
5203 case 0xcc: /* RIL */
5204 op2 = (insn << 12) >> 60;
5205 break;
5206 case 0xd0 ... 0xdf: /* SS */
5207 case 0xe1: /* SS */
5208 case 0xe2: /* SS */
5209 case 0xe8: /* SS */
5210 case 0xe9: /* SS */
5211 case 0xea: /* SS */
5212 case 0xee ... 0xf3: /* SS */
5213 case 0xf8 ... 0xfd: /* SS */
5214 op2 = 0;
5215 break;
5216 default:
5217 op2 = (insn << 40) >> 56;
5218 break;
5221 memset(f, 0, sizeof(*f));
5222 f->raw_insn = insn;
5223 f->op = op;
5224 f->op2 = op2;
5226 /* Lookup the instruction. */
5227 info = lookup_opc(op << 8 | op2);
5229 /* If we found it, extract the operands. */
5230 if (info != NULL) {
5231 DisasFormat fmt = info->fmt;
5232 int i;
5234 for (i = 0; i < NUM_C_FIELD; ++i) {
5235 extract_field(f, &format_info[fmt].op[i], insn);
5238 return info;
5241 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5243 const DisasInsn *insn;
5244 ExitStatus ret = NO_EXIT;
5245 DisasFields f;
5246 DisasOps o;
5248 /* Search for the insn in the table. */
5249 insn = extract_insn(env, s, &f);
5251 /* Not found means unimplemented/illegal opcode. */
5252 if (insn == NULL) {
5253 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5254 f.op, f.op2);
5255 gen_illegal_opcode(s);
5256 return EXIT_NORETURN;
5259 #ifndef CONFIG_USER_ONLY
5260 if (s->tb->flags & FLAG_MASK_PER) {
5261 TCGv_i64 addr = tcg_const_i64(s->pc);
5262 gen_helper_per_ifetch(cpu_env, addr);
5263 tcg_temp_free_i64(addr);
5265 #endif
5267 /* Check for insn specification exceptions. */
5268 if (insn->spec) {
5269 int spec = insn->spec, excp = 0, r;
5271 if (spec & SPEC_r1_even) {
5272 r = get_field(&f, r1);
5273 if (r & 1) {
5274 excp = PGM_SPECIFICATION;
5277 if (spec & SPEC_r2_even) {
5278 r = get_field(&f, r2);
5279 if (r & 1) {
5280 excp = PGM_SPECIFICATION;
5283 if (spec & SPEC_r3_even) {
5284 r = get_field(&f, r3);
5285 if (r & 1) {
5286 excp = PGM_SPECIFICATION;
5289 if (spec & SPEC_r1_f128) {
5290 r = get_field(&f, r1);
5291 if (r > 13) {
5292 excp = PGM_SPECIFICATION;
5295 if (spec & SPEC_r2_f128) {
5296 r = get_field(&f, r2);
5297 if (r > 13) {
5298 excp = PGM_SPECIFICATION;
5301 if (excp) {
5302 gen_program_exception(s, excp);
5303 return EXIT_NORETURN;
5307 /* Set up the strutures we use to communicate with the helpers. */
5308 s->insn = insn;
5309 s->fields = &f;
5310 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5311 TCGV_UNUSED_I64(o.out);
5312 TCGV_UNUSED_I64(o.out2);
5313 TCGV_UNUSED_I64(o.in1);
5314 TCGV_UNUSED_I64(o.in2);
5315 TCGV_UNUSED_I64(o.addr1);
5317 /* Implement the instruction. */
5318 if (insn->help_in1) {
5319 insn->help_in1(s, &f, &o);
5321 if (insn->help_in2) {
5322 insn->help_in2(s, &f, &o);
5324 if (insn->help_prep) {
5325 insn->help_prep(s, &f, &o);
5327 if (insn->help_op) {
5328 ret = insn->help_op(s, &o);
5330 if (insn->help_wout) {
5331 insn->help_wout(s, &f, &o);
5333 if (insn->help_cout) {
5334 insn->help_cout(s, &o);
5337 /* Free any temporaries created by the helpers. */
5338 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5339 tcg_temp_free_i64(o.out);
5341 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5342 tcg_temp_free_i64(o.out2);
5344 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5345 tcg_temp_free_i64(o.in1);
5347 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5348 tcg_temp_free_i64(o.in2);
5350 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5351 tcg_temp_free_i64(o.addr1);
5354 #ifndef CONFIG_USER_ONLY
5355 if (s->tb->flags & FLAG_MASK_PER) {
5356 /* An exception might be triggered, save PSW if not already done. */
5357 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5358 tcg_gen_movi_i64(psw_addr, s->next_pc);
5361 /* Save off cc. */
5362 update_cc_op(s);
5364 /* Call the helper to check for a possible PER exception. */
5365 gen_helper_per_check_exception(cpu_env);
5367 #endif
5369 /* Advance to the next instruction. */
5370 s->pc = s->next_pc;
5371 return ret;
5374 void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
5376 S390CPU *cpu = s390_env_get_cpu(env);
5377 CPUState *cs = CPU(cpu);
5378 DisasContext dc;
5379 target_ulong pc_start;
5380 uint64_t next_page_start;
5381 int num_insns, max_insns;
5382 ExitStatus status;
5383 bool do_debug;
5385 pc_start = tb->pc;
5387 /* 31-bit mode */
5388 if (!(tb->flags & FLAG_MASK_64)) {
5389 pc_start &= 0x7fffffff;
5392 dc.tb = tb;
5393 dc.pc = pc_start;
5394 dc.cc_op = CC_OP_DYNAMIC;
5395 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5397 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5399 num_insns = 0;
5400 max_insns = tb->cflags & CF_COUNT_MASK;
5401 if (max_insns == 0) {
5402 max_insns = CF_COUNT_MASK;
5404 if (max_insns > TCG_MAX_INSNS) {
5405 max_insns = TCG_MAX_INSNS;
5408 gen_tb_start(tb);
5410 do {
5411 tcg_gen_insn_start(dc.pc, dc.cc_op);
5412 num_insns++;
5414 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5415 status = EXIT_PC_STALE;
5416 do_debug = true;
5417 /* The address covered by the breakpoint must be included in
5418 [tb->pc, tb->pc + tb->size) in order to for it to be
5419 properly cleared -- thus we increment the PC here so that
5420 the logic setting tb->size below does the right thing. */
5421 dc.pc += 2;
5422 break;
5425 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5426 gen_io_start();
5429 status = NO_EXIT;
5430 if (status == NO_EXIT) {
5431 status = translate_one(env, &dc);
5434 /* If we reach a page boundary, are single stepping,
5435 or exhaust instruction count, stop generation. */
5436 if (status == NO_EXIT
5437 && (dc.pc >= next_page_start
5438 || tcg_op_buf_full()
5439 || num_insns >= max_insns
5440 || singlestep
5441 || cs->singlestep_enabled)) {
5442 status = EXIT_PC_STALE;
5444 } while (status == NO_EXIT);
5446 if (tb->cflags & CF_LAST_IO) {
5447 gen_io_end();
5450 switch (status) {
5451 case EXIT_GOTO_TB:
5452 case EXIT_NORETURN:
5453 break;
5454 case EXIT_PC_STALE:
5455 update_psw_addr(&dc);
5456 /* FALLTHRU */
5457 case EXIT_PC_UPDATED:
5458 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5459 cc op type is in env */
5460 update_cc_op(&dc);
5461 /* Exit the TB, either by raising a debug exception or by return. */
5462 if (do_debug) {
5463 gen_exception(EXCP_DEBUG);
5464 } else {
5465 tcg_gen_exit_tb(0);
5467 break;
5468 default:
5469 abort();
5472 gen_tb_end(tb, num_insns);
5474 tb->size = dc.pc - pc_start;
5475 tb->icount = num_insns;
5477 #if defined(S390X_DEBUG_DISAS)
5478 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5479 && qemu_log_in_addr_range(pc_start)) {
5480 qemu_log_lock();
5481 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5482 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5483 qemu_log("\n");
5484 qemu_log_unlock();
5486 #endif
5489 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5490 target_ulong *data)
5492 int cc_op = data[1];
5493 env->psw.addr = data[0];
5494 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5495 env->cc_op = cc_op;