target/arm: Restore SPSEL to correct CONTROL register on exception return
[qemu/kevin.git] / target / s390x / translate.c
blob9ef95141f90fa4f60c91c1da5e93c903c1dc2527
/*
 * S/390 translation
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2010 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
41 /* global register indexes */
42 static TCGv_env cpu_env;
44 #include "exec/gen-icount.h"
45 #include "exec/helper-proto.h"
46 #include "exec/helper-gen.h"
48 #include "trace-tcg.h"
49 #include "exec/log.h"
52 /* Information that (most) every instruction needs to manipulate. */
53 typedef struct DisasContext DisasContext;
54 typedef struct DisasInsn DisasInsn;
55 typedef struct DisasFields DisasFields;
57 struct DisasContext {
58 struct TranslationBlock *tb;
59 const DisasInsn *insn;
60 DisasFields *fields;
61 uint64_t ex_value;
62 uint64_t pc, next_pc;
63 uint32_t ilen;
64 enum cc_op cc_op;
65 bool singlestep_enabled;
68 /* Information carried about a condition to be evaluated. */
69 typedef struct {
70 TCGCond cond:8;
71 bool is_64;
72 bool g1;
73 bool g2;
74 union {
75 struct { TCGv_i64 a, b; } s64;
76 struct { TCGv_i32 a, b; } s32;
77 } u;
78 } DisasCompare;
/* is_jmp field values */
#define DISAS_EXCP DISAS_TARGET_0

#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
88 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
90 if (!(s->tb->flags & FLAG_MASK_64)) {
91 if (s->tb->flags & FLAG_MASK_32) {
92 return pc | 0x80000000;
95 return pc;
98 static TCGv_i64 psw_addr;
99 static TCGv_i64 psw_mask;
100 static TCGv_i64 gbea;
102 static TCGv_i32 cc_op;
103 static TCGv_i64 cc_src;
104 static TCGv_i64 cc_dst;
105 static TCGv_i64 cc_vr;
107 static char cpu_reg_names[32][4];
108 static TCGv_i64 regs[16];
109 static TCGv_i64 fregs[16];
111 void s390x_translate_init(void)
113 int i;
115 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
116 tcg_ctx.tcg_env = cpu_env;
117 psw_addr = tcg_global_mem_new_i64(cpu_env,
118 offsetof(CPUS390XState, psw.addr),
119 "psw_addr");
120 psw_mask = tcg_global_mem_new_i64(cpu_env,
121 offsetof(CPUS390XState, psw.mask),
122 "psw_mask");
123 gbea = tcg_global_mem_new_i64(cpu_env,
124 offsetof(CPUS390XState, gbea),
125 "gbea");
127 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
128 "cc_op");
129 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
130 "cc_src");
131 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
132 "cc_dst");
133 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
134 "cc_vr");
136 for (i = 0; i < 16; i++) {
137 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
138 regs[i] = tcg_global_mem_new(cpu_env,
139 offsetof(CPUS390XState, regs[i]),
140 cpu_reg_names[i]);
143 for (i = 0; i < 16; i++) {
144 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
145 fregs[i] = tcg_global_mem_new(cpu_env,
146 offsetof(CPUS390XState, vregs[i][0].d),
147 cpu_reg_names[i + 16]);
151 static TCGv_i64 load_reg(int reg)
153 TCGv_i64 r = tcg_temp_new_i64();
154 tcg_gen_mov_i64(r, regs[reg]);
155 return r;
158 static TCGv_i64 load_freg32_i64(int reg)
160 TCGv_i64 r = tcg_temp_new_i64();
161 tcg_gen_shri_i64(r, fregs[reg], 32);
162 return r;
165 static void store_reg(int reg, TCGv_i64 v)
167 tcg_gen_mov_i64(regs[reg], v);
170 static void store_freg(int reg, TCGv_i64 v)
172 tcg_gen_mov_i64(fregs[reg], v);
175 static void store_reg32_i64(int reg, TCGv_i64 v)
177 /* 32 bit register writes keep the upper half */
178 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
181 static void store_reg32h_i64(int reg, TCGv_i64 v)
183 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
186 static void store_freg32_i64(int reg, TCGv_i64 v)
188 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
191 static void return_low128(TCGv_i64 dest)
193 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
196 static void update_psw_addr(DisasContext *s)
198 /* psw.addr */
199 tcg_gen_movi_i64(psw_addr, s->pc);
202 static void per_branch(DisasContext *s, bool to_next)
204 #ifndef CONFIG_USER_ONLY
205 tcg_gen_movi_i64(gbea, s->pc);
207 if (s->tb->flags & FLAG_MASK_PER) {
208 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
209 gen_helper_per_branch(cpu_env, gbea, next_pc);
210 if (to_next) {
211 tcg_temp_free_i64(next_pc);
214 #endif
217 static void per_branch_cond(DisasContext *s, TCGCond cond,
218 TCGv_i64 arg1, TCGv_i64 arg2)
220 #ifndef CONFIG_USER_ONLY
221 if (s->tb->flags & FLAG_MASK_PER) {
222 TCGLabel *lab = gen_new_label();
223 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
225 tcg_gen_movi_i64(gbea, s->pc);
226 gen_helper_per_branch(cpu_env, gbea, psw_addr);
228 gen_set_label(lab);
229 } else {
230 TCGv_i64 pc = tcg_const_i64(s->pc);
231 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
232 tcg_temp_free_i64(pc);
234 #endif
237 static void per_breaking_event(DisasContext *s)
239 tcg_gen_movi_i64(gbea, s->pc);
242 static void update_cc_op(DisasContext *s)
244 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
245 tcg_gen_movi_i32(cc_op, s->cc_op);
249 static void potential_page_fault(DisasContext *s)
251 update_psw_addr(s);
252 update_cc_op(s);
255 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
257 return (uint64_t)cpu_lduw_code(env, pc);
260 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
262 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
265 static int get_mem_index(DisasContext *s)
267 switch (s->tb->flags & FLAG_MASK_ASC) {
268 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
269 return 0;
270 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
271 return 1;
272 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
273 return 2;
274 default:
275 tcg_abort();
276 break;
280 static void gen_exception(int excp)
282 TCGv_i32 tmp = tcg_const_i32(excp);
283 gen_helper_exception(cpu_env, tmp);
284 tcg_temp_free_i32(tmp);
287 static void gen_program_exception(DisasContext *s, int code)
289 TCGv_i32 tmp;
291 /* Remember what pgm exeption this was. */
292 tmp = tcg_const_i32(code);
293 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
294 tcg_temp_free_i32(tmp);
296 tmp = tcg_const_i32(s->ilen);
297 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
298 tcg_temp_free_i32(tmp);
300 /* update the psw */
301 update_psw_addr(s);
303 /* Save off cc. */
304 update_cc_op(s);
306 /* Trigger exception. */
307 gen_exception(EXCP_PGM);
310 static inline void gen_illegal_opcode(DisasContext *s)
312 gen_program_exception(s, PGM_OPERATION);
315 static inline void gen_trap(DisasContext *s)
317 TCGv_i32 t;
319 /* Set DXC to 0xff. */
320 t = tcg_temp_new_i32();
321 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
322 tcg_gen_ori_i32(t, t, 0xff00);
323 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
324 tcg_temp_free_i32(t);
326 gen_program_exception(s, PGM_DATA);
329 #ifndef CONFIG_USER_ONLY
330 static void check_privileged(DisasContext *s)
332 if (s->tb->flags & FLAG_MASK_PSTATE) {
333 gen_program_exception(s, PGM_PRIVILEGED);
336 #endif
338 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
340 TCGv_i64 tmp = tcg_temp_new_i64();
341 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
343 /* Note that d2 is limited to 20 bits, signed. If we crop negative
344 displacements early we create larger immedate addends. */
346 /* Note that addi optimizes the imm==0 case. */
347 if (b2 && x2) {
348 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
349 tcg_gen_addi_i64(tmp, tmp, d2);
350 } else if (b2) {
351 tcg_gen_addi_i64(tmp, regs[b2], d2);
352 } else if (x2) {
353 tcg_gen_addi_i64(tmp, regs[x2], d2);
354 } else {
355 if (need_31) {
356 d2 &= 0x7fffffff;
357 need_31 = false;
359 tcg_gen_movi_i64(tmp, d2);
361 if (need_31) {
362 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
365 return tmp;
368 static inline bool live_cc_data(DisasContext *s)
370 return (s->cc_op != CC_OP_DYNAMIC
371 && s->cc_op != CC_OP_STATIC
372 && s->cc_op > 3);
375 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
377 if (live_cc_data(s)) {
378 tcg_gen_discard_i64(cc_src);
379 tcg_gen_discard_i64(cc_dst);
380 tcg_gen_discard_i64(cc_vr);
382 s->cc_op = CC_OP_CONST0 + val;
385 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
387 if (live_cc_data(s)) {
388 tcg_gen_discard_i64(cc_src);
389 tcg_gen_discard_i64(cc_vr);
391 tcg_gen_mov_i64(cc_dst, dst);
392 s->cc_op = op;
395 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
396 TCGv_i64 dst)
398 if (live_cc_data(s)) {
399 tcg_gen_discard_i64(cc_vr);
401 tcg_gen_mov_i64(cc_src, src);
402 tcg_gen_mov_i64(cc_dst, dst);
403 s->cc_op = op;
406 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
407 TCGv_i64 dst, TCGv_i64 vr)
409 tcg_gen_mov_i64(cc_src, src);
410 tcg_gen_mov_i64(cc_dst, dst);
411 tcg_gen_mov_i64(cc_vr, vr);
412 s->cc_op = op;
415 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
417 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
420 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
422 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
425 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
427 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
430 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
432 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
435 /* CC value is in env->cc_op */
436 static void set_cc_static(DisasContext *s)
438 if (live_cc_data(s)) {
439 tcg_gen_discard_i64(cc_src);
440 tcg_gen_discard_i64(cc_dst);
441 tcg_gen_discard_i64(cc_vr);
443 s->cc_op = CC_OP_STATIC;
446 /* calculates cc into cc_op */
447 static void gen_op_calc_cc(DisasContext *s)
449 TCGv_i32 local_cc_op;
450 TCGv_i64 dummy;
452 TCGV_UNUSED_I32(local_cc_op);
453 TCGV_UNUSED_I64(dummy);
454 switch (s->cc_op) {
455 default:
456 dummy = tcg_const_i64(0);
457 /* FALLTHRU */
458 case CC_OP_ADD_64:
459 case CC_OP_ADDU_64:
460 case CC_OP_ADDC_64:
461 case CC_OP_SUB_64:
462 case CC_OP_SUBU_64:
463 case CC_OP_SUBB_64:
464 case CC_OP_ADD_32:
465 case CC_OP_ADDU_32:
466 case CC_OP_ADDC_32:
467 case CC_OP_SUB_32:
468 case CC_OP_SUBU_32:
469 case CC_OP_SUBB_32:
470 local_cc_op = tcg_const_i32(s->cc_op);
471 break;
472 case CC_OP_CONST0:
473 case CC_OP_CONST1:
474 case CC_OP_CONST2:
475 case CC_OP_CONST3:
476 case CC_OP_STATIC:
477 case CC_OP_DYNAMIC:
478 break;
481 switch (s->cc_op) {
482 case CC_OP_CONST0:
483 case CC_OP_CONST1:
484 case CC_OP_CONST2:
485 case CC_OP_CONST3:
486 /* s->cc_op is the cc value */
487 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
488 break;
489 case CC_OP_STATIC:
490 /* env->cc_op already is the cc value */
491 break;
492 case CC_OP_NZ:
493 case CC_OP_ABS_64:
494 case CC_OP_NABS_64:
495 case CC_OP_ABS_32:
496 case CC_OP_NABS_32:
497 case CC_OP_LTGT0_32:
498 case CC_OP_LTGT0_64:
499 case CC_OP_COMP_32:
500 case CC_OP_COMP_64:
501 case CC_OP_NZ_F32:
502 case CC_OP_NZ_F64:
503 case CC_OP_FLOGR:
504 /* 1 argument */
505 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
506 break;
507 case CC_OP_ICM:
508 case CC_OP_LTGT_32:
509 case CC_OP_LTGT_64:
510 case CC_OP_LTUGTU_32:
511 case CC_OP_LTUGTU_64:
512 case CC_OP_TM_32:
513 case CC_OP_TM_64:
514 case CC_OP_SLA_32:
515 case CC_OP_SLA_64:
516 case CC_OP_NZ_F128:
517 /* 2 arguments */
518 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
519 break;
520 case CC_OP_ADD_64:
521 case CC_OP_ADDU_64:
522 case CC_OP_ADDC_64:
523 case CC_OP_SUB_64:
524 case CC_OP_SUBU_64:
525 case CC_OP_SUBB_64:
526 case CC_OP_ADD_32:
527 case CC_OP_ADDU_32:
528 case CC_OP_ADDC_32:
529 case CC_OP_SUB_32:
530 case CC_OP_SUBU_32:
531 case CC_OP_SUBB_32:
532 /* 3 arguments */
533 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
534 break;
535 case CC_OP_DYNAMIC:
536 /* unknown operation - assume 3 arguments and cc_op in env */
537 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
538 break;
539 default:
540 tcg_abort();
543 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
544 tcg_temp_free_i32(local_cc_op);
546 if (!TCGV_IS_UNUSED_I64(dummy)) {
547 tcg_temp_free_i64(dummy);
550 /* We now have cc in cc_op as constant */
551 set_cc_static(s);
554 static bool use_exit_tb(DisasContext *s)
556 return (s->singlestep_enabled ||
557 (s->tb->cflags & CF_LAST_IO) ||
558 (s->tb->flags & FLAG_MASK_PER));
561 static bool use_goto_tb(DisasContext *s, uint64_t dest)
563 if (unlikely(use_exit_tb(s))) {
564 return false;
566 #ifndef CONFIG_USER_ONLY
567 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
568 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
569 #else
570 return true;
571 #endif
574 static void account_noninline_branch(DisasContext *s, int cc_op)
576 #ifdef DEBUG_INLINE_BRANCHES
577 inline_branch_miss[cc_op]++;
578 #endif
581 static void account_inline_branch(DisasContext *s, int cc_op)
583 #ifdef DEBUG_INLINE_BRANCHES
584 inline_branch_hit[cc_op]++;
585 #endif
588 /* Table of mask values to comparison codes, given a comparison as input.
589 For such, CC=3 should not be possible. */
590 static const TCGCond ltgt_cond[16] = {
591 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
592 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
593 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
594 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
595 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
596 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
597 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
598 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
601 /* Table of mask values to comparison codes, given a logic op as input.
602 For such, only CC=0 and CC=1 should be possible. */
603 static const TCGCond nz_cond[16] = {
604 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
605 TCG_COND_NEVER, TCG_COND_NEVER,
606 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
607 TCG_COND_NE, TCG_COND_NE,
608 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
609 TCG_COND_EQ, TCG_COND_EQ,
610 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
611 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
614 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
615 details required to generate a TCG comparison. */
616 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
618 TCGCond cond;
619 enum cc_op old_cc_op = s->cc_op;
621 if (mask == 15 || mask == 0) {
622 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
623 c->u.s32.a = cc_op;
624 c->u.s32.b = cc_op;
625 c->g1 = c->g2 = true;
626 c->is_64 = false;
627 return;
630 /* Find the TCG condition for the mask + cc op. */
631 switch (old_cc_op) {
632 case CC_OP_LTGT0_32:
633 case CC_OP_LTGT0_64:
634 case CC_OP_LTGT_32:
635 case CC_OP_LTGT_64:
636 cond = ltgt_cond[mask];
637 if (cond == TCG_COND_NEVER) {
638 goto do_dynamic;
640 account_inline_branch(s, old_cc_op);
641 break;
643 case CC_OP_LTUGTU_32:
644 case CC_OP_LTUGTU_64:
645 cond = tcg_unsigned_cond(ltgt_cond[mask]);
646 if (cond == TCG_COND_NEVER) {
647 goto do_dynamic;
649 account_inline_branch(s, old_cc_op);
650 break;
652 case CC_OP_NZ:
653 cond = nz_cond[mask];
654 if (cond == TCG_COND_NEVER) {
655 goto do_dynamic;
657 account_inline_branch(s, old_cc_op);
658 break;
660 case CC_OP_TM_32:
661 case CC_OP_TM_64:
662 switch (mask) {
663 case 8:
664 cond = TCG_COND_EQ;
665 break;
666 case 4 | 2 | 1:
667 cond = TCG_COND_NE;
668 break;
669 default:
670 goto do_dynamic;
672 account_inline_branch(s, old_cc_op);
673 break;
675 case CC_OP_ICM:
676 switch (mask) {
677 case 8:
678 cond = TCG_COND_EQ;
679 break;
680 case 4 | 2 | 1:
681 case 4 | 2:
682 cond = TCG_COND_NE;
683 break;
684 default:
685 goto do_dynamic;
687 account_inline_branch(s, old_cc_op);
688 break;
690 case CC_OP_FLOGR:
691 switch (mask & 0xa) {
692 case 8: /* src == 0 -> no one bit found */
693 cond = TCG_COND_EQ;
694 break;
695 case 2: /* src != 0 -> one bit found */
696 cond = TCG_COND_NE;
697 break;
698 default:
699 goto do_dynamic;
701 account_inline_branch(s, old_cc_op);
702 break;
704 case CC_OP_ADDU_32:
705 case CC_OP_ADDU_64:
706 switch (mask) {
707 case 8 | 2: /* vr == 0 */
708 cond = TCG_COND_EQ;
709 break;
710 case 4 | 1: /* vr != 0 */
711 cond = TCG_COND_NE;
712 break;
713 case 8 | 4: /* no carry -> vr >= src */
714 cond = TCG_COND_GEU;
715 break;
716 case 2 | 1: /* carry -> vr < src */
717 cond = TCG_COND_LTU;
718 break;
719 default:
720 goto do_dynamic;
722 account_inline_branch(s, old_cc_op);
723 break;
725 case CC_OP_SUBU_32:
726 case CC_OP_SUBU_64:
727 /* Note that CC=0 is impossible; treat it as dont-care. */
728 switch (mask & 7) {
729 case 2: /* zero -> op1 == op2 */
730 cond = TCG_COND_EQ;
731 break;
732 case 4 | 1: /* !zero -> op1 != op2 */
733 cond = TCG_COND_NE;
734 break;
735 case 4: /* borrow (!carry) -> op1 < op2 */
736 cond = TCG_COND_LTU;
737 break;
738 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
739 cond = TCG_COND_GEU;
740 break;
741 default:
742 goto do_dynamic;
744 account_inline_branch(s, old_cc_op);
745 break;
747 default:
748 do_dynamic:
749 /* Calculate cc value. */
750 gen_op_calc_cc(s);
751 /* FALLTHRU */
753 case CC_OP_STATIC:
754 /* Jump based on CC. We'll load up the real cond below;
755 the assignment here merely avoids a compiler warning. */
756 account_noninline_branch(s, old_cc_op);
757 old_cc_op = CC_OP_STATIC;
758 cond = TCG_COND_NEVER;
759 break;
762 /* Load up the arguments of the comparison. */
763 c->is_64 = true;
764 c->g1 = c->g2 = false;
765 switch (old_cc_op) {
766 case CC_OP_LTGT0_32:
767 c->is_64 = false;
768 c->u.s32.a = tcg_temp_new_i32();
769 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
770 c->u.s32.b = tcg_const_i32(0);
771 break;
772 case CC_OP_LTGT_32:
773 case CC_OP_LTUGTU_32:
774 case CC_OP_SUBU_32:
775 c->is_64 = false;
776 c->u.s32.a = tcg_temp_new_i32();
777 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
778 c->u.s32.b = tcg_temp_new_i32();
779 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
780 break;
782 case CC_OP_LTGT0_64:
783 case CC_OP_NZ:
784 case CC_OP_FLOGR:
785 c->u.s64.a = cc_dst;
786 c->u.s64.b = tcg_const_i64(0);
787 c->g1 = true;
788 break;
789 case CC_OP_LTGT_64:
790 case CC_OP_LTUGTU_64:
791 case CC_OP_SUBU_64:
792 c->u.s64.a = cc_src;
793 c->u.s64.b = cc_dst;
794 c->g1 = c->g2 = true;
795 break;
797 case CC_OP_TM_32:
798 case CC_OP_TM_64:
799 case CC_OP_ICM:
800 c->u.s64.a = tcg_temp_new_i64();
801 c->u.s64.b = tcg_const_i64(0);
802 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
803 break;
805 case CC_OP_ADDU_32:
806 c->is_64 = false;
807 c->u.s32.a = tcg_temp_new_i32();
808 c->u.s32.b = tcg_temp_new_i32();
809 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
810 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
811 tcg_gen_movi_i32(c->u.s32.b, 0);
812 } else {
813 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
815 break;
817 case CC_OP_ADDU_64:
818 c->u.s64.a = cc_vr;
819 c->g1 = true;
820 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
821 c->u.s64.b = tcg_const_i64(0);
822 } else {
823 c->u.s64.b = cc_src;
824 c->g2 = true;
826 break;
828 case CC_OP_STATIC:
829 c->is_64 = false;
830 c->u.s32.a = cc_op;
831 c->g1 = true;
832 switch (mask) {
833 case 0x8 | 0x4 | 0x2: /* cc != 3 */
834 cond = TCG_COND_NE;
835 c->u.s32.b = tcg_const_i32(3);
836 break;
837 case 0x8 | 0x4 | 0x1: /* cc != 2 */
838 cond = TCG_COND_NE;
839 c->u.s32.b = tcg_const_i32(2);
840 break;
841 case 0x8 | 0x2 | 0x1: /* cc != 1 */
842 cond = TCG_COND_NE;
843 c->u.s32.b = tcg_const_i32(1);
844 break;
845 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
846 cond = TCG_COND_EQ;
847 c->g1 = false;
848 c->u.s32.a = tcg_temp_new_i32();
849 c->u.s32.b = tcg_const_i32(0);
850 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
851 break;
852 case 0x8 | 0x4: /* cc < 2 */
853 cond = TCG_COND_LTU;
854 c->u.s32.b = tcg_const_i32(2);
855 break;
856 case 0x8: /* cc == 0 */
857 cond = TCG_COND_EQ;
858 c->u.s32.b = tcg_const_i32(0);
859 break;
860 case 0x4 | 0x2 | 0x1: /* cc != 0 */
861 cond = TCG_COND_NE;
862 c->u.s32.b = tcg_const_i32(0);
863 break;
864 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
865 cond = TCG_COND_NE;
866 c->g1 = false;
867 c->u.s32.a = tcg_temp_new_i32();
868 c->u.s32.b = tcg_const_i32(0);
869 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
870 break;
871 case 0x4: /* cc == 1 */
872 cond = TCG_COND_EQ;
873 c->u.s32.b = tcg_const_i32(1);
874 break;
875 case 0x2 | 0x1: /* cc > 1 */
876 cond = TCG_COND_GTU;
877 c->u.s32.b = tcg_const_i32(1);
878 break;
879 case 0x2: /* cc == 2 */
880 cond = TCG_COND_EQ;
881 c->u.s32.b = tcg_const_i32(2);
882 break;
883 case 0x1: /* cc == 3 */
884 cond = TCG_COND_EQ;
885 c->u.s32.b = tcg_const_i32(3);
886 break;
887 default:
888 /* CC is masked by something else: (8 >> cc) & mask. */
889 cond = TCG_COND_NE;
890 c->g1 = false;
891 c->u.s32.a = tcg_const_i32(8);
892 c->u.s32.b = tcg_const_i32(0);
893 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
894 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
895 break;
897 break;
899 default:
900 abort();
902 c->cond = cond;
905 static void free_compare(DisasCompare *c)
907 if (!c->g1) {
908 if (c->is_64) {
909 tcg_temp_free_i64(c->u.s64.a);
910 } else {
911 tcg_temp_free_i32(c->u.s32.a);
914 if (!c->g2) {
915 if (c->is_64) {
916 tcg_temp_free_i64(c->u.s64.b);
917 } else {
918 tcg_temp_free_i32(c->u.s32.b);
923 /* ====================================================================== */
924 /* Define the insn format enumeration. */
925 #define F0(N) FMT_##N,
926 #define F1(N, X1) F0(N)
927 #define F2(N, X1, X2) F0(N)
928 #define F3(N, X1, X2, X3) F0(N)
929 #define F4(N, X1, X2, X3, X4) F0(N)
930 #define F5(N, X1, X2, X3, X4, X5) F0(N)
932 typedef enum {
933 #include "insn-format.def"
934 } DisasFormat;
936 #undef F0
937 #undef F1
938 #undef F2
939 #undef F3
940 #undef F4
941 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact field indices: mutually-exclusive fields share a slot. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1003 struct DisasFields {
1004 uint64_t raw_insn;
1005 unsigned op:8;
1006 unsigned op2:8;
1007 unsigned presentC:16;
1008 unsigned int presentO;
1009 int c[NUM_C_FIELD];
1012 /* This is the way fields are to be accessed out of DisasFields. */
1013 #define have_field(S, F) have_field1((S), FLD_O_##F)
1014 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1016 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1018 return (f->presentO >> c) & 1;
1021 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1022 enum DisasFieldIndexC c)
1024 assert(have_field1(f, o));
1025 return f->c[c];
1028 /* Describe the layout of each field in each format. */
1029 typedef struct DisasField {
1030 unsigned int beg:8;
1031 unsigned int size:8;
1032 unsigned int type:2;
1033 unsigned int indexC:6;
1034 enum DisasFieldIndexO indexO:8;
1035 } DisasField;
1037 typedef struct DisasFormatInfo {
1038 DisasField op[NUM_C_FIELD];
1039 } DisasFormatInfo;
1041 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1042 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1043 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1045 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1046 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1047 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1048 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1049 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1050 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1051 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1052 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1053 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1054 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1056 #define F0(N) { { } },
1057 #define F1(N, X1) { { X1 } },
1058 #define F2(N, X1, X2) { { X1, X2 } },
1059 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1060 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1061 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1063 static const DisasFormatInfo format_info[] = {
1064 #include "insn-format.def"
1067 #undef F0
1068 #undef F1
1069 #undef F2
1070 #undef F3
1071 #undef F4
1072 #undef F5
1073 #undef R
1074 #undef M
1075 #undef BD
1076 #undef BXD
1077 #undef BDL
1078 #undef BXDL
1079 #undef I
1080 #undef L
1082 /* Generally, we'll extract operands into this structures, operate upon
1083 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1084 of routines below for more details. */
1085 typedef struct {
1086 bool g_out, g_out2, g_in1, g_in2;
1087 TCGv_i64 out, out2, in1, in2;
1088 TCGv_i64 addr1;
1089 } DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
1125 struct DisasInsn {
1126 unsigned opc:16;
1127 DisasFormat fmt:8;
1128 unsigned fac:8;
1129 unsigned spec:8;
1131 const char *name;
1133 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1134 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1135 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1136 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1137 void (*help_cout)(DisasContext *, DisasOps *);
1138 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1140 uint64_t data;
1143 /* ====================================================================== */
1144 /* Miscellaneous helpers, used by several operations. */
1146 static void help_l2_shift(DisasContext *s, DisasFields *f,
1147 DisasOps *o, int mask)
1149 int b2 = get_field(f, b2);
1150 int d2 = get_field(f, d2);
1152 if (b2 == 0) {
1153 o->in2 = tcg_const_i64(d2 & mask);
1154 } else {
1155 o->in2 = get_address(s, 0, b2, d2);
1156 tcg_gen_andi_i64(o->in2, o->in2, mask);
1160 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1162 if (dest == s->next_pc) {
1163 per_branch(s, true);
1164 return NO_EXIT;
1166 if (use_goto_tb(s, dest)) {
1167 update_cc_op(s);
1168 per_breaking_event(s);
1169 tcg_gen_goto_tb(0);
1170 tcg_gen_movi_i64(psw_addr, dest);
1171 tcg_gen_exit_tb((uintptr_t)s->tb);
1172 return EXIT_GOTO_TB;
1173 } else {
1174 tcg_gen_movi_i64(psw_addr, dest);
1175 per_branch(s, false);
1176 return EXIT_PC_UPDATED;
1180 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1181 bool is_imm, int imm, TCGv_i64 cdest)
1183 ExitStatus ret;
1184 uint64_t dest = s->pc + 2 * imm;
1185 TCGLabel *lab;
1187 /* Take care of the special cases first. */
1188 if (c->cond == TCG_COND_NEVER) {
1189 ret = NO_EXIT;
1190 goto egress;
1192 if (is_imm) {
1193 if (dest == s->next_pc) {
1194 /* Branch to next. */
1195 per_branch(s, true);
1196 ret = NO_EXIT;
1197 goto egress;
1199 if (c->cond == TCG_COND_ALWAYS) {
1200 ret = help_goto_direct(s, dest);
1201 goto egress;
1203 } else {
1204 if (TCGV_IS_UNUSED_I64(cdest)) {
1205 /* E.g. bcr %r0 -> no branch. */
1206 ret = NO_EXIT;
1207 goto egress;
1209 if (c->cond == TCG_COND_ALWAYS) {
1210 tcg_gen_mov_i64(psw_addr, cdest);
1211 per_branch(s, false);
1212 ret = EXIT_PC_UPDATED;
1213 goto egress;
1217 if (use_goto_tb(s, s->next_pc)) {
1218 if (is_imm && use_goto_tb(s, dest)) {
1219 /* Both exits can use goto_tb. */
1220 update_cc_op(s);
1222 lab = gen_new_label();
1223 if (c->is_64) {
1224 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1225 } else {
1226 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1229 /* Branch not taken. */
1230 tcg_gen_goto_tb(0);
1231 tcg_gen_movi_i64(psw_addr, s->next_pc);
1232 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1234 /* Branch taken. */
1235 gen_set_label(lab);
1236 per_breaking_event(s);
1237 tcg_gen_goto_tb(1);
1238 tcg_gen_movi_i64(psw_addr, dest);
1239 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1241 ret = EXIT_GOTO_TB;
1242 } else {
1243 /* Fallthru can use goto_tb, but taken branch cannot. */
1244 /* Store taken branch destination before the brcond. This
1245 avoids having to allocate a new local temp to hold it.
1246 We'll overwrite this in the not taken case anyway. */
1247 if (!is_imm) {
1248 tcg_gen_mov_i64(psw_addr, cdest);
1251 lab = gen_new_label();
1252 if (c->is_64) {
1253 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1254 } else {
1255 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1258 /* Branch not taken. */
1259 update_cc_op(s);
1260 tcg_gen_goto_tb(0);
1261 tcg_gen_movi_i64(psw_addr, s->next_pc);
1262 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1264 gen_set_label(lab);
1265 if (is_imm) {
1266 tcg_gen_movi_i64(psw_addr, dest);
1268 per_breaking_event(s);
1269 ret = EXIT_PC_UPDATED;
1271 } else {
1272 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1273 Most commonly we're single-stepping or some other condition that
1274 disables all use of goto_tb. Just update the PC and exit. */
1276 TCGv_i64 next = tcg_const_i64(s->next_pc);
1277 if (is_imm) {
1278 cdest = tcg_const_i64(dest);
1281 if (c->is_64) {
1282 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1283 cdest, next);
1284 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1285 } else {
1286 TCGv_i32 t0 = tcg_temp_new_i32();
1287 TCGv_i64 t1 = tcg_temp_new_i64();
1288 TCGv_i64 z = tcg_const_i64(0);
1289 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1290 tcg_gen_extu_i32_i64(t1, t0);
1291 tcg_temp_free_i32(t0);
1292 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1293 per_branch_cond(s, TCG_COND_NE, t1, z);
1294 tcg_temp_free_i64(t1);
1295 tcg_temp_free_i64(z);
1298 if (is_imm) {
1299 tcg_temp_free_i64(cdest);
1301 tcg_temp_free_i64(next);
1303 ret = EXIT_PC_UPDATED;
1306 egress:
1307 free_compare(c);
1308 return ret;
1311 /* ====================================================================== */
1312 /* The operations. These perform the bulk of the work for any insn,
1313 usually after the operands have been loaded and output initialized. */
1315 static ExitStatus op_abs(DisasContext *s, DisasOps *o)
1317 TCGv_i64 z, n;
1318 z = tcg_const_i64(0);
1319 n = tcg_temp_new_i64();
1320 tcg_gen_neg_i64(n, o->in2);
1321 tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
1322 tcg_temp_free_i64(n);
1323 tcg_temp_free_i64(z);
1324 return NO_EXIT;
1327 static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
1329 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
1330 return NO_EXIT;
1333 static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
1335 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1336 return NO_EXIT;
1339 static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
1341 tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
1342 tcg_gen_mov_i64(o->out2, o->in2);
1343 return NO_EXIT;
1346 static ExitStatus op_add(DisasContext *s, DisasOps *o)
1348 tcg_gen_add_i64(o->out, o->in1, o->in2);
1349 return NO_EXIT;
1352 static ExitStatus op_addc(DisasContext *s, DisasOps *o)
1354 DisasCompare cmp;
1355 TCGv_i64 carry;
1357 tcg_gen_add_i64(o->out, o->in1, o->in2);
1359 /* The carry flag is the msb of CC, therefore the branch mask that would
1360 create that comparison is 3. Feeding the generated comparison to
1361 setcond produces the carry flag that we desire. */
1362 disas_jcc(s, &cmp, 3);
1363 carry = tcg_temp_new_i64();
1364 if (cmp.is_64) {
1365 tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
1366 } else {
1367 TCGv_i32 t = tcg_temp_new_i32();
1368 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
1369 tcg_gen_extu_i32_i64(carry, t);
1370 tcg_temp_free_i32(t);
1372 free_compare(&cmp);
1374 tcg_gen_add_i64(o->out, o->out, carry);
1375 tcg_temp_free_i64(carry);
1376 return NO_EXIT;
1379 static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
1381 gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
1382 return NO_EXIT;
1385 static ExitStatus op_adb(DisasContext *s, DisasOps *o)
1387 gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
1388 return NO_EXIT;
1391 static ExitStatus op_axb(DisasContext *s, DisasOps *o)
1393 gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
1394 return_low128(o->out2);
1395 return NO_EXIT;
1398 static ExitStatus op_and(DisasContext *s, DisasOps *o)
1400 tcg_gen_and_i64(o->out, o->in1, o->in2);
1401 return NO_EXIT;
1404 static ExitStatus op_andi(DisasContext *s, DisasOps *o)
1406 int shift = s->insn->data & 0xff;
1407 int size = s->insn->data >> 8;
1408 uint64_t mask = ((1ull << size) - 1) << shift;
1410 assert(!o->g_in2);
1411 tcg_gen_shli_i64(o->in2, o->in2, shift);
1412 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
1413 tcg_gen_and_i64(o->out, o->in1, o->in2);
1415 /* Produce the CC from only the bits manipulated. */
1416 tcg_gen_andi_i64(cc_dst, o->out, mask);
1417 set_cc_nz_u64(s, cc_dst);
1418 return NO_EXIT;
1421 static ExitStatus op_bas(DisasContext *s, DisasOps *o)
1423 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1424 if (!TCGV_IS_UNUSED_I64(o->in2)) {
1425 tcg_gen_mov_i64(psw_addr, o->in2);
1426 per_branch(s, false);
1427 return EXIT_PC_UPDATED;
1428 } else {
1429 return NO_EXIT;
1433 static ExitStatus op_basi(DisasContext *s, DisasOps *o)
1435 tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
1436 return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
1439 static ExitStatus op_bc(DisasContext *s, DisasOps *o)
1441 int m1 = get_field(s->fields, m1);
1442 bool is_imm = have_field(s->fields, i2);
1443 int imm = is_imm ? get_field(s->fields, i2) : 0;
1444 DisasCompare c;
1446 /* BCR with R2 = 0 causes no branching */
1447 if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
1448 if (m1 == 14) {
1449 /* Perform serialization */
1450 /* FIXME: check for fast-BCR-serialization facility */
1451 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1453 if (m1 == 15) {
1454 /* Perform serialization */
1455 /* FIXME: perform checkpoint-synchronisation */
1456 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1458 return NO_EXIT;
1461 disas_jcc(s, &c, m1);
1462 return help_branch(s, &c, is_imm, imm, o->in2);
1465 static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
1467 int r1 = get_field(s->fields, r1);
1468 bool is_imm = have_field(s->fields, i2);
1469 int imm = is_imm ? get_field(s->fields, i2) : 0;
1470 DisasCompare c;
1471 TCGv_i64 t;
1473 c.cond = TCG_COND_NE;
1474 c.is_64 = false;
1475 c.g1 = false;
1476 c.g2 = false;
1478 t = tcg_temp_new_i64();
1479 tcg_gen_subi_i64(t, regs[r1], 1);
1480 store_reg32_i64(r1, t);
1481 c.u.s32.a = tcg_temp_new_i32();
1482 c.u.s32.b = tcg_const_i32(0);
1483 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1484 tcg_temp_free_i64(t);
1486 return help_branch(s, &c, is_imm, imm, o->in2);
1489 static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
1491 int r1 = get_field(s->fields, r1);
1492 int imm = get_field(s->fields, i2);
1493 DisasCompare c;
1494 TCGv_i64 t;
1496 c.cond = TCG_COND_NE;
1497 c.is_64 = false;
1498 c.g1 = false;
1499 c.g2 = false;
1501 t = tcg_temp_new_i64();
1502 tcg_gen_shri_i64(t, regs[r1], 32);
1503 tcg_gen_subi_i64(t, t, 1);
1504 store_reg32h_i64(r1, t);
1505 c.u.s32.a = tcg_temp_new_i32();
1506 c.u.s32.b = tcg_const_i32(0);
1507 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1508 tcg_temp_free_i64(t);
1510 return help_branch(s, &c, 1, imm, o->in2);
1513 static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
1515 int r1 = get_field(s->fields, r1);
1516 bool is_imm = have_field(s->fields, i2);
1517 int imm = is_imm ? get_field(s->fields, i2) : 0;
1518 DisasCompare c;
1520 c.cond = TCG_COND_NE;
1521 c.is_64 = true;
1522 c.g1 = true;
1523 c.g2 = false;
1525 tcg_gen_subi_i64(regs[r1], regs[r1], 1);
1526 c.u.s64.a = regs[r1];
1527 c.u.s64.b = tcg_const_i64(0);
1529 return help_branch(s, &c, is_imm, imm, o->in2);
1532 static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
1534 int r1 = get_field(s->fields, r1);
1535 int r3 = get_field(s->fields, r3);
1536 bool is_imm = have_field(s->fields, i2);
1537 int imm = is_imm ? get_field(s->fields, i2) : 0;
1538 DisasCompare c;
1539 TCGv_i64 t;
1541 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1542 c.is_64 = false;
1543 c.g1 = false;
1544 c.g2 = false;
1546 t = tcg_temp_new_i64();
1547 tcg_gen_add_i64(t, regs[r1], regs[r3]);
1548 c.u.s32.a = tcg_temp_new_i32();
1549 c.u.s32.b = tcg_temp_new_i32();
1550 tcg_gen_extrl_i64_i32(c.u.s32.a, t);
1551 tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
1552 store_reg32_i64(r1, t);
1553 tcg_temp_free_i64(t);
1555 return help_branch(s, &c, is_imm, imm, o->in2);
1558 static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
1560 int r1 = get_field(s->fields, r1);
1561 int r3 = get_field(s->fields, r3);
1562 bool is_imm = have_field(s->fields, i2);
1563 int imm = is_imm ? get_field(s->fields, i2) : 0;
1564 DisasCompare c;
1566 c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
1567 c.is_64 = true;
1569 if (r1 == (r3 | 1)) {
1570 c.u.s64.b = load_reg(r3 | 1);
1571 c.g2 = false;
1572 } else {
1573 c.u.s64.b = regs[r3 | 1];
1574 c.g2 = true;
1577 tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
1578 c.u.s64.a = regs[r1];
1579 c.g1 = true;
1581 return help_branch(s, &c, is_imm, imm, o->in2);
1584 static ExitStatus op_cj(DisasContext *s, DisasOps *o)
1586 int imm, m3 = get_field(s->fields, m3);
1587 bool is_imm;
1588 DisasCompare c;
1590 c.cond = ltgt_cond[m3];
1591 if (s->insn->data) {
1592 c.cond = tcg_unsigned_cond(c.cond);
1594 c.is_64 = c.g1 = c.g2 = true;
1595 c.u.s64.a = o->in1;
1596 c.u.s64.b = o->in2;
1598 is_imm = have_field(s->fields, i4);
1599 if (is_imm) {
1600 imm = get_field(s->fields, i4);
1601 } else {
1602 imm = 0;
1603 o->out = get_address(s, 0, get_field(s->fields, b4),
1604 get_field(s->fields, d4));
1607 return help_branch(s, &c, is_imm, imm, o->out);
1610 static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
1612 gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
1613 set_cc_static(s);
1614 return NO_EXIT;
1617 static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
1619 gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
1620 set_cc_static(s);
1621 return NO_EXIT;
1624 static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
1626 gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
1627 set_cc_static(s);
1628 return NO_EXIT;
1631 static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
1633 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1634 gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
1635 tcg_temp_free_i32(m3);
1636 gen_set_cc_nz_f32(s, o->in2);
1637 return NO_EXIT;
1640 static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
1642 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1643 gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
1644 tcg_temp_free_i32(m3);
1645 gen_set_cc_nz_f64(s, o->in2);
1646 return NO_EXIT;
1649 static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
1651 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1652 gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
1653 tcg_temp_free_i32(m3);
1654 gen_set_cc_nz_f128(s, o->in1, o->in2);
1655 return NO_EXIT;
1658 static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
1660 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1661 gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
1662 tcg_temp_free_i32(m3);
1663 gen_set_cc_nz_f32(s, o->in2);
1664 return NO_EXIT;
1667 static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
1669 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1670 gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
1671 tcg_temp_free_i32(m3);
1672 gen_set_cc_nz_f64(s, o->in2);
1673 return NO_EXIT;
1676 static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
1678 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1679 gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
1680 tcg_temp_free_i32(m3);
1681 gen_set_cc_nz_f128(s, o->in1, o->in2);
1682 return NO_EXIT;
1685 static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
1687 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1688 gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
1689 tcg_temp_free_i32(m3);
1690 gen_set_cc_nz_f32(s, o->in2);
1691 return NO_EXIT;
1694 static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
1696 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1697 gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
1698 tcg_temp_free_i32(m3);
1699 gen_set_cc_nz_f64(s, o->in2);
1700 return NO_EXIT;
1703 static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
1705 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1706 gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
1707 tcg_temp_free_i32(m3);
1708 gen_set_cc_nz_f128(s, o->in1, o->in2);
1709 return NO_EXIT;
1712 static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
1714 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1715 gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
1716 tcg_temp_free_i32(m3);
1717 gen_set_cc_nz_f32(s, o->in2);
1718 return NO_EXIT;
1721 static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
1723 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1724 gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
1725 tcg_temp_free_i32(m3);
1726 gen_set_cc_nz_f64(s, o->in2);
1727 return NO_EXIT;
1730 static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
1732 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1733 gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
1734 tcg_temp_free_i32(m3);
1735 gen_set_cc_nz_f128(s, o->in1, o->in2);
1736 return NO_EXIT;
1739 static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
1741 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1742 gen_helper_cegb(o->out, cpu_env, o->in2, m3);
1743 tcg_temp_free_i32(m3);
1744 return NO_EXIT;
1747 static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
1749 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1750 gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
1751 tcg_temp_free_i32(m3);
1752 return NO_EXIT;
1755 static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
1757 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1758 gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
1759 tcg_temp_free_i32(m3);
1760 return_low128(o->out2);
1761 return NO_EXIT;
1764 static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
1766 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1767 gen_helper_celgb(o->out, cpu_env, o->in2, m3);
1768 tcg_temp_free_i32(m3);
1769 return NO_EXIT;
1772 static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
1774 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1775 gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
1776 tcg_temp_free_i32(m3);
1777 return NO_EXIT;
1780 static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
1782 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1783 gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
1784 tcg_temp_free_i32(m3);
1785 return_low128(o->out2);
1786 return NO_EXIT;
1789 static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
1791 int r2 = get_field(s->fields, r2);
1792 TCGv_i64 len = tcg_temp_new_i64();
1794 gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
1795 set_cc_static(s);
1796 return_low128(o->out);
1798 tcg_gen_add_i64(regs[r2], regs[r2], len);
1799 tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
1800 tcg_temp_free_i64(len);
1802 return NO_EXIT;
1805 static ExitStatus op_clc(DisasContext *s, DisasOps *o)
1807 int l = get_field(s->fields, l1);
1808 TCGv_i32 vl;
1810 switch (l + 1) {
1811 case 1:
1812 tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
1813 tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
1814 break;
1815 case 2:
1816 tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
1817 tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
1818 break;
1819 case 4:
1820 tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
1821 tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
1822 break;
1823 case 8:
1824 tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
1825 tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
1826 break;
1827 default:
1828 vl = tcg_const_i32(l);
1829 gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
1830 tcg_temp_free_i32(vl);
1831 set_cc_static(s);
1832 return NO_EXIT;
1834 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
1835 return NO_EXIT;
1838 static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
1840 int r1 = get_field(s->fields, r1);
1841 int r2 = get_field(s->fields, r2);
1842 TCGv_i32 t1, t2;
1844 /* r1 and r2 must be even. */
1845 if (r1 & 1 || r2 & 1) {
1846 gen_program_exception(s, PGM_SPECIFICATION);
1847 return EXIT_NORETURN;
1850 t1 = tcg_const_i32(r1);
1851 t2 = tcg_const_i32(r2);
1852 gen_helper_clcl(cc_op, cpu_env, t1, t2);
1853 tcg_temp_free_i32(t1);
1854 tcg_temp_free_i32(t2);
1855 set_cc_static(s);
1856 return NO_EXIT;
1859 static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
1861 int r1 = get_field(s->fields, r1);
1862 int r3 = get_field(s->fields, r3);
1863 TCGv_i32 t1, t3;
1865 /* r1 and r3 must be even. */
1866 if (r1 & 1 || r3 & 1) {
1867 gen_program_exception(s, PGM_SPECIFICATION);
1868 return EXIT_NORETURN;
1871 t1 = tcg_const_i32(r1);
1872 t3 = tcg_const_i32(r3);
1873 gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
1874 tcg_temp_free_i32(t1);
1875 tcg_temp_free_i32(t3);
1876 set_cc_static(s);
1877 return NO_EXIT;
1880 static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
1882 int r1 = get_field(s->fields, r1);
1883 int r3 = get_field(s->fields, r3);
1884 TCGv_i32 t1, t3;
1886 /* r1 and r3 must be even. */
1887 if (r1 & 1 || r3 & 1) {
1888 gen_program_exception(s, PGM_SPECIFICATION);
1889 return EXIT_NORETURN;
1892 t1 = tcg_const_i32(r1);
1893 t3 = tcg_const_i32(r3);
1894 gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
1895 tcg_temp_free_i32(t1);
1896 tcg_temp_free_i32(t3);
1897 set_cc_static(s);
1898 return NO_EXIT;
1901 static ExitStatus op_clm(DisasContext *s, DisasOps *o)
1903 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
1904 TCGv_i32 t1 = tcg_temp_new_i32();
1905 tcg_gen_extrl_i64_i32(t1, o->in1);
1906 gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
1907 set_cc_static(s);
1908 tcg_temp_free_i32(t1);
1909 tcg_temp_free_i32(m3);
1910 return NO_EXIT;
1913 static ExitStatus op_clst(DisasContext *s, DisasOps *o)
1915 gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
1916 set_cc_static(s);
1917 return_low128(o->in2);
1918 return NO_EXIT;
1921 static ExitStatus op_cps(DisasContext *s, DisasOps *o)
1923 TCGv_i64 t = tcg_temp_new_i64();
1924 tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
1925 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
1926 tcg_gen_or_i64(o->out, o->out, t);
1927 tcg_temp_free_i64(t);
1928 return NO_EXIT;
1931 static ExitStatus op_cs(DisasContext *s, DisasOps *o)
1933 int d2 = get_field(s->fields, d2);
1934 int b2 = get_field(s->fields, b2);
1935 TCGv_i64 addr, cc;
1937 /* Note that in1 = R3 (new value) and
1938 in2 = (zero-extended) R1 (expected value). */
1940 addr = get_address(s, 0, b2, d2);
1941 tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
1942 get_mem_index(s), s->insn->data | MO_ALIGN);
1943 tcg_temp_free_i64(addr);
1945 /* Are the memory and expected values (un)equal? Note that this setcond
1946 produces the output CC value, thus the NE sense of the test. */
1947 cc = tcg_temp_new_i64();
1948 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
1949 tcg_gen_extrl_i64_i32(cc_op, cc);
1950 tcg_temp_free_i64(cc);
1951 set_cc_static(s);
1953 return NO_EXIT;
1956 static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
1958 int r1 = get_field(s->fields, r1);
1959 int r3 = get_field(s->fields, r3);
1960 int d2 = get_field(s->fields, d2);
1961 int b2 = get_field(s->fields, b2);
1962 TCGv_i64 addr;
1963 TCGv_i32 t_r1, t_r3;
1965 /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
1966 addr = get_address(s, 0, b2, d2);
1967 t_r1 = tcg_const_i32(r1);
1968 t_r3 = tcg_const_i32(r3);
1969 gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
1970 tcg_temp_free_i64(addr);
1971 tcg_temp_free_i32(t_r1);
1972 tcg_temp_free_i32(t_r3);
1974 set_cc_static(s);
1975 return NO_EXIT;
1978 static ExitStatus op_csst(DisasContext *s, DisasOps *o)
1980 int r3 = get_field(s->fields, r3);
1981 TCGv_i32 t_r3 = tcg_const_i32(r3);
1983 gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
1984 tcg_temp_free_i32(t_r3);
1986 set_cc_static(s);
1987 return NO_EXIT;
1990 #ifndef CONFIG_USER_ONLY
1991 static ExitStatus op_csp(DisasContext *s, DisasOps *o)
1993 TCGMemOp mop = s->insn->data;
1994 TCGv_i64 addr, old, cc;
1995 TCGLabel *lab = gen_new_label();
1997 /* Note that in1 = R1 (zero-extended expected value),
1998 out = R1 (original reg), out2 = R1+1 (new value). */
2000 check_privileged(s);
2001 addr = tcg_temp_new_i64();
2002 old = tcg_temp_new_i64();
2003 tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
2004 tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
2005 get_mem_index(s), mop | MO_ALIGN);
2006 tcg_temp_free_i64(addr);
2008 /* Are the memory and expected values (un)equal? */
2009 cc = tcg_temp_new_i64();
2010 tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
2011 tcg_gen_extrl_i64_i32(cc_op, cc);
2013 /* Write back the output now, so that it happens before the
2014 following branch, so that we don't need local temps. */
2015 if ((mop & MO_SIZE) == MO_32) {
2016 tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
2017 } else {
2018 tcg_gen_mov_i64(o->out, old);
2020 tcg_temp_free_i64(old);
2022 /* If the comparison was equal, and the LSB of R2 was set,
2023 then we need to flush the TLB (for all cpus). */
2024 tcg_gen_xori_i64(cc, cc, 1);
2025 tcg_gen_and_i64(cc, cc, o->in2);
2026 tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
2027 tcg_temp_free_i64(cc);
2029 gen_helper_purge(cpu_env);
2030 gen_set_label(lab);
2032 return NO_EXIT;
2034 #endif
2036 static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
2038 TCGv_i64 t1 = tcg_temp_new_i64();
2039 TCGv_i32 t2 = tcg_temp_new_i32();
2040 tcg_gen_extrl_i64_i32(t2, o->in1);
2041 gen_helper_cvd(t1, t2);
2042 tcg_temp_free_i32(t2);
2043 tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
2044 tcg_temp_free_i64(t1);
2045 return NO_EXIT;
2048 static ExitStatus op_ct(DisasContext *s, DisasOps *o)
2050 int m3 = get_field(s->fields, m3);
2051 TCGLabel *lab = gen_new_label();
2052 TCGCond c;
2054 c = tcg_invert_cond(ltgt_cond[m3]);
2055 if (s->insn->data) {
2056 c = tcg_unsigned_cond(c);
2058 tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
2060 /* Trap. */
2061 gen_trap(s);
2063 gen_set_label(lab);
2064 return NO_EXIT;
2067 static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
2069 int m3 = get_field(s->fields, m3);
2070 int r1 = get_field(s->fields, r1);
2071 int r2 = get_field(s->fields, r2);
2072 TCGv_i32 tr1, tr2, chk;
2074 /* R1 and R2 must both be even. */
2075 if ((r1 | r2) & 1) {
2076 gen_program_exception(s, PGM_SPECIFICATION);
2077 return EXIT_NORETURN;
2079 if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
2080 m3 = 0;
2083 tr1 = tcg_const_i32(r1);
2084 tr2 = tcg_const_i32(r2);
2085 chk = tcg_const_i32(m3);
2087 switch (s->insn->data) {
2088 case 12:
2089 gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
2090 break;
2091 case 14:
2092 gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
2093 break;
2094 case 21:
2095 gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
2096 break;
2097 case 24:
2098 gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
2099 break;
2100 case 41:
2101 gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
2102 break;
2103 case 42:
2104 gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
2105 break;
2106 default:
2107 g_assert_not_reached();
2110 tcg_temp_free_i32(tr1);
2111 tcg_temp_free_i32(tr2);
2112 tcg_temp_free_i32(chk);
2113 set_cc_static(s);
2114 return NO_EXIT;
2117 #ifndef CONFIG_USER_ONLY
2118 static ExitStatus op_diag(DisasContext *s, DisasOps *o)
2120 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2121 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2122 TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
2124 check_privileged(s);
2125 update_psw_addr(s);
2126 gen_op_calc_cc(s);
2128 gen_helper_diag(cpu_env, r1, r3, func_code);
2130 tcg_temp_free_i32(func_code);
2131 tcg_temp_free_i32(r3);
2132 tcg_temp_free_i32(r1);
2133 return NO_EXIT;
2135 #endif
2137 static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
2139 gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
2140 return_low128(o->out);
2141 return NO_EXIT;
2144 static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
2146 gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
2147 return_low128(o->out);
2148 return NO_EXIT;
2151 static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
2153 gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
2154 return_low128(o->out);
2155 return NO_EXIT;
2158 static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
2160 gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
2161 return_low128(o->out);
2162 return NO_EXIT;
2165 static ExitStatus op_deb(DisasContext *s, DisasOps *o)
2167 gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
2168 return NO_EXIT;
2171 static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
2173 gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
2174 return NO_EXIT;
2177 static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
2179 gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
2180 return_low128(o->out2);
2181 return NO_EXIT;
2184 static ExitStatus op_ear(DisasContext *s, DisasOps *o)
2186 int r2 = get_field(s->fields, r2);
2187 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
2188 return NO_EXIT;
2191 static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
2193 /* No cache information provided. */
2194 tcg_gen_movi_i64(o->out, -1);
2195 return NO_EXIT;
2198 static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
2200 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
2201 return NO_EXIT;
2204 static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
2206 int r1 = get_field(s->fields, r1);
2207 int r2 = get_field(s->fields, r2);
2208 TCGv_i64 t = tcg_temp_new_i64();
2210 /* Note the "subsequently" in the PoO, which implies a defined result
2211 if r1 == r2. Thus we cannot defer these writes to an output hook. */
2212 tcg_gen_shri_i64(t, psw_mask, 32);
2213 store_reg32_i64(r1, t);
2214 if (r2 != 0) {
2215 store_reg32_i64(r2, psw_mask);
2218 tcg_temp_free_i64(t);
2219 return NO_EXIT;
2222 static ExitStatus op_ex(DisasContext *s, DisasOps *o)
2224 int r1 = get_field(s->fields, r1);
2225 TCGv_i32 ilen;
2226 TCGv_i64 v1;
2228 /* Nested EXECUTE is not allowed. */
2229 if (unlikely(s->ex_value)) {
2230 gen_program_exception(s, PGM_EXECUTE);
2231 return EXIT_NORETURN;
2234 update_psw_addr(s);
2235 update_cc_op(s);
2237 if (r1 == 0) {
2238 v1 = tcg_const_i64(0);
2239 } else {
2240 v1 = regs[r1];
2243 ilen = tcg_const_i32(s->ilen);
2244 gen_helper_ex(cpu_env, ilen, v1, o->in2);
2245 tcg_temp_free_i32(ilen);
2247 if (r1 == 0) {
2248 tcg_temp_free_i64(v1);
2251 return EXIT_PC_CC_UPDATED;
2254 static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
2256 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2257 gen_helper_fieb(o->out, cpu_env, o->in2, m3);
2258 tcg_temp_free_i32(m3);
2259 return NO_EXIT;
2262 static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
2264 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2265 gen_helper_fidb(o->out, cpu_env, o->in2, m3);
2266 tcg_temp_free_i32(m3);
2267 return NO_EXIT;
2270 static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
2272 TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
2273 gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
2274 return_low128(o->out2);
2275 tcg_temp_free_i32(m3);
2276 return NO_EXIT;
2279 static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
2281 /* We'll use the original input for cc computation, since we get to
2282 compare that against 0, which ought to be better than comparing
2283 the real output against 64. It also lets cc_dst be a convenient
2284 temporary during our computation. */
2285 gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
2287 /* R1 = IN ? CLZ(IN) : 64. */
2288 tcg_gen_clzi_i64(o->out, o->in2, 64);
2290 /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
2291 value by 64, which is undefined. But since the shift is 64 iff the
2292 input is zero, we still get the correct result after and'ing. */
2293 tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
2294 tcg_gen_shr_i64(o->out2, o->out2, o->out);
2295 tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
2296 return NO_EXIT;
2299 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2301 int m3 = get_field(s->fields, m3);
2302 int pos, len, base = s->insn->data;
2303 TCGv_i64 tmp = tcg_temp_new_i64();
2304 uint64_t ccm;
2306 switch (m3) {
2307 case 0xf:
2308 /* Effectively a 32-bit load. */
2309 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2310 len = 32;
2311 goto one_insert;
2313 case 0xc:
2314 case 0x6:
2315 case 0x3:
2316 /* Effectively a 16-bit load. */
2317 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2318 len = 16;
2319 goto one_insert;
2321 case 0x8:
2322 case 0x4:
2323 case 0x2:
2324 case 0x1:
2325 /* Effectively an 8-bit load. */
2326 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2327 len = 8;
2328 goto one_insert;
2330 one_insert:
2331 pos = base + ctz32(m3) * 8;
2332 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2333 ccm = ((1ull << len) - 1) << pos;
2334 break;
2336 default:
2337 /* This is going to be a sequence of loads and inserts. */
2338 pos = base + 32 - 8;
2339 ccm = 0;
2340 while (m3) {
2341 if (m3 & 0x8) {
2342 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2343 tcg_gen_addi_i64(o->in2, o->in2, 1);
2344 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2345 ccm |= 0xff << pos;
2347 m3 = (m3 << 1) & 0xf;
2348 pos -= 8;
2350 break;
2353 tcg_gen_movi_i64(tmp, ccm);
2354 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2355 tcg_temp_free_i64(tmp);
2356 return NO_EXIT;
2359 static ExitStatus op_insi(DisasContext *s, DisasOps *o)
2361 int shift = s->insn->data & 0xff;
2362 int size = s->insn->data >> 8;
2363 tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
2364 return NO_EXIT;
2367 static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
2369 TCGv_i64 t1;
2371 gen_op_calc_cc(s);
2372 tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
2374 t1 = tcg_temp_new_i64();
2375 tcg_gen_shli_i64(t1, psw_mask, 20);
2376 tcg_gen_shri_i64(t1, t1, 36);
2377 tcg_gen_or_i64(o->out, o->out, t1);
2379 tcg_gen_extu_i32_i64(t1, cc_op);
2380 tcg_gen_shli_i64(t1, t1, 28);
2381 tcg_gen_or_i64(o->out, o->out, t1);
2382 tcg_temp_free_i64(t1);
2383 return NO_EXIT;
2386 #ifndef CONFIG_USER_ONLY
2387 static ExitStatus op_idte(DisasContext *s, DisasOps *o)
2389 TCGv_i32 m4;
2391 check_privileged(s);
2392 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2393 m4 = tcg_const_i32(get_field(s->fields, m4));
2394 } else {
2395 m4 = tcg_const_i32(0);
2397 gen_helper_idte(cpu_env, o->in1, o->in2, m4);
2398 tcg_temp_free_i32(m4);
2399 return NO_EXIT;
2402 static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
2404 TCGv_i32 m4;
2406 check_privileged(s);
2407 if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
2408 m4 = tcg_const_i32(get_field(s->fields, m4));
2409 } else {
2410 m4 = tcg_const_i32(0);
2412 gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
2413 tcg_temp_free_i32(m4);
2414 return NO_EXIT;
2417 static ExitStatus op_iske(DisasContext *s, DisasOps *o)
2419 check_privileged(s);
2420 gen_helper_iske(o->out, cpu_env, o->in2);
2421 return NO_EXIT;
2423 #endif
2425 static ExitStatus op_msa(DisasContext *s, DisasOps *o)
2427 int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
2428 int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
2429 int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
2430 TCGv_i32 t_r1, t_r2, t_r3, type;
2432 switch (s->insn->data) {
2433 case S390_FEAT_TYPE_KMCTR:
2434 if (r3 & 1 || !r3) {
2435 gen_program_exception(s, PGM_SPECIFICATION);
2436 return EXIT_NORETURN;
2438 /* FALL THROUGH */
2439 case S390_FEAT_TYPE_PPNO:
2440 case S390_FEAT_TYPE_KMF:
2441 case S390_FEAT_TYPE_KMC:
2442 case S390_FEAT_TYPE_KMO:
2443 case S390_FEAT_TYPE_KM:
2444 if (r1 & 1 || !r1) {
2445 gen_program_exception(s, PGM_SPECIFICATION);
2446 return EXIT_NORETURN;
2448 /* FALL THROUGH */
2449 case S390_FEAT_TYPE_KMAC:
2450 case S390_FEAT_TYPE_KIMD:
2451 case S390_FEAT_TYPE_KLMD:
2452 if (r2 & 1 || !r2) {
2453 gen_program_exception(s, PGM_SPECIFICATION);
2454 return EXIT_NORETURN;
2456 /* FALL THROUGH */
2457 case S390_FEAT_TYPE_PCKMO:
2458 case S390_FEAT_TYPE_PCC:
2459 break;
2460 default:
2461 g_assert_not_reached();
2464 t_r1 = tcg_const_i32(r1);
2465 t_r2 = tcg_const_i32(r2);
2466 t_r3 = tcg_const_i32(r3);
2467 type = tcg_const_i32(s->insn->data);
2468 gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
2469 set_cc_static(s);
2470 tcg_temp_free_i32(t_r1);
2471 tcg_temp_free_i32(t_r2);
2472 tcg_temp_free_i32(t_r3);
2473 tcg_temp_free_i32(type);
2474 return NO_EXIT;
2477 static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2479 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2480 set_cc_static(s);
2481 return NO_EXIT;
2484 static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2486 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2487 set_cc_static(s);
2488 return NO_EXIT;
2491 static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2493 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2494 set_cc_static(s);
2495 return NO_EXIT;
2498 static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2500 /* The real output is indeed the original value in memory;
2501 recompute the addition for the computation of CC. */
2502 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2503 s->insn->data | MO_ALIGN);
2504 /* However, we need to recompute the addition for setting CC. */
2505 tcg_gen_add_i64(o->out, o->in1, o->in2);
2506 return NO_EXIT;
2509 static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2511 /* The real output is indeed the original value in memory;
2512 recompute the addition for the computation of CC. */
2513 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2514 s->insn->data | MO_ALIGN);
2515 /* However, we need to recompute the operation for setting CC. */
2516 tcg_gen_and_i64(o->out, o->in1, o->in2);
2517 return NO_EXIT;
2520 static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2522 /* The real output is indeed the original value in memory;
2523 recompute the addition for the computation of CC. */
2524 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2525 s->insn->data | MO_ALIGN);
2526 /* However, we need to recompute the operation for setting CC. */
2527 tcg_gen_or_i64(o->out, o->in1, o->in2);
2528 return NO_EXIT;
2531 static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2533 /* The real output is indeed the original value in memory;
2534 recompute the addition for the computation of CC. */
2535 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2536 s->insn->data | MO_ALIGN);
2537 /* However, we need to recompute the operation for setting CC. */
2538 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2539 return NO_EXIT;
2542 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2544 gen_helper_ldeb(o->out, cpu_env, o->in2);
2545 return NO_EXIT;
2548 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2550 gen_helper_ledb(o->out, cpu_env, o->in2);
2551 return NO_EXIT;
2554 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2556 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2557 return NO_EXIT;
2560 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2562 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2563 return NO_EXIT;
2566 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2568 gen_helper_lxdb(o->out, cpu_env, o->in2);
2569 return_low128(o->out2);
2570 return NO_EXIT;
2573 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2575 gen_helper_lxeb(o->out, cpu_env, o->in2);
2576 return_low128(o->out2);
2577 return NO_EXIT;
2580 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2582 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2583 return NO_EXIT;
2586 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2588 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2589 return NO_EXIT;
2592 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2594 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2595 return NO_EXIT;
2598 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2600 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2601 return NO_EXIT;
2604 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2606 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2607 return NO_EXIT;
2610 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2612 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2613 return NO_EXIT;
2616 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2618 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2619 return NO_EXIT;
2622 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2624 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2625 return NO_EXIT;
2628 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2630 TCGLabel *lab = gen_new_label();
2631 store_reg32_i64(get_field(s->fields, r1), o->in2);
2632 /* The value is stored even in case of trap. */
2633 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2634 gen_trap(s);
2635 gen_set_label(lab);
2636 return NO_EXIT;
2639 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2641 TCGLabel *lab = gen_new_label();
2642 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2643 /* The value is stored even in case of trap. */
2644 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2645 gen_trap(s);
2646 gen_set_label(lab);
2647 return NO_EXIT;
2650 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2652 TCGLabel *lab = gen_new_label();
2653 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2654 /* The value is stored even in case of trap. */
2655 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2656 gen_trap(s);
2657 gen_set_label(lab);
2658 return NO_EXIT;
2661 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2663 TCGLabel *lab = gen_new_label();
2664 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2665 /* The value is stored even in case of trap. */
2666 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2667 gen_trap(s);
2668 gen_set_label(lab);
2669 return NO_EXIT;
2672 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2674 TCGLabel *lab = gen_new_label();
2675 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2676 /* The value is stored even in case of trap. */
2677 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2678 gen_trap(s);
2679 gen_set_label(lab);
2680 return NO_EXIT;
2683 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2685 DisasCompare c;
2687 disas_jcc(s, &c, get_field(s->fields, m3));
2689 if (c.is_64) {
2690 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2691 o->in2, o->in1);
2692 free_compare(&c);
2693 } else {
2694 TCGv_i32 t32 = tcg_temp_new_i32();
2695 TCGv_i64 t, z;
2697 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2698 free_compare(&c);
2700 t = tcg_temp_new_i64();
2701 tcg_gen_extu_i32_i64(t, t32);
2702 tcg_temp_free_i32(t32);
2704 z = tcg_const_i64(0);
2705 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2706 tcg_temp_free_i64(t);
2707 tcg_temp_free_i64(z);
2710 return NO_EXIT;
2713 #ifndef CONFIG_USER_ONLY
2714 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2716 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2717 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2718 check_privileged(s);
2719 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2720 tcg_temp_free_i32(r1);
2721 tcg_temp_free_i32(r3);
2722 return NO_EXIT;
2725 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2727 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2728 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2729 check_privileged(s);
2730 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2731 tcg_temp_free_i32(r1);
2732 tcg_temp_free_i32(r3);
2733 return NO_EXIT;
2736 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2738 check_privileged(s);
2739 gen_helper_lra(o->out, cpu_env, o->in2);
2740 set_cc_static(s);
2741 return NO_EXIT;
2744 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2746 check_privileged(s);
2748 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2749 return NO_EXIT;
2752 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2754 TCGv_i64 t1, t2;
2756 check_privileged(s);
2757 per_breaking_event(s);
2759 t1 = tcg_temp_new_i64();
2760 t2 = tcg_temp_new_i64();
2761 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2762 tcg_gen_addi_i64(o->in2, o->in2, 4);
2763 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2764 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2765 tcg_gen_shli_i64(t1, t1, 32);
2766 gen_helper_load_psw(cpu_env, t1, t2);
2767 tcg_temp_free_i64(t1);
2768 tcg_temp_free_i64(t2);
2769 return EXIT_NORETURN;
2772 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2774 TCGv_i64 t1, t2;
2776 check_privileged(s);
2777 per_breaking_event(s);
2779 t1 = tcg_temp_new_i64();
2780 t2 = tcg_temp_new_i64();
2781 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2782 tcg_gen_addi_i64(o->in2, o->in2, 8);
2783 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2784 gen_helper_load_psw(cpu_env, t1, t2);
2785 tcg_temp_free_i64(t1);
2786 tcg_temp_free_i64(t2);
2787 return EXIT_NORETURN;
2789 #endif
2791 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2793 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2794 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2795 gen_helper_lam(cpu_env, r1, o->in2, r3);
2796 tcg_temp_free_i32(r1);
2797 tcg_temp_free_i32(r3);
2798 return NO_EXIT;
2801 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2803 int r1 = get_field(s->fields, r1);
2804 int r3 = get_field(s->fields, r3);
2805 TCGv_i64 t1, t2;
2807 /* Only one register to read. */
2808 t1 = tcg_temp_new_i64();
2809 if (unlikely(r1 == r3)) {
2810 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2811 store_reg32_i64(r1, t1);
2812 tcg_temp_free(t1);
2813 return NO_EXIT;
2816 /* First load the values of the first and last registers to trigger
2817 possible page faults. */
2818 t2 = tcg_temp_new_i64();
2819 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2820 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2821 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2822 store_reg32_i64(r1, t1);
2823 store_reg32_i64(r3, t2);
2825 /* Only two registers to read. */
2826 if (((r1 + 1) & 15) == r3) {
2827 tcg_temp_free(t2);
2828 tcg_temp_free(t1);
2829 return NO_EXIT;
2832 /* Then load the remaining registers. Page fault can't occur. */
2833 r3 = (r3 - 1) & 15;
2834 tcg_gen_movi_i64(t2, 4);
2835 while (r1 != r3) {
2836 r1 = (r1 + 1) & 15;
2837 tcg_gen_add_i64(o->in2, o->in2, t2);
2838 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2839 store_reg32_i64(r1, t1);
2841 tcg_temp_free(t2);
2842 tcg_temp_free(t1);
2844 return NO_EXIT;
2847 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2849 int r1 = get_field(s->fields, r1);
2850 int r3 = get_field(s->fields, r3);
2851 TCGv_i64 t1, t2;
2853 /* Only one register to read. */
2854 t1 = tcg_temp_new_i64();
2855 if (unlikely(r1 == r3)) {
2856 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2857 store_reg32h_i64(r1, t1);
2858 tcg_temp_free(t1);
2859 return NO_EXIT;
2862 /* First load the values of the first and last registers to trigger
2863 possible page faults. */
2864 t2 = tcg_temp_new_i64();
2865 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2866 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2867 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2868 store_reg32h_i64(r1, t1);
2869 store_reg32h_i64(r3, t2);
2871 /* Only two registers to read. */
2872 if (((r1 + 1) & 15) == r3) {
2873 tcg_temp_free(t2);
2874 tcg_temp_free(t1);
2875 return NO_EXIT;
2878 /* Then load the remaining registers. Page fault can't occur. */
2879 r3 = (r3 - 1) & 15;
2880 tcg_gen_movi_i64(t2, 4);
2881 while (r1 != r3) {
2882 r1 = (r1 + 1) & 15;
2883 tcg_gen_add_i64(o->in2, o->in2, t2);
2884 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2885 store_reg32h_i64(r1, t1);
2887 tcg_temp_free(t2);
2888 tcg_temp_free(t1);
2890 return NO_EXIT;
2893 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2895 int r1 = get_field(s->fields, r1);
2896 int r3 = get_field(s->fields, r3);
2897 TCGv_i64 t1, t2;
2899 /* Only one register to read. */
2900 if (unlikely(r1 == r3)) {
2901 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2902 return NO_EXIT;
2905 /* First load the values of the first and last registers to trigger
2906 possible page faults. */
2907 t1 = tcg_temp_new_i64();
2908 t2 = tcg_temp_new_i64();
2909 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2910 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2911 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2912 tcg_gen_mov_i64(regs[r1], t1);
2913 tcg_temp_free(t2);
2915 /* Only two registers to read. */
2916 if (((r1 + 1) & 15) == r3) {
2917 tcg_temp_free(t1);
2918 return NO_EXIT;
2921 /* Then load the remaining registers. Page fault can't occur. */
2922 r3 = (r3 - 1) & 15;
2923 tcg_gen_movi_i64(t1, 8);
2924 while (r1 != r3) {
2925 r1 = (r1 + 1) & 15;
2926 tcg_gen_add_i64(o->in2, o->in2, t1);
2927 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2929 tcg_temp_free(t1);
2931 return NO_EXIT;
2934 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2936 TCGv_i64 a1, a2;
2937 TCGMemOp mop = s->insn->data;
2939 /* In a parallel context, stop the world and single step. */
2940 if (parallel_cpus) {
2941 potential_page_fault(s);
2942 gen_exception(EXCP_ATOMIC);
2943 return EXIT_NORETURN;
2946 /* In a serial context, perform the two loads ... */
2947 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2948 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2949 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2950 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2951 tcg_temp_free_i64(a1);
2952 tcg_temp_free_i64(a2);
2954 /* ... and indicate that we performed them while interlocked. */
2955 gen_op_movi_cc(s, 0);
2956 return NO_EXIT;
2959 static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
2961 gen_helper_lpq(o->out, cpu_env, o->in2);
2962 return_low128(o->out2);
2963 return NO_EXIT;
2966 #ifndef CONFIG_USER_ONLY
2967 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2969 check_privileged(s);
2970 gen_helper_lura(o->out, cpu_env, o->in2);
2971 return NO_EXIT;
2974 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2976 check_privileged(s);
2977 gen_helper_lurag(o->out, cpu_env, o->in2);
2978 return NO_EXIT;
2980 #endif
2982 static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
2984 tcg_gen_andi_i64(o->out, o->in2, -256);
2985 return NO_EXIT;
2988 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2990 o->out = o->in2;
2991 o->g_out = o->g_in2;
2992 TCGV_UNUSED_I64(o->in2);
2993 o->g_in2 = false;
2994 return NO_EXIT;
2997 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
2999 int b2 = get_field(s->fields, b2);
3000 TCGv ar1 = tcg_temp_new_i64();
3002 o->out = o->in2;
3003 o->g_out = o->g_in2;
3004 TCGV_UNUSED_I64(o->in2);
3005 o->g_in2 = false;
3007 switch (s->tb->flags & FLAG_MASK_ASC) {
3008 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3009 tcg_gen_movi_i64(ar1, 0);
3010 break;
3011 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3012 tcg_gen_movi_i64(ar1, 1);
3013 break;
3014 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3015 if (b2) {
3016 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3017 } else {
3018 tcg_gen_movi_i64(ar1, 0);
3020 break;
3021 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3022 tcg_gen_movi_i64(ar1, 2);
3023 break;
3026 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3027 tcg_temp_free_i64(ar1);
3029 return NO_EXIT;
3032 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3034 o->out = o->in1;
3035 o->out2 = o->in2;
3036 o->g_out = o->g_in1;
3037 o->g_out2 = o->g_in2;
3038 TCGV_UNUSED_I64(o->in1);
3039 TCGV_UNUSED_I64(o->in2);
3040 o->g_in1 = o->g_in2 = false;
3041 return NO_EXIT;
3044 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3046 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3047 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3048 tcg_temp_free_i32(l);
3049 return NO_EXIT;
3052 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3054 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3055 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3056 tcg_temp_free_i32(l);
3057 return NO_EXIT;
3060 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3062 int r1 = get_field(s->fields, r1);
3063 int r2 = get_field(s->fields, r2);
3064 TCGv_i32 t1, t2;
3066 /* r1 and r2 must be even. */
3067 if (r1 & 1 || r2 & 1) {
3068 gen_program_exception(s, PGM_SPECIFICATION);
3069 return EXIT_NORETURN;
3072 t1 = tcg_const_i32(r1);
3073 t2 = tcg_const_i32(r2);
3074 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3075 tcg_temp_free_i32(t1);
3076 tcg_temp_free_i32(t2);
3077 set_cc_static(s);
3078 return NO_EXIT;
3081 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3083 int r1 = get_field(s->fields, r1);
3084 int r3 = get_field(s->fields, r3);
3085 TCGv_i32 t1, t3;
3087 /* r1 and r3 must be even. */
3088 if (r1 & 1 || r3 & 1) {
3089 gen_program_exception(s, PGM_SPECIFICATION);
3090 return EXIT_NORETURN;
3093 t1 = tcg_const_i32(r1);
3094 t3 = tcg_const_i32(r3);
3095 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3096 tcg_temp_free_i32(t1);
3097 tcg_temp_free_i32(t3);
3098 set_cc_static(s);
3099 return NO_EXIT;
3102 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3104 int r1 = get_field(s->fields, r1);
3105 int r3 = get_field(s->fields, r3);
3106 TCGv_i32 t1, t3;
3108 /* r1 and r3 must be even. */
3109 if (r1 & 1 || r3 & 1) {
3110 gen_program_exception(s, PGM_SPECIFICATION);
3111 return EXIT_NORETURN;
3114 t1 = tcg_const_i32(r1);
3115 t3 = tcg_const_i32(r3);
3116 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3117 tcg_temp_free_i32(t1);
3118 tcg_temp_free_i32(t3);
3119 set_cc_static(s);
3120 return NO_EXIT;
3123 static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3125 int r3 = get_field(s->fields, r3);
3126 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3127 set_cc_static(s);
3128 return NO_EXIT;
3131 #ifndef CONFIG_USER_ONLY
3132 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3134 int r1 = get_field(s->fields, l1);
3135 check_privileged(s);
3136 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3137 set_cc_static(s);
3138 return NO_EXIT;
3141 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3143 int r1 = get_field(s->fields, l1);
3144 check_privileged(s);
3145 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3146 set_cc_static(s);
3147 return NO_EXIT;
3149 #endif
3151 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3153 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3154 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3155 tcg_temp_free_i32(l);
3156 return NO_EXIT;
3159 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3161 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3162 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3163 tcg_temp_free_i32(l);
3164 return NO_EXIT;
3167 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3169 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3170 set_cc_static(s);
3171 return NO_EXIT;
3174 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3176 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3177 set_cc_static(s);
3178 return_low128(o->in2);
3179 return NO_EXIT;
3182 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3184 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3185 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3186 tcg_temp_free_i32(l);
3187 return NO_EXIT;
3190 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3192 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3193 return NO_EXIT;
3196 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3198 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3199 return NO_EXIT;
3202 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3204 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3205 return NO_EXIT;
3208 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3210 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3211 return NO_EXIT;
3214 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3216 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3217 return NO_EXIT;
3220 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3222 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3223 return_low128(o->out2);
3224 return NO_EXIT;
3227 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3229 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3230 return_low128(o->out2);
3231 return NO_EXIT;
3234 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3236 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3237 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3238 tcg_temp_free_i64(r3);
3239 return NO_EXIT;
3242 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3244 int r3 = get_field(s->fields, r3);
3245 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3246 return NO_EXIT;
3249 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3251 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3252 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3253 tcg_temp_free_i64(r3);
3254 return NO_EXIT;
3257 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3259 int r3 = get_field(s->fields, r3);
3260 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3261 return NO_EXIT;
3264 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3266 TCGv_i64 z, n;
3267 z = tcg_const_i64(0);
3268 n = tcg_temp_new_i64();
3269 tcg_gen_neg_i64(n, o->in2);
3270 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3271 tcg_temp_free_i64(n);
3272 tcg_temp_free_i64(z);
3273 return NO_EXIT;
3276 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3278 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3279 return NO_EXIT;
3282 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3284 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3285 return NO_EXIT;
3288 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3290 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3291 tcg_gen_mov_i64(o->out2, o->in2);
3292 return NO_EXIT;
3295 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3297 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3298 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3299 tcg_temp_free_i32(l);
3300 set_cc_static(s);
3301 return NO_EXIT;
3304 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3306 tcg_gen_neg_i64(o->out, o->in2);
3307 return NO_EXIT;
3310 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3312 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3313 return NO_EXIT;
3316 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3318 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3319 return NO_EXIT;
3322 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3324 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3325 tcg_gen_mov_i64(o->out2, o->in2);
3326 return NO_EXIT;
3329 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3331 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3332 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3333 tcg_temp_free_i32(l);
3334 set_cc_static(s);
3335 return NO_EXIT;
3338 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3340 tcg_gen_or_i64(o->out, o->in1, o->in2);
3341 return NO_EXIT;
3344 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3346 int shift = s->insn->data & 0xff;
3347 int size = s->insn->data >> 8;
3348 uint64_t mask = ((1ull << size) - 1) << shift;
3350 assert(!o->g_in2);
3351 tcg_gen_shli_i64(o->in2, o->in2, shift);
3352 tcg_gen_or_i64(o->out, o->in1, o->in2);
3354 /* Produce the CC from only the bits manipulated. */
3355 tcg_gen_andi_i64(cc_dst, o->out, mask);
3356 set_cc_nz_u64(s, cc_dst);
3357 return NO_EXIT;
3360 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3362 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3363 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3364 tcg_temp_free_i32(l);
3365 return NO_EXIT;
3368 static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3370 int l2 = get_field(s->fields, l2) + 1;
3371 TCGv_i32 l;
3373 /* The length must not exceed 32 bytes. */
3374 if (l2 > 32) {
3375 gen_program_exception(s, PGM_SPECIFICATION);
3376 return EXIT_NORETURN;
3378 l = tcg_const_i32(l2);
3379 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3380 tcg_temp_free_i32(l);
3381 return NO_EXIT;
3384 static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3386 int l2 = get_field(s->fields, l2) + 1;
3387 TCGv_i32 l;
3389 /* The length must be even and should not exceed 64 bytes. */
3390 if ((l2 & 1) || (l2 > 64)) {
3391 gen_program_exception(s, PGM_SPECIFICATION);
3392 return EXIT_NORETURN;
3394 l = tcg_const_i32(l2);
3395 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3396 tcg_temp_free_i32(l);
3397 return NO_EXIT;
3400 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3402 gen_helper_popcnt(o->out, o->in2);
3403 return NO_EXIT;
3406 #ifndef CONFIG_USER_ONLY
3407 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3409 check_privileged(s);
3410 gen_helper_ptlb(cpu_env);
3411 return NO_EXIT;
3413 #endif
3415 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3417 int i3 = get_field(s->fields, i3);
3418 int i4 = get_field(s->fields, i4);
3419 int i5 = get_field(s->fields, i5);
3420 int do_zero = i4 & 0x80;
3421 uint64_t mask, imask, pmask;
3422 int pos, len, rot;
3424 /* Adjust the arguments for the specific insn. */
3425 switch (s->fields->op2) {
3426 case 0x55: /* risbg */
3427 i3 &= 63;
3428 i4 &= 63;
3429 pmask = ~0;
3430 break;
3431 case 0x5d: /* risbhg */
3432 i3 &= 31;
3433 i4 &= 31;
3434 pmask = 0xffffffff00000000ull;
3435 break;
3436 case 0x51: /* risblg */
3437 i3 &= 31;
3438 i4 &= 31;
3439 pmask = 0x00000000ffffffffull;
3440 break;
3441 default:
3442 abort();
3445 /* MASK is the set of bits to be inserted from R2.
3446 Take care for I3/I4 wraparound. */
3447 mask = pmask >> i3;
3448 if (i3 <= i4) {
3449 mask ^= pmask >> i4 >> 1;
3450 } else {
3451 mask |= ~(pmask >> i4 >> 1);
3453 mask &= pmask;
3455 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3456 insns, we need to keep the other half of the register. */
3457 imask = ~mask | ~pmask;
3458 if (do_zero) {
3459 if (s->fields->op2 == 0x55) {
3460 imask = 0;
3461 } else {
3462 imask = ~pmask;
3466 len = i4 - i3 + 1;
3467 pos = 63 - i4;
3468 rot = i5 & 63;
3469 if (s->fields->op2 == 0x5d) {
3470 pos += 32;
3473 /* In some cases we can implement this with extract. */
3474 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3475 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3476 return NO_EXIT;
3479 /* In some cases we can implement this with deposit. */
3480 if (len > 0 && (imask == 0 || ~mask == imask)) {
3481 /* Note that we rotate the bits to be inserted to the lsb, not to
3482 the position as described in the PoO. */
3483 rot = (rot - pos) & 63;
3484 } else {
3485 pos = -1;
3488 /* Rotate the input as necessary. */
3489 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3491 /* Insert the selected bits into the output. */
3492 if (pos >= 0) {
3493 if (imask == 0) {
3494 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3495 } else {
3496 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3498 } else if (imask == 0) {
3499 tcg_gen_andi_i64(o->out, o->in2, mask);
3500 } else {
3501 tcg_gen_andi_i64(o->in2, o->in2, mask);
3502 tcg_gen_andi_i64(o->out, o->out, imask);
3503 tcg_gen_or_i64(o->out, o->out, o->in2);
3505 return NO_EXIT;
3508 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3510 int i3 = get_field(s->fields, i3);
3511 int i4 = get_field(s->fields, i4);
3512 int i5 = get_field(s->fields, i5);
3513 uint64_t mask;
3515 /* If this is a test-only form, arrange to discard the result. */
3516 if (i3 & 0x80) {
3517 o->out = tcg_temp_new_i64();
3518 o->g_out = false;
3521 i3 &= 63;
3522 i4 &= 63;
3523 i5 &= 63;
3525 /* MASK is the set of bits to be operated on from R2.
3526 Take care for I3/I4 wraparound. */
3527 mask = ~0ull >> i3;
3528 if (i3 <= i4) {
3529 mask ^= ~0ull >> i4 >> 1;
3530 } else {
3531 mask |= ~(~0ull >> i4 >> 1);
3534 /* Rotate the input as necessary. */
3535 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3537 /* Operate. */
3538 switch (s->fields->op2) {
3539 case 0x55: /* AND */
3540 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3541 tcg_gen_and_i64(o->out, o->out, o->in2);
3542 break;
3543 case 0x56: /* OR */
3544 tcg_gen_andi_i64(o->in2, o->in2, mask);
3545 tcg_gen_or_i64(o->out, o->out, o->in2);
3546 break;
3547 case 0x57: /* XOR */
3548 tcg_gen_andi_i64(o->in2, o->in2, mask);
3549 tcg_gen_xor_i64(o->out, o->out, o->in2);
3550 break;
3551 default:
3552 abort();
3555 /* Set the CC. */
3556 tcg_gen_andi_i64(cc_dst, o->out, mask);
3557 set_cc_nz_u64(s, cc_dst);
3558 return NO_EXIT;
3561 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3563 tcg_gen_bswap16_i64(o->out, o->in2);
3564 return NO_EXIT;
3567 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3569 tcg_gen_bswap32_i64(o->out, o->in2);
3570 return NO_EXIT;
3573 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3575 tcg_gen_bswap64_i64(o->out, o->in2);
3576 return NO_EXIT;
3579 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3581 TCGv_i32 t1 = tcg_temp_new_i32();
3582 TCGv_i32 t2 = tcg_temp_new_i32();
3583 TCGv_i32 to = tcg_temp_new_i32();
3584 tcg_gen_extrl_i64_i32(t1, o->in1);
3585 tcg_gen_extrl_i64_i32(t2, o->in2);
3586 tcg_gen_rotl_i32(to, t1, t2);
3587 tcg_gen_extu_i32_i64(o->out, to);
3588 tcg_temp_free_i32(t1);
3589 tcg_temp_free_i32(t2);
3590 tcg_temp_free_i32(to);
3591 return NO_EXIT;
3594 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3596 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3597 return NO_EXIT;
3600 #ifndef CONFIG_USER_ONLY
3601 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3603 check_privileged(s);
3604 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3605 set_cc_static(s);
3606 return NO_EXIT;
3609 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3611 check_privileged(s);
3612 gen_helper_sacf(cpu_env, o->in2);
3613 /* Addressing mode has changed, so end the block. */
3614 return EXIT_PC_STALE;
3616 #endif
3618 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3620 int sam = s->insn->data;
3621 TCGv_i64 tsam;
3622 uint64_t mask;
3624 switch (sam) {
3625 case 0:
3626 mask = 0xffffff;
3627 break;
3628 case 1:
3629 mask = 0x7fffffff;
3630 break;
3631 default:
3632 mask = -1;
3633 break;
3636 /* Bizarre but true, we check the address of the current insn for the
3637 specification exception, not the next to be executed. Thus the PoO
3638 documents that Bad Things Happen two bytes before the end. */
3639 if (s->pc & ~mask) {
3640 gen_program_exception(s, PGM_SPECIFICATION);
3641 return EXIT_NORETURN;
3643 s->next_pc &= mask;
3645 tsam = tcg_const_i64(sam);
3646 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3647 tcg_temp_free_i64(tsam);
3649 /* Always exit the TB, since we (may have) changed execution mode. */
3650 return EXIT_PC_STALE;
3653 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3655 int r1 = get_field(s->fields, r1);
3656 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3657 return NO_EXIT;
3660 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3662 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3663 return NO_EXIT;
3666 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3668 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3669 return NO_EXIT;
3672 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3674 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3675 return_low128(o->out2);
3676 return NO_EXIT;
3679 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3681 gen_helper_sqeb(o->out, cpu_env, o->in2);
3682 return NO_EXIT;
3685 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3687 gen_helper_sqdb(o->out, cpu_env, o->in2);
3688 return NO_EXIT;
3691 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3693 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3694 return_low128(o->out2);
3695 return NO_EXIT;
3698 #ifndef CONFIG_USER_ONLY
3699 static ExitStatus op_servc(DisasContext *s, DisasOps *o)
3701 check_privileged(s);
3702 potential_page_fault(s);
3703 gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
3704 set_cc_static(s);
3705 return NO_EXIT;
3708 static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
3710 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3711 check_privileged(s);
3712 potential_page_fault(s);
3713 gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
3714 set_cc_static(s);
3715 tcg_temp_free_i32(r1);
3716 return NO_EXIT;
3718 #endif
3720 static ExitStatus op_soc(DisasContext *s, DisasOps *o)
3722 DisasCompare c;
3723 TCGv_i64 a, h;
3724 TCGLabel *lab;
3725 int r1;
3727 disas_jcc(s, &c, get_field(s->fields, m3));
3729 /* We want to store when the condition is fulfilled, so branch
3730 out when it's not */
3731 c.cond = tcg_invert_cond(c.cond);
3733 lab = gen_new_label();
3734 if (c.is_64) {
3735 tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
3736 } else {
3737 tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
3739 free_compare(&c);
3741 r1 = get_field(s->fields, r1);
3742 a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
3743 switch (s->insn->data) {
3744 case 1: /* STOCG */
3745 tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
3746 break;
3747 case 0: /* STOC */
3748 tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
3749 break;
3750 case 2: /* STOCFH */
3751 h = tcg_temp_new_i64();
3752 tcg_gen_shri_i64(h, regs[r1], 32);
3753 tcg_gen_qemu_st32(h, a, get_mem_index(s));
3754 tcg_temp_free_i64(h);
3755 break;
3756 default:
3757 g_assert_not_reached();
3759 tcg_temp_free_i64(a);
3761 gen_set_label(lab);
3762 return NO_EXIT;
3765 static ExitStatus op_sla(DisasContext *s, DisasOps *o)
3767 uint64_t sign = 1ull << s->insn->data;
3768 enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
3769 gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
3770 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3771 /* The arithmetic left shift is curious in that it does not affect
3772 the sign bit. Copy that over from the source unchanged. */
3773 tcg_gen_andi_i64(o->out, o->out, ~sign);
3774 tcg_gen_andi_i64(o->in1, o->in1, sign);
3775 tcg_gen_or_i64(o->out, o->out, o->in1);
3776 return NO_EXIT;
3779 static ExitStatus op_sll(DisasContext *s, DisasOps *o)
3781 tcg_gen_shl_i64(o->out, o->in1, o->in2);
3782 return NO_EXIT;
3785 static ExitStatus op_sra(DisasContext *s, DisasOps *o)
3787 tcg_gen_sar_i64(o->out, o->in1, o->in2);
3788 return NO_EXIT;
3791 static ExitStatus op_srl(DisasContext *s, DisasOps *o)
3793 tcg_gen_shr_i64(o->out, o->in1, o->in2);
3794 return NO_EXIT;
3797 static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
3799 gen_helper_sfpc(cpu_env, o->in2);
3800 return NO_EXIT;
3803 static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
3805 gen_helper_sfas(cpu_env, o->in2);
3806 return NO_EXIT;
3809 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3811 int b2 = get_field(s->fields, b2);
3812 int d2 = get_field(s->fields, d2);
3813 TCGv_i64 t1 = tcg_temp_new_i64();
3814 TCGv_i64 t2 = tcg_temp_new_i64();
3815 int mask, pos, len;
3817 switch (s->fields->op2) {
3818 case 0x99: /* SRNM */
3819 pos = 0, len = 2;
3820 break;
3821 case 0xb8: /* SRNMB */
3822 pos = 0, len = 3;
3823 break;
3824 case 0xb9: /* SRNMT */
3825 pos = 4, len = 3;
3826 break;
3827 default:
3828 tcg_abort();
3830 mask = (1 << len) - 1;
3832 /* Insert the value into the appropriate field of the FPC. */
3833 if (b2 == 0) {
3834 tcg_gen_movi_i64(t1, d2 & mask);
3835 } else {
3836 tcg_gen_addi_i64(t1, regs[b2], d2);
3837 tcg_gen_andi_i64(t1, t1, mask);
3839 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3840 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3841 tcg_temp_free_i64(t1);
3843 /* Then install the new FPC to set the rounding mode in fpu_status. */
3844 gen_helper_sfpc(cpu_env, t2);
3845 tcg_temp_free_i64(t2);
3846 return NO_EXIT;
3849 static ExitStatus op_spm(DisasContext *s, DisasOps *o)
3851 tcg_gen_extrl_i64_i32(cc_op, o->in1);
3852 tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
3853 set_cc_static(s);
3855 tcg_gen_shri_i64(o->in1, o->in1, 24);
3856 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
3857 return NO_EXIT;
3860 #ifndef CONFIG_USER_ONLY
3861 static ExitStatus op_spka(DisasContext *s, DisasOps *o)
3863 check_privileged(s);
3864 tcg_gen_shri_i64(o->in2, o->in2, 4);
3865 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
3866 return NO_EXIT;
3869 static ExitStatus op_sske(DisasContext *s, DisasOps *o)
3871 check_privileged(s);
3872 gen_helper_sske(cpu_env, o->in1, o->in2);
3873 return NO_EXIT;
3876 static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
3878 check_privileged(s);
3879 tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
3880 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
3881 return EXIT_PC_STALE_NOCHAIN;
3884 static ExitStatus op_stap(DisasContext *s, DisasOps *o)
3886 check_privileged(s);
3887 tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
3888 return NO_EXIT;
3891 static ExitStatus op_stck(DisasContext *s, DisasOps *o)
3893 gen_helper_stck(o->out, cpu_env);
3894 /* ??? We don't implement clock states. */
3895 gen_op_movi_cc(s, 0);
3896 return NO_EXIT;
3899 static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
3901 TCGv_i64 c1 = tcg_temp_new_i64();
3902 TCGv_i64 c2 = tcg_temp_new_i64();
3903 gen_helper_stck(c1, cpu_env);
3904 /* Shift the 64-bit value into its place as a zero-extended
3905 104-bit value. Note that "bit positions 64-103 are always
3906 non-zero so that they compare differently to STCK"; we set
3907 the least significant bit to 1. */
3908 tcg_gen_shli_i64(c2, c1, 56);
3909 tcg_gen_shri_i64(c1, c1, 8);
3910 tcg_gen_ori_i64(c2, c2, 0x10000);
3911 tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
3912 tcg_gen_addi_i64(o->in2, o->in2, 8);
3913 tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
3914 tcg_temp_free_i64(c1);
3915 tcg_temp_free_i64(c2);
3916 /* ??? We don't implement clock states. */
3917 gen_op_movi_cc(s, 0);
3918 return NO_EXIT;
3921 static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
3923 check_privileged(s);
3924 gen_helper_sckc(cpu_env, o->in2);
3925 return NO_EXIT;
3928 static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
3930 check_privileged(s);
3931 gen_helper_stckc(o->out, cpu_env);
3932 return NO_EXIT;
3935 static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
3937 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3938 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3939 check_privileged(s);
3940 gen_helper_stctg(cpu_env, r1, o->in2, r3);
3941 tcg_temp_free_i32(r1);
3942 tcg_temp_free_i32(r3);
3943 return NO_EXIT;
3946 static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
3948 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
3949 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
3950 check_privileged(s);
3951 gen_helper_stctl(cpu_env, r1, o->in2, r3);
3952 tcg_temp_free_i32(r1);
3953 tcg_temp_free_i32(r3);
3954 return NO_EXIT;
3957 static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
3959 check_privileged(s);
3960 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
3961 tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
3962 return NO_EXIT;
3965 static ExitStatus op_spt(DisasContext *s, DisasOps *o)
3967 check_privileged(s);
3968 gen_helper_spt(cpu_env, o->in2);
3969 return NO_EXIT;
3972 static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
3974 check_privileged(s);
3975 gen_helper_stfl(cpu_env);
3976 return NO_EXIT;
3979 static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
3981 check_privileged(s);
3982 gen_helper_stpt(o->out, cpu_env);
3983 return NO_EXIT;
3986 static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
3988 check_privileged(s);
3989 potential_page_fault(s);
3990 gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
3991 set_cc_static(s);
3992 return NO_EXIT;
3995 static ExitStatus op_spx(DisasContext *s, DisasOps *o)
3997 check_privileged(s);
3998 gen_helper_spx(cpu_env, o->in2);
3999 return NO_EXIT;
4002 static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
4004 check_privileged(s);
4005 potential_page_fault(s);
4006 gen_helper_xsch(cpu_env, regs[1]);
4007 set_cc_static(s);
4008 return NO_EXIT;
4011 static ExitStatus op_csch(DisasContext *s, DisasOps *o)
4013 check_privileged(s);
4014 potential_page_fault(s);
4015 gen_helper_csch(cpu_env, regs[1]);
4016 set_cc_static(s);
4017 return NO_EXIT;
4020 static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
4022 check_privileged(s);
4023 potential_page_fault(s);
4024 gen_helper_hsch(cpu_env, regs[1]);
4025 set_cc_static(s);
4026 return NO_EXIT;
4029 static ExitStatus op_msch(DisasContext *s, DisasOps *o)
4031 check_privileged(s);
4032 potential_page_fault(s);
4033 gen_helper_msch(cpu_env, regs[1], o->in2);
4034 set_cc_static(s);
4035 return NO_EXIT;
4038 static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
4040 check_privileged(s);
4041 potential_page_fault(s);
4042 gen_helper_rchp(cpu_env, regs[1]);
4043 set_cc_static(s);
4044 return NO_EXIT;
4047 static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
4049 check_privileged(s);
4050 potential_page_fault(s);
4051 gen_helper_rsch(cpu_env, regs[1]);
4052 set_cc_static(s);
4053 return NO_EXIT;
4056 static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
4058 check_privileged(s);
4059 potential_page_fault(s);
4060 gen_helper_ssch(cpu_env, regs[1], o->in2);
4061 set_cc_static(s);
4062 return NO_EXIT;
4065 static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
4067 check_privileged(s);
4068 potential_page_fault(s);
4069 gen_helper_stsch(cpu_env, regs[1], o->in2);
4070 set_cc_static(s);
4071 return NO_EXIT;
4074 static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
4076 check_privileged(s);
4077 potential_page_fault(s);
4078 gen_helper_tsch(cpu_env, regs[1], o->in2);
4079 set_cc_static(s);
4080 return NO_EXIT;
4083 static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
4085 check_privileged(s);
4086 potential_page_fault(s);
4087 gen_helper_chsc(cpu_env, o->in2);
4088 set_cc_static(s);
4089 return NO_EXIT;
4092 static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
4094 check_privileged(s);
4095 tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
4096 tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
4097 return NO_EXIT;
4100 static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
4102 uint64_t i2 = get_field(s->fields, i2);
4103 TCGv_i64 t;
4105 check_privileged(s);
4107 /* It is important to do what the instruction name says: STORE THEN.
4108 If we let the output hook perform the store then if we fault and
4109 restart, we'll have the wrong SYSTEM MASK in place. */
4110 t = tcg_temp_new_i64();
4111 tcg_gen_shri_i64(t, psw_mask, 56);
4112 tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
4113 tcg_temp_free_i64(t);
4115 if (s->fields->op == 0xac) {
4116 tcg_gen_andi_i64(psw_mask, psw_mask,
4117 (i2 << 56) | 0x00ffffffffffffffull);
4118 } else {
4119 tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
4122 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
4123 return EXIT_PC_STALE_NOCHAIN;
4126 static ExitStatus op_stura(DisasContext *s, DisasOps *o)
4128 check_privileged(s);
4129 gen_helper_stura(cpu_env, o->in2, o->in1);
4130 return NO_EXIT;
4133 static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
4135 check_privileged(s);
4136 gen_helper_sturg(cpu_env, o->in2, o->in1);
4137 return NO_EXIT;
4139 #endif
4141 static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
4143 potential_page_fault(s);
4144 gen_helper_stfle(cc_op, cpu_env, o->in2);
4145 set_cc_static(s);
4146 return NO_EXIT;
4149 static ExitStatus op_st8(DisasContext *s, DisasOps *o)
4151 tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
4152 return NO_EXIT;
4155 static ExitStatus op_st16(DisasContext *s, DisasOps *o)
4157 tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
4158 return NO_EXIT;
4161 static ExitStatus op_st32(DisasContext *s, DisasOps *o)
4163 tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
4164 return NO_EXIT;
4167 static ExitStatus op_st64(DisasContext *s, DisasOps *o)
4169 tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
4170 return NO_EXIT;
4173 static ExitStatus op_stam(DisasContext *s, DisasOps *o)
4175 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4176 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
4177 gen_helper_stam(cpu_env, r1, o->in2, r3);
4178 tcg_temp_free_i32(r1);
4179 tcg_temp_free_i32(r3);
4180 return NO_EXIT;
4183 static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
4185 int m3 = get_field(s->fields, m3);
4186 int pos, base = s->insn->data;
4187 TCGv_i64 tmp = tcg_temp_new_i64();
4189 pos = base + ctz32(m3) * 8;
4190 switch (m3) {
4191 case 0xf:
4192 /* Effectively a 32-bit store. */
4193 tcg_gen_shri_i64(tmp, o->in1, pos);
4194 tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
4195 break;
4197 case 0xc:
4198 case 0x6:
4199 case 0x3:
4200 /* Effectively a 16-bit store. */
4201 tcg_gen_shri_i64(tmp, o->in1, pos);
4202 tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
4203 break;
4205 case 0x8:
4206 case 0x4:
4207 case 0x2:
4208 case 0x1:
4209 /* Effectively an 8-bit store. */
4210 tcg_gen_shri_i64(tmp, o->in1, pos);
4211 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4212 break;
4214 default:
4215 /* This is going to be a sequence of shifts and stores. */
4216 pos = base + 32 - 8;
4217 while (m3) {
4218 if (m3 & 0x8) {
4219 tcg_gen_shri_i64(tmp, o->in1, pos);
4220 tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
4221 tcg_gen_addi_i64(o->in2, o->in2, 1);
4223 m3 = (m3 << 1) & 0xf;
4224 pos -= 8;
4226 break;
4228 tcg_temp_free_i64(tmp);
4229 return NO_EXIT;
4232 static ExitStatus op_stm(DisasContext *s, DisasOps *o)
4234 int r1 = get_field(s->fields, r1);
4235 int r3 = get_field(s->fields, r3);
4236 int size = s->insn->data;
4237 TCGv_i64 tsize = tcg_const_i64(size);
4239 while (1) {
4240 if (size == 8) {
4241 tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
4242 } else {
4243 tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
4245 if (r1 == r3) {
4246 break;
4248 tcg_gen_add_i64(o->in2, o->in2, tsize);
4249 r1 = (r1 + 1) & 15;
4252 tcg_temp_free_i64(tsize);
4253 return NO_EXIT;
4256 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4258 int r1 = get_field(s->fields, r1);
4259 int r3 = get_field(s->fields, r3);
4260 TCGv_i64 t = tcg_temp_new_i64();
4261 TCGv_i64 t4 = tcg_const_i64(4);
4262 TCGv_i64 t32 = tcg_const_i64(32);
4264 while (1) {
4265 tcg_gen_shl_i64(t, regs[r1], t32);
4266 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4267 if (r1 == r3) {
4268 break;
4270 tcg_gen_add_i64(o->in2, o->in2, t4);
4271 r1 = (r1 + 1) & 15;
4274 tcg_temp_free_i64(t);
4275 tcg_temp_free_i64(t4);
4276 tcg_temp_free_i64(t32);
4277 return NO_EXIT;
4280 static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
4282 gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
4283 return NO_EXIT;
4286 static ExitStatus op_srst(DisasContext *s, DisasOps *o)
4288 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4289 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4291 gen_helper_srst(cpu_env, r1, r2);
4293 tcg_temp_free_i32(r1);
4294 tcg_temp_free_i32(r2);
4295 set_cc_static(s);
4296 return NO_EXIT;
4299 static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
4301 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4302 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4304 gen_helper_srstu(cpu_env, r1, r2);
4306 tcg_temp_free_i32(r1);
4307 tcg_temp_free_i32(r2);
4308 set_cc_static(s);
4309 return NO_EXIT;
4312 static ExitStatus op_sub(DisasContext *s, DisasOps *o)
4314 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4315 return NO_EXIT;
4318 static ExitStatus op_subb(DisasContext *s, DisasOps *o)
4320 DisasCompare cmp;
4321 TCGv_i64 borrow;
4323 tcg_gen_sub_i64(o->out, o->in1, o->in2);
4325 /* The !borrow flag is the msb of CC. Since we want the inverse of
4326 that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
4327 disas_jcc(s, &cmp, 8 | 4);
4328 borrow = tcg_temp_new_i64();
4329 if (cmp.is_64) {
4330 tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
4331 } else {
4332 TCGv_i32 t = tcg_temp_new_i32();
4333 tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
4334 tcg_gen_extu_i32_i64(borrow, t);
4335 tcg_temp_free_i32(t);
4337 free_compare(&cmp);
4339 tcg_gen_sub_i64(o->out, o->out, borrow);
4340 tcg_temp_free_i64(borrow);
4341 return NO_EXIT;
4344 static ExitStatus op_svc(DisasContext *s, DisasOps *o)
4346 TCGv_i32 t;
4348 update_psw_addr(s);
4349 update_cc_op(s);
4351 t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
4352 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
4353 tcg_temp_free_i32(t);
4355 t = tcg_const_i32(s->ilen);
4356 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
4357 tcg_temp_free_i32(t);
4359 gen_exception(EXCP_SVC);
4360 return EXIT_NORETURN;
4363 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4365 int cc = 0;
4367 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4368 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4369 gen_op_movi_cc(s, cc);
4370 return NO_EXIT;
4373 static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
4375 gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
4376 set_cc_static(s);
4377 return NO_EXIT;
4380 static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
4382 gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
4383 set_cc_static(s);
4384 return NO_EXIT;
4387 static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
4389 gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
4390 set_cc_static(s);
4391 return NO_EXIT;
4394 #ifndef CONFIG_USER_ONLY
4396 static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
4398 check_privileged(s);
4399 gen_helper_testblock(cc_op, cpu_env, o->in2);
4400 set_cc_static(s);
4401 return NO_EXIT;
4404 static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
4406 gen_helper_tprot(cc_op, o->addr1, o->in2);
4407 set_cc_static(s);
4408 return NO_EXIT;
4411 #endif
4413 static ExitStatus op_tp(DisasContext *s, DisasOps *o)
4415 TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
4416 gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
4417 tcg_temp_free_i32(l1);
4418 set_cc_static(s);
4419 return NO_EXIT;
4422 static ExitStatus op_tr(DisasContext *s, DisasOps *o)
4424 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4425 gen_helper_tr(cpu_env, l, o->addr1, o->in2);
4426 tcg_temp_free_i32(l);
4427 set_cc_static(s);
4428 return NO_EXIT;
4431 static ExitStatus op_tre(DisasContext *s, DisasOps *o)
4433 gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
4434 return_low128(o->out2);
4435 set_cc_static(s);
4436 return NO_EXIT;
4439 static ExitStatus op_trt(DisasContext *s, DisasOps *o)
4441 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4442 gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
4443 tcg_temp_free_i32(l);
4444 set_cc_static(s);
4445 return NO_EXIT;
4448 static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
4450 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4451 gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
4452 tcg_temp_free_i32(l);
4453 set_cc_static(s);
4454 return NO_EXIT;
4457 static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
4459 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
4460 TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
4461 TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
4462 TCGv_i32 tst = tcg_temp_new_i32();
4463 int m3 = get_field(s->fields, m3);
4465 if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
4466 m3 = 0;
4468 if (m3 & 1) {
4469 tcg_gen_movi_i32(tst, -1);
4470 } else {
4471 tcg_gen_extrl_i64_i32(tst, regs[0]);
4472 if (s->insn->opc & 3) {
4473 tcg_gen_ext8u_i32(tst, tst);
4474 } else {
4475 tcg_gen_ext16u_i32(tst, tst);
4478 gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
4480 tcg_temp_free_i32(r1);
4481 tcg_temp_free_i32(r2);
4482 tcg_temp_free_i32(sizes);
4483 tcg_temp_free_i32(tst);
4484 set_cc_static(s);
4485 return NO_EXIT;
4488 static ExitStatus op_ts(DisasContext *s, DisasOps *o)
4490 TCGv_i32 t1 = tcg_const_i32(0xff);
4491 tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
4492 tcg_gen_extract_i32(cc_op, t1, 7, 1);
4493 tcg_temp_free_i32(t1);
4494 set_cc_static(s);
4495 return NO_EXIT;
4498 static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
4500 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
4501 gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
4502 tcg_temp_free_i32(l);
4503 return NO_EXIT;
4506 static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
4508 int l1 = get_field(s->fields, l1) + 1;
4509 TCGv_i32 l;
4511 /* The length must not exceed 32 bytes. */
4512 if (l1 > 32) {
4513 gen_program_exception(s, PGM_SPECIFICATION);
4514 return EXIT_NORETURN;
4516 l = tcg_const_i32(l1);
4517 gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
4518 tcg_temp_free_i32(l);
4519 set_cc_static(s);
4520 return NO_EXIT;
4523 static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
4525 int l1 = get_field(s->fields, l1) + 1;
4526 TCGv_i32 l;
4528 /* The length must be even and should not exceed 64 bytes. */
4529 if ((l1 & 1) || (l1 > 64)) {
4530 gen_program_exception(s, PGM_SPECIFICATION);
4531 return EXIT_NORETURN;
4533 l = tcg_const_i32(l1);
4534 gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
4535 tcg_temp_free_i32(l);
4536 set_cc_static(s);
4537 return NO_EXIT;
4541 static ExitStatus op_xc(DisasContext *s, DisasOps *o)
4543 int d1 = get_field(s->fields, d1);
4544 int d2 = get_field(s->fields, d2);
4545 int b1 = get_field(s->fields, b1);
4546 int b2 = get_field(s->fields, b2);
4547 int l = get_field(s->fields, l1);
4548 TCGv_i32 t32;
4550 o->addr1 = get_address(s, 0, b1, d1);
4552 /* If the addresses are identical, this is a store/memset of zero. */
4553 if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
4554 o->in2 = tcg_const_i64(0);
4556 l++;
4557 while (l >= 8) {
4558 tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
4559 l -= 8;
4560 if (l > 0) {
4561 tcg_gen_addi_i64(o->addr1, o->addr1, 8);
4564 if (l >= 4) {
4565 tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
4566 l -= 4;
4567 if (l > 0) {
4568 tcg_gen_addi_i64(o->addr1, o->addr1, 4);
4571 if (l >= 2) {
4572 tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
4573 l -= 2;
4574 if (l > 0) {
4575 tcg_gen_addi_i64(o->addr1, o->addr1, 2);
4578 if (l) {
4579 tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
4581 gen_op_movi_cc(s, 0);
4582 return NO_EXIT;
4585 /* But in general we'll defer to a helper. */
4586 o->in2 = get_address(s, 0, b2, d2);
4587 t32 = tcg_const_i32(l);
4588 gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
4589 tcg_temp_free_i32(t32);
4590 set_cc_static(s);
4591 return NO_EXIT;
4594 static ExitStatus op_xor(DisasContext *s, DisasOps *o)
4596 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4597 return NO_EXIT;
4600 static ExitStatus op_xori(DisasContext *s, DisasOps *o)
4602 int shift = s->insn->data & 0xff;
4603 int size = s->insn->data >> 8;
4604 uint64_t mask = ((1ull << size) - 1) << shift;
4606 assert(!o->g_in2);
4607 tcg_gen_shli_i64(o->in2, o->in2, shift);
4608 tcg_gen_xor_i64(o->out, o->in1, o->in2);
4610 /* Produce the CC from only the bits manipulated. */
4611 tcg_gen_andi_i64(cc_dst, o->out, mask);
4612 set_cc_nz_u64(s, cc_dst);
4613 return NO_EXIT;
4616 static ExitStatus op_zero(DisasContext *s, DisasOps *o)
4618 o->out = tcg_const_i64(0);
4619 return NO_EXIT;
4622 static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
4624 o->out = tcg_const_i64(0);
4625 o->out2 = o->out;
4626 o->g_out2 = true;
4627 return NO_EXIT;
4630 /* ====================================================================== */
4631 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4632 the original inputs), update the various cc data structures in order to
4633 be able to compute the new condition code. */
4635 static void cout_abs32(DisasContext *s, DisasOps *o)
4637 gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
4640 static void cout_abs64(DisasContext *s, DisasOps *o)
4642 gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
4645 static void cout_adds32(DisasContext *s, DisasOps *o)
4647 gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
4650 static void cout_adds64(DisasContext *s, DisasOps *o)
4652 gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
4655 static void cout_addu32(DisasContext *s, DisasOps *o)
4657 gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
4660 static void cout_addu64(DisasContext *s, DisasOps *o)
4662 gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
4665 static void cout_addc32(DisasContext *s, DisasOps *o)
4667 gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
4670 static void cout_addc64(DisasContext *s, DisasOps *o)
4672 gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
4675 static void cout_cmps32(DisasContext *s, DisasOps *o)
4677 gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
4680 static void cout_cmps64(DisasContext *s, DisasOps *o)
4682 gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
4685 static void cout_cmpu32(DisasContext *s, DisasOps *o)
4687 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
4690 static void cout_cmpu64(DisasContext *s, DisasOps *o)
4692 gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
4695 static void cout_f32(DisasContext *s, DisasOps *o)
4697 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
4700 static void cout_f64(DisasContext *s, DisasOps *o)
4702 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
4705 static void cout_f128(DisasContext *s, DisasOps *o)
4707 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
4710 static void cout_nabs32(DisasContext *s, DisasOps *o)
4712 gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
4715 static void cout_nabs64(DisasContext *s, DisasOps *o)
4717 gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
4720 static void cout_neg32(DisasContext *s, DisasOps *o)
4722 gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
4725 static void cout_neg64(DisasContext *s, DisasOps *o)
4727 gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
4730 static void cout_nz32(DisasContext *s, DisasOps *o)
4732 tcg_gen_ext32u_i64(cc_dst, o->out);
4733 gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
4736 static void cout_nz64(DisasContext *s, DisasOps *o)
4738 gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
4741 static void cout_s32(DisasContext *s, DisasOps *o)
4743 gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
4746 static void cout_s64(DisasContext *s, DisasOps *o)
4748 gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
4751 static void cout_subs32(DisasContext *s, DisasOps *o)
4753 gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
4756 static void cout_subs64(DisasContext *s, DisasOps *o)
4758 gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
4761 static void cout_subu32(DisasContext *s, DisasOps *o)
4763 gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
4766 static void cout_subu64(DisasContext *s, DisasOps *o)
4768 gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
4771 static void cout_subb32(DisasContext *s, DisasOps *o)
4773 gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
4776 static void cout_subb64(DisasContext *s, DisasOps *o)
4778 gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
4781 static void cout_tm32(DisasContext *s, DisasOps *o)
4783 gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
4786 static void cout_tm64(DisasContext *s, DisasOps *o)
4788 gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
4791 /* ====================================================================== */
4792 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4793 with the TCG register to which we will write. Used in combination with
4794 the "wout" generators, in some cases we need a new temporary, and in
4795 some cases we can write to a TCG global. */
4797 static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
4799 o->out = tcg_temp_new_i64();
4801 #define SPEC_prep_new 0
4803 static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
4805 o->out = tcg_temp_new_i64();
4806 o->out2 = tcg_temp_new_i64();
4808 #define SPEC_prep_new_P 0
4810 static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4812 o->out = regs[get_field(f, r1)];
4813 o->g_out = true;
4815 #define SPEC_prep_r1 0
4817 static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
4819 int r1 = get_field(f, r1);
4820 o->out = regs[r1];
4821 o->out2 = regs[r1 + 1];
4822 o->g_out = o->g_out2 = true;
4824 #define SPEC_prep_r1_P SPEC_r1_even
4826 static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4828 o->out = fregs[get_field(f, r1)];
4829 o->g_out = true;
4831 #define SPEC_prep_f1 0
4833 static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4835 int r1 = get_field(f, r1);
4836 o->out = fregs[r1];
4837 o->out2 = fregs[r1 + 2];
4838 o->g_out = o->g_out2 = true;
4840 #define SPEC_prep_x1 SPEC_r1_f128
4842 /* ====================================================================== */
4843 /* The "Write OUTput" generators. These generally perform some non-trivial
4844 copy of data to TCG globals, or to main memory. The trivial cases are
4845 generally handled by having a "prep" generator install the TCG global
4846 as the destination of the operation. */
4848 static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4850 store_reg(get_field(f, r1), o->out);
4852 #define SPEC_wout_r1 0
4854 static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4856 int r1 = get_field(f, r1);
4857 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
4859 #define SPEC_wout_r1_8 0
4861 static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4863 int r1 = get_field(f, r1);
4864 tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
4866 #define SPEC_wout_r1_16 0
4868 static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4870 store_reg32_i64(get_field(f, r1), o->out);
4872 #define SPEC_wout_r1_32 0
4874 static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
4876 store_reg32h_i64(get_field(f, r1), o->out);
4878 #define SPEC_wout_r1_32h 0
4880 static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4882 int r1 = get_field(f, r1);
4883 store_reg32_i64(r1, o->out);
4884 store_reg32_i64(r1 + 1, o->out2);
4886 #define SPEC_wout_r1_P32 SPEC_r1_even
4888 static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
4890 int r1 = get_field(f, r1);
4891 store_reg32_i64(r1 + 1, o->out);
4892 tcg_gen_shri_i64(o->out, o->out, 32);
4893 store_reg32_i64(r1, o->out);
4895 #define SPEC_wout_r1_D32 SPEC_r1_even
4897 static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
4899 int r3 = get_field(f, r3);
4900 store_reg32_i64(r3, o->out);
4901 store_reg32_i64(r3 + 1, o->out2);
4903 #define SPEC_wout_r3_P32 SPEC_r3_even
4905 static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
4907 int r3 = get_field(f, r3);
4908 store_reg(r3, o->out);
4909 store_reg(r3 + 1, o->out2);
4911 #define SPEC_wout_r3_P64 SPEC_r3_even
4913 static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
4915 store_freg32_i64(get_field(f, r1), o->out);
4917 #define SPEC_wout_e1 0
4919 static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
4921 store_freg(get_field(f, r1), o->out);
4923 #define SPEC_wout_f1 0
4925 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4927 int f1 = get_field(s->fields, r1);
4928 store_freg(f1, o->out);
4929 store_freg(f1 + 2, o->out2);
4931 #define SPEC_wout_x1 SPEC_r1_f128
4933 static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4935 if (get_field(f, r1) != get_field(f, r2)) {
4936 store_reg32_i64(get_field(f, r1), o->out);
4939 #define SPEC_wout_cond_r1r2_32 0
4941 static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
4943 if (get_field(f, r1) != get_field(f, r2)) {
4944 store_freg32_i64(get_field(f, r1), o->out);
4947 #define SPEC_wout_cond_e1e2 0
4949 static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
4951 tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
4953 #define SPEC_wout_m1_8 0
4955 static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
4957 tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
4959 #define SPEC_wout_m1_16 0
4961 static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4963 tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
4965 #define SPEC_wout_m1_32 0
4967 static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
4969 tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
4971 #define SPEC_wout_m1_64 0
4973 static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
4975 tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
4977 #define SPEC_wout_m2_32 0
4979 static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4981 store_reg(get_field(f, r1), o->in2);
4983 #define SPEC_wout_in2_r1 0
4985 static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
4987 store_reg32_i64(get_field(f, r1), o->in2);
4989 #define SPEC_wout_in2_r1_32 0
4991 /* ====================================================================== */
4992 /* The "INput 1" generators. These load the first operand to an insn. */
4994 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
4996 o->in1 = load_reg(get_field(f, r1));
4998 #define SPEC_in1_r1 0
5000 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5002 o->in1 = regs[get_field(f, r1)];
5003 o->g_in1 = true;
5005 #define SPEC_in1_r1_o 0
5007 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5009 o->in1 = tcg_temp_new_i64();
5010 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5012 #define SPEC_in1_r1_32s 0
5014 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5016 o->in1 = tcg_temp_new_i64();
5017 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5019 #define SPEC_in1_r1_32u 0
5021 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5023 o->in1 = tcg_temp_new_i64();
5024 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5026 #define SPEC_in1_r1_sr32 0
5028 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5030 o->in1 = load_reg(get_field(f, r1) + 1);
5032 #define SPEC_in1_r1p1 SPEC_r1_even
5034 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5036 o->in1 = tcg_temp_new_i64();
5037 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5039 #define SPEC_in1_r1p1_32s SPEC_r1_even
5041 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5043 o->in1 = tcg_temp_new_i64();
5044 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5046 #define SPEC_in1_r1p1_32u SPEC_r1_even
5048 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5050 int r1 = get_field(f, r1);
5051 o->in1 = tcg_temp_new_i64();
5052 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5054 #define SPEC_in1_r1_D32 SPEC_r1_even
5056 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5058 o->in1 = load_reg(get_field(f, r2));
5060 #define SPEC_in1_r2 0
5062 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5064 o->in1 = tcg_temp_new_i64();
5065 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5067 #define SPEC_in1_r2_sr32 0
5069 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5071 o->in1 = load_reg(get_field(f, r3));
5073 #define SPEC_in1_r3 0
5075 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5077 o->in1 = regs[get_field(f, r3)];
5078 o->g_in1 = true;
5080 #define SPEC_in1_r3_o 0
5082 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5084 o->in1 = tcg_temp_new_i64();
5085 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5087 #define SPEC_in1_r3_32s 0
5089 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5091 o->in1 = tcg_temp_new_i64();
5092 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5094 #define SPEC_in1_r3_32u 0
5096 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5098 int r3 = get_field(f, r3);
5099 o->in1 = tcg_temp_new_i64();
5100 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5102 #define SPEC_in1_r3_D32 SPEC_r3_even
5104 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5106 o->in1 = load_freg32_i64(get_field(f, r1));
5108 #define SPEC_in1_e1 0
5110 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5112 o->in1 = fregs[get_field(f, r1)];
5113 o->g_in1 = true;
5115 #define SPEC_in1_f1_o 0
5117 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5119 int r1 = get_field(f, r1);
5120 o->out = fregs[r1];
5121 o->out2 = fregs[r1 + 2];
5122 o->g_out = o->g_out2 = true;
5124 #define SPEC_in1_x1_o SPEC_r1_f128
5126 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5128 o->in1 = fregs[get_field(f, r3)];
5129 o->g_in1 = true;
5131 #define SPEC_in1_f3_o 0
5133 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5135 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5137 #define SPEC_in1_la1 0
5139 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5141 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5142 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5144 #define SPEC_in1_la2 0
5146 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5148 in1_la1(s, f, o);
5149 o->in1 = tcg_temp_new_i64();
5150 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5152 #define SPEC_in1_m1_8u 0
5154 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5156 in1_la1(s, f, o);
5157 o->in1 = tcg_temp_new_i64();
5158 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5160 #define SPEC_in1_m1_16s 0
5162 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5164 in1_la1(s, f, o);
5165 o->in1 = tcg_temp_new_i64();
5166 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5168 #define SPEC_in1_m1_16u 0
5170 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5172 in1_la1(s, f, o);
5173 o->in1 = tcg_temp_new_i64();
5174 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5176 #define SPEC_in1_m1_32s 0
5178 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5180 in1_la1(s, f, o);
5181 o->in1 = tcg_temp_new_i64();
5182 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5184 #define SPEC_in1_m1_32u 0
5186 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5188 in1_la1(s, f, o);
5189 o->in1 = tcg_temp_new_i64();
5190 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5192 #define SPEC_in1_m1_64 0
5194 /* ====================================================================== */
5195 /* The "INput 2" generators. These load the second operand to an insn. */
5197 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5199 o->in2 = regs[get_field(f, r1)];
5200 o->g_in2 = true;
5202 #define SPEC_in2_r1_o 0
5204 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5206 o->in2 = tcg_temp_new_i64();
5207 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5209 #define SPEC_in2_r1_16u 0
5211 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5213 o->in2 = tcg_temp_new_i64();
5214 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5216 #define SPEC_in2_r1_32u 0
5218 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5220 int r1 = get_field(f, r1);
5221 o->in2 = tcg_temp_new_i64();
5222 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5224 #define SPEC_in2_r1_D32 SPEC_r1_even
5226 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5228 o->in2 = load_reg(get_field(f, r2));
5230 #define SPEC_in2_r2 0
5232 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5234 o->in2 = regs[get_field(f, r2)];
5235 o->g_in2 = true;
5237 #define SPEC_in2_r2_o 0
5239 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5241 int r2 = get_field(f, r2);
5242 if (r2 != 0) {
5243 o->in2 = load_reg(r2);
5246 #define SPEC_in2_r2_nz 0
5248 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5250 o->in2 = tcg_temp_new_i64();
5251 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5253 #define SPEC_in2_r2_8s 0
5255 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5257 o->in2 = tcg_temp_new_i64();
5258 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5260 #define SPEC_in2_r2_8u 0
5262 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5264 o->in2 = tcg_temp_new_i64();
5265 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5267 #define SPEC_in2_r2_16s 0
5269 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5271 o->in2 = tcg_temp_new_i64();
5272 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5274 #define SPEC_in2_r2_16u 0
5276 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5278 o->in2 = load_reg(get_field(f, r3));
5280 #define SPEC_in2_r3 0
5282 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5284 o->in2 = tcg_temp_new_i64();
5285 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5287 #define SPEC_in2_r3_sr32 0
5289 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5291 o->in2 = tcg_temp_new_i64();
5292 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5294 #define SPEC_in2_r2_32s 0
5296 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5298 o->in2 = tcg_temp_new_i64();
5299 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5301 #define SPEC_in2_r2_32u 0
5303 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5305 o->in2 = tcg_temp_new_i64();
5306 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5308 #define SPEC_in2_r2_sr32 0
5310 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5312 o->in2 = load_freg32_i64(get_field(f, r2));
5314 #define SPEC_in2_e2 0
5316 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5318 o->in2 = fregs[get_field(f, r2)];
5319 o->g_in2 = true;
5321 #define SPEC_in2_f2_o 0
5323 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5325 int r2 = get_field(f, r2);
5326 o->in1 = fregs[r2];
5327 o->in2 = fregs[r2 + 2];
5328 o->g_in1 = o->g_in2 = true;
5330 #define SPEC_in2_x2_o SPEC_r2_f128
5332 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5334 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5336 #define SPEC_in2_ra2 0
5338 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5340 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5341 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5343 #define SPEC_in2_a2 0
5345 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5347 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5349 #define SPEC_in2_ri2 0
5351 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5353 help_l2_shift(s, f, o, 31);
5355 #define SPEC_in2_sh32 0
5357 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5359 help_l2_shift(s, f, o, 63);
5361 #define SPEC_in2_sh64 0
5363 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5365 in2_a2(s, f, o);
5366 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5368 #define SPEC_in2_m2_8u 0
5370 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5372 in2_a2(s, f, o);
5373 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5375 #define SPEC_in2_m2_16s 0
5377 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5379 in2_a2(s, f, o);
5380 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5382 #define SPEC_in2_m2_16u 0
5384 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5386 in2_a2(s, f, o);
5387 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5389 #define SPEC_in2_m2_32s 0
5391 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5393 in2_a2(s, f, o);
5394 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5396 #define SPEC_in2_m2_32u 0
5398 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5400 in2_a2(s, f, o);
5401 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5403 #define SPEC_in2_m2_64 0
5405 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5407 in2_ri2(s, f, o);
5408 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5410 #define SPEC_in2_mri2_16u 0
5412 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5414 in2_ri2(s, f, o);
5415 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5417 #define SPEC_in2_mri2_32s 0
5419 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5421 in2_ri2(s, f, o);
5422 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5424 #define SPEC_in2_mri2_32u 0
5426 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5428 in2_ri2(s, f, o);
5429 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5431 #define SPEC_in2_mri2_64 0
5433 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5435 o->in2 = tcg_const_i64(get_field(f, i2));
5437 #define SPEC_in2_i2 0
5439 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5441 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5443 #define SPEC_in2_i2_8u 0
5445 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5447 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5449 #define SPEC_in2_i2_16u 0
5451 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5453 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5455 #define SPEC_in2_i2_32u 0
5457 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5459 uint64_t i2 = (uint16_t)get_field(f, i2);
5460 o->in2 = tcg_const_i64(i2 << s->insn->data);
5462 #define SPEC_in2_i2_16u_shl 0
5464 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5466 uint64_t i2 = (uint32_t)get_field(f, i2);
5467 o->in2 = tcg_const_i64(i2 << s->insn->data);
5469 #define SPEC_in2_i2_32u_shl 0
5471 #ifndef CONFIG_USER_ONLY
5472 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5474 o->in2 = tcg_const_i64(s->fields->raw_insn);
5476 #define SPEC_in2_insn 0
5477 #endif
5479 /* ====================================================================== */
5481 /* Find opc within the table of insns. This is formulated as a switch
5482 statement so that (1) we get compile-time notice of cut-paste errors
5483 for duplicated opcodes, and (2) the compiler generates the binary
5484 search tree, rather than us having to post-process the table. */
5486 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5487 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5489 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5491 enum DisasInsnEnum {
5492 #include "insn-data.def"
5495 #undef D
5496 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5497 .opc = OPC, \
5498 .fmt = FMT_##FT, \
5499 .fac = FAC_##FC, \
5500 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5501 .name = #NM, \
5502 .help_in1 = in1_##I1, \
5503 .help_in2 = in2_##I2, \
5504 .help_prep = prep_##P, \
5505 .help_wout = wout_##W, \
5506 .help_cout = cout_##CC, \
5507 .help_op = op_##OP, \
5508 .data = D \
5511 /* Allow 0 to be used for NULL in the table below. */
5512 #define in1_0 NULL
5513 #define in2_0 NULL
5514 #define prep_0 NULL
5515 #define wout_0 NULL
5516 #define cout_0 NULL
5517 #define op_0 NULL
5519 #define SPEC_in1_0 0
5520 #define SPEC_in2_0 0
5521 #define SPEC_prep_0 0
5522 #define SPEC_wout_0 0
5524 /* Give smaller names to the various facilities. */
5525 #define FAC_Z S390_FEAT_ZARCH
5526 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5527 #define FAC_DFP S390_FEAT_DFP
5528 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5529 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5530 #define FAC_EE S390_FEAT_EXECUTE_EXT
5531 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5532 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5533 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5534 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5535 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5536 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5537 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5538 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5539 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5540 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5541 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5542 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5543 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5544 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5545 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5546 #define FAC_SFLE S390_FEAT_STFLE
5547 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5548 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5549 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5550 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5551 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5552 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5553 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5554 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5555 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5556 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5557 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5558 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5559 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5561 static const DisasInsn insn_info[] = {
5562 #include "insn-data.def"
5565 #undef D
5566 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5567 case OPC: return &insn_info[insn_ ## NM];
5569 static const DisasInsn *lookup_opc(uint16_t opc)
5571 switch (opc) {
5572 #include "insn-data.def"
5573 default:
5574 return NULL;
5578 #undef D
5579 #undef C
5581 /* Extract a field from the insn. The INSN should be left-aligned in
5582 the uint64_t so that we can more easily utilize the big-bit-endian
5583 definitions we extract from the Principals of Operation. */
5585 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5587 uint32_t r, m;
5589 if (f->size == 0) {
5590 return;
5593 /* Zero extract the field from the insn. */
5594 r = (insn << f->beg) >> (64 - f->size);
5596 /* Sign-extend, or un-swap the field as necessary. */
5597 switch (f->type) {
5598 case 0: /* unsigned */
5599 break;
5600 case 1: /* signed */
5601 assert(f->size <= 32);
5602 m = 1u << (f->size - 1);
5603 r = (r ^ m) - m;
5604 break;
5605 case 2: /* dl+dh split, signed 20 bit. */
5606 r = ((int8_t)r << 12) | (r >> 8);
5607 break;
5608 default:
5609 abort();
5612 /* Validate that the "compressed" encoding we selected above is valid.
5613 I.e. we havn't make two different original fields overlap. */
5614 assert(((o->presentC >> f->indexC) & 1) == 0);
5615 o->presentC |= 1 << f->indexC;
5616 o->presentO |= 1 << f->indexO;
5618 o->c[f->indexC] = r;
5621 /* Lookup the insn at the current PC, extracting the operands into O and
5622 returning the info struct for the insn. Returns NULL for invalid insn. */
5624 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5625 DisasFields *f)
5627 uint64_t insn, pc = s->pc;
5628 int op, op2, ilen;
5629 const DisasInsn *info;
5631 if (unlikely(s->ex_value)) {
5632 /* Drop the EX data now, so that it's clear on exception paths. */
5633 TCGv_i64 zero = tcg_const_i64(0);
5634 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5635 tcg_temp_free_i64(zero);
5637 /* Extract the values saved by EXECUTE. */
5638 insn = s->ex_value & 0xffffffffffff0000ull;
5639 ilen = s->ex_value & 0xf;
5640 op = insn >> 56;
5641 } else {
5642 insn = ld_code2(env, pc);
5643 op = (insn >> 8) & 0xff;
5644 ilen = get_ilen(op);
5645 switch (ilen) {
5646 case 2:
5647 insn = insn << 48;
5648 break;
5649 case 4:
5650 insn = ld_code4(env, pc) << 32;
5651 break;
5652 case 6:
5653 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5654 break;
5655 default:
5656 g_assert_not_reached();
5659 s->next_pc = s->pc + ilen;
5660 s->ilen = ilen;
5662 /* We can't actually determine the insn format until we've looked up
5663 the full insn opcode. Which we can't do without locating the
5664 secondary opcode. Assume by default that OP2 is at bit 40; for
5665 those smaller insns that don't actually have a secondary opcode
5666 this will correctly result in OP2 = 0. */
5667 switch (op) {
5668 case 0x01: /* E */
5669 case 0x80: /* S */
5670 case 0x82: /* S */
5671 case 0x93: /* S */
5672 case 0xb2: /* S, RRF, RRE, IE */
5673 case 0xb3: /* RRE, RRD, RRF */
5674 case 0xb9: /* RRE, RRF */
5675 case 0xe5: /* SSE, SIL */
5676 op2 = (insn << 8) >> 56;
5677 break;
5678 case 0xa5: /* RI */
5679 case 0xa7: /* RI */
5680 case 0xc0: /* RIL */
5681 case 0xc2: /* RIL */
5682 case 0xc4: /* RIL */
5683 case 0xc6: /* RIL */
5684 case 0xc8: /* SSF */
5685 case 0xcc: /* RIL */
5686 op2 = (insn << 12) >> 60;
5687 break;
5688 case 0xc5: /* MII */
5689 case 0xc7: /* SMI */
5690 case 0xd0 ... 0xdf: /* SS */
5691 case 0xe1: /* SS */
5692 case 0xe2: /* SS */
5693 case 0xe8: /* SS */
5694 case 0xe9: /* SS */
5695 case 0xea: /* SS */
5696 case 0xee ... 0xf3: /* SS */
5697 case 0xf8 ... 0xfd: /* SS */
5698 op2 = 0;
5699 break;
5700 default:
5701 op2 = (insn << 40) >> 56;
5702 break;
5705 memset(f, 0, sizeof(*f));
5706 f->raw_insn = insn;
5707 f->op = op;
5708 f->op2 = op2;
5710 /* Lookup the instruction. */
5711 info = lookup_opc(op << 8 | op2);
5713 /* If we found it, extract the operands. */
5714 if (info != NULL) {
5715 DisasFormat fmt = info->fmt;
5716 int i;
5718 for (i = 0; i < NUM_C_FIELD; ++i) {
5719 extract_field(f, &format_info[fmt].op[i], insn);
5722 return info;
5725 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5727 const DisasInsn *insn;
5728 ExitStatus ret = NO_EXIT;
5729 DisasFields f;
5730 DisasOps o;
5732 /* Search for the insn in the table. */
5733 insn = extract_insn(env, s, &f);
5735 /* Not found means unimplemented/illegal opcode. */
5736 if (insn == NULL) {
5737 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5738 f.op, f.op2);
5739 gen_illegal_opcode(s);
5740 return EXIT_NORETURN;
5743 #ifndef CONFIG_USER_ONLY
5744 if (s->tb->flags & FLAG_MASK_PER) {
5745 TCGv_i64 addr = tcg_const_i64(s->pc);
5746 gen_helper_per_ifetch(cpu_env, addr);
5747 tcg_temp_free_i64(addr);
5749 #endif
5751 /* Check for insn specification exceptions. */
5752 if (insn->spec) {
5753 int spec = insn->spec, excp = 0, r;
5755 if (spec & SPEC_r1_even) {
5756 r = get_field(&f, r1);
5757 if (r & 1) {
5758 excp = PGM_SPECIFICATION;
5761 if (spec & SPEC_r2_even) {
5762 r = get_field(&f, r2);
5763 if (r & 1) {
5764 excp = PGM_SPECIFICATION;
5767 if (spec & SPEC_r3_even) {
5768 r = get_field(&f, r3);
5769 if (r & 1) {
5770 excp = PGM_SPECIFICATION;
5773 if (spec & SPEC_r1_f128) {
5774 r = get_field(&f, r1);
5775 if (r > 13) {
5776 excp = PGM_SPECIFICATION;
5779 if (spec & SPEC_r2_f128) {
5780 r = get_field(&f, r2);
5781 if (r > 13) {
5782 excp = PGM_SPECIFICATION;
5785 if (excp) {
5786 gen_program_exception(s, excp);
5787 return EXIT_NORETURN;
5791 /* Set up the strutures we use to communicate with the helpers. */
5792 s->insn = insn;
5793 s->fields = &f;
5794 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5795 TCGV_UNUSED_I64(o.out);
5796 TCGV_UNUSED_I64(o.out2);
5797 TCGV_UNUSED_I64(o.in1);
5798 TCGV_UNUSED_I64(o.in2);
5799 TCGV_UNUSED_I64(o.addr1);
5801 /* Implement the instruction. */
5802 if (insn->help_in1) {
5803 insn->help_in1(s, &f, &o);
5805 if (insn->help_in2) {
5806 insn->help_in2(s, &f, &o);
5808 if (insn->help_prep) {
5809 insn->help_prep(s, &f, &o);
5811 if (insn->help_op) {
5812 ret = insn->help_op(s, &o);
5814 if (insn->help_wout) {
5815 insn->help_wout(s, &f, &o);
5817 if (insn->help_cout) {
5818 insn->help_cout(s, &o);
5821 /* Free any temporaries created by the helpers. */
5822 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5823 tcg_temp_free_i64(o.out);
5825 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5826 tcg_temp_free_i64(o.out2);
5828 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5829 tcg_temp_free_i64(o.in1);
5831 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5832 tcg_temp_free_i64(o.in2);
5834 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5835 tcg_temp_free_i64(o.addr1);
5838 #ifndef CONFIG_USER_ONLY
5839 if (s->tb->flags & FLAG_MASK_PER) {
5840 /* An exception might be triggered, save PSW if not already done. */
5841 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5842 tcg_gen_movi_i64(psw_addr, s->next_pc);
5845 /* Save off cc. */
5846 update_cc_op(s);
5848 /* Call the helper to check for a possible PER exception. */
5849 gen_helper_per_check_exception(cpu_env);
5851 #endif
5853 /* Advance to the next instruction. */
5854 s->pc = s->next_pc;
5855 return ret;
5858 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
5860 CPUS390XState *env = cs->env_ptr;
5861 DisasContext dc;
5862 target_ulong pc_start;
5863 uint64_t next_page_start;
5864 int num_insns, max_insns;
5865 ExitStatus status;
5866 bool do_debug;
5868 pc_start = tb->pc;
5870 /* 31-bit mode */
5871 if (!(tb->flags & FLAG_MASK_64)) {
5872 pc_start &= 0x7fffffff;
5875 dc.tb = tb;
5876 dc.pc = pc_start;
5877 dc.cc_op = CC_OP_DYNAMIC;
5878 dc.ex_value = tb->cs_base;
5879 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5881 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5883 num_insns = 0;
5884 max_insns = tb->cflags & CF_COUNT_MASK;
5885 if (max_insns == 0) {
5886 max_insns = CF_COUNT_MASK;
5888 if (max_insns > TCG_MAX_INSNS) {
5889 max_insns = TCG_MAX_INSNS;
5892 gen_tb_start(tb);
5894 do {
5895 tcg_gen_insn_start(dc.pc, dc.cc_op);
5896 num_insns++;
5898 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5899 status = EXIT_PC_STALE;
5900 do_debug = true;
5901 /* The address covered by the breakpoint must be included in
5902 [tb->pc, tb->pc + tb->size) in order to for it to be
5903 properly cleared -- thus we increment the PC here so that
5904 the logic setting tb->size below does the right thing. */
5905 dc.pc += 2;
5906 break;
5909 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5910 gen_io_start();
5913 status = translate_one(env, &dc);
5915 /* If we reach a page boundary, are single stepping,
5916 or exhaust instruction count, stop generation. */
5917 if (status == NO_EXIT
5918 && (dc.pc >= next_page_start
5919 || tcg_op_buf_full()
5920 || num_insns >= max_insns
5921 || singlestep
5922 || cs->singlestep_enabled
5923 || dc.ex_value)) {
5924 status = EXIT_PC_STALE;
5926 } while (status == NO_EXIT);
5928 if (tb->cflags & CF_LAST_IO) {
5929 gen_io_end();
5932 switch (status) {
5933 case EXIT_GOTO_TB:
5934 case EXIT_NORETURN:
5935 break;
5936 case EXIT_PC_STALE:
5937 case EXIT_PC_STALE_NOCHAIN:
5938 update_psw_addr(&dc);
5939 /* FALLTHRU */
5940 case EXIT_PC_UPDATED:
5941 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5942 cc op type is in env */
5943 update_cc_op(&dc);
5944 /* FALLTHRU */
5945 case EXIT_PC_CC_UPDATED:
5946 /* Exit the TB, either by raising a debug exception or by return. */
5947 if (do_debug) {
5948 gen_exception(EXCP_DEBUG);
5949 } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
5950 tcg_gen_exit_tb(0);
5951 } else {
5952 tcg_gen_lookup_and_goto_ptr(psw_addr);
5954 break;
5955 default:
5956 g_assert_not_reached();
5959 gen_tb_end(tb, num_insns);
5961 tb->size = dc.pc - pc_start;
5962 tb->icount = num_insns;
5964 #if defined(S390X_DEBUG_DISAS)
5965 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5966 && qemu_log_in_addr_range(pc_start)) {
5967 qemu_log_lock();
5968 if (unlikely(dc.ex_value)) {
5969 /* ??? Unfortunately log_target_disas can't use host memory. */
5970 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
5971 } else {
5972 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5973 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5974 qemu_log("\n");
5976 qemu_log_unlock();
5978 #endif
5981 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5982 target_ulong *data)
5984 int cc_op = data[1];
5985 env->psw.addr = data[0];
5986 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5987 env->cc_op = cc_op;