/*
 * Source: QEMU target/s390x/translate.c
 * (Web-archive capture residue removed: commit title "target/s390x: Move
 * s390_cpu_dump_state() to helper.c", repository path, and blob hash.)
 */
/*
 *  S/390 translation
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2010 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/* Debug knobs for the translator; LOG_DISAS compiles away unless the
   verbose switch is defined. */
/* #define DEBUG_INLINE_BRANCHES */
#define S390X_DEBUG_DISAS
/* #define S390X_DEBUG_DISAS_VERBOSE */

#ifdef S390X_DEBUG_DISAS_VERBOSE
#  define LOG_DISAS(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "disas/disas.h"
34 #include "exec/exec-all.h"
35 #include "tcg-op.h"
36 #include "qemu/log.h"
37 #include "qemu/host-utils.h"
38 #include "exec/cpu_ldst.h"
40 /* global register indexes */
41 static TCGv_env cpu_env;
43 #include "exec/gen-icount.h"
44 #include "exec/helper-proto.h"
45 #include "exec/helper-gen.h"
47 #include "trace-tcg.h"
48 #include "exec/log.h"
51 /* Information that (most) every instruction needs to manipulate. */
52 typedef struct DisasContext DisasContext;
53 typedef struct DisasInsn DisasInsn;
54 typedef struct DisasFields DisasFields;
56 struct DisasContext {
57 struct TranslationBlock *tb;
58 const DisasInsn *insn;
59 DisasFields *fields;
60 uint64_t ex_value;
61 uint64_t pc, next_pc;
62 uint32_t ilen;
63 enum cc_op cc_op;
64 bool singlestep_enabled;
67 /* Information carried about a condition to be evaluated. */
68 typedef struct {
69 TCGCond cond:8;
70 bool is_64;
71 bool g1;
72 bool g2;
73 union {
74 struct { TCGv_i64 a, b; } s64;
75 struct { TCGv_i32 a, b; } s32;
76 } u;
77 } DisasCompare;
#define DISAS_EXCP 4

/* Hit/miss counters for the inline-branch optimization, debug only. */
#ifdef DEBUG_INLINE_BRANCHES
static uint64_t inline_branch_hit[CC_OP_MAX];
static uint64_t inline_branch_miss[CC_OP_MAX];
#endif
86 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
88 if (!(s->tb->flags & FLAG_MASK_64)) {
89 if (s->tb->flags & FLAG_MASK_32) {
90 return pc | 0x80000000;
93 return pc;
96 static TCGv_i64 psw_addr;
97 static TCGv_i64 psw_mask;
98 static TCGv_i64 gbea;
100 static TCGv_i32 cc_op;
101 static TCGv_i64 cc_src;
102 static TCGv_i64 cc_dst;
103 static TCGv_i64 cc_vr;
105 static char cpu_reg_names[32][4];
106 static TCGv_i64 regs[16];
107 static TCGv_i64 fregs[16];
109 void s390x_translate_init(void)
111 int i;
113 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
114 tcg_ctx.tcg_env = cpu_env;
115 psw_addr = tcg_global_mem_new_i64(cpu_env,
116 offsetof(CPUS390XState, psw.addr),
117 "psw_addr");
118 psw_mask = tcg_global_mem_new_i64(cpu_env,
119 offsetof(CPUS390XState, psw.mask),
120 "psw_mask");
121 gbea = tcg_global_mem_new_i64(cpu_env,
122 offsetof(CPUS390XState, gbea),
123 "gbea");
125 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
126 "cc_op");
127 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
128 "cc_src");
129 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
130 "cc_dst");
131 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
132 "cc_vr");
134 for (i = 0; i < 16; i++) {
135 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
136 regs[i] = tcg_global_mem_new(cpu_env,
137 offsetof(CPUS390XState, regs[i]),
138 cpu_reg_names[i]);
141 for (i = 0; i < 16; i++) {
142 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
143 fregs[i] = tcg_global_mem_new(cpu_env,
144 offsetof(CPUS390XState, vregs[i][0].d),
145 cpu_reg_names[i + 16]);
149 static TCGv_i64 load_reg(int reg)
151 TCGv_i64 r = tcg_temp_new_i64();
152 tcg_gen_mov_i64(r, regs[reg]);
153 return r;
156 static TCGv_i64 load_freg32_i64(int reg)
158 TCGv_i64 r = tcg_temp_new_i64();
159 tcg_gen_shri_i64(r, fregs[reg], 32);
160 return r;
163 static void store_reg(int reg, TCGv_i64 v)
165 tcg_gen_mov_i64(regs[reg], v);
168 static void store_freg(int reg, TCGv_i64 v)
170 tcg_gen_mov_i64(fregs[reg], v);
173 static void store_reg32_i64(int reg, TCGv_i64 v)
175 /* 32 bit register writes keep the upper half */
176 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
179 static void store_reg32h_i64(int reg, TCGv_i64 v)
181 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
184 static void store_freg32_i64(int reg, TCGv_i64 v)
186 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
189 static void return_low128(TCGv_i64 dest)
191 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
194 static void update_psw_addr(DisasContext *s)
196 /* psw.addr */
197 tcg_gen_movi_i64(psw_addr, s->pc);
200 static void per_branch(DisasContext *s, bool to_next)
202 #ifndef CONFIG_USER_ONLY
203 tcg_gen_movi_i64(gbea, s->pc);
205 if (s->tb->flags & FLAG_MASK_PER) {
206 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
207 gen_helper_per_branch(cpu_env, gbea, next_pc);
208 if (to_next) {
209 tcg_temp_free_i64(next_pc);
212 #endif
215 static void per_branch_cond(DisasContext *s, TCGCond cond,
216 TCGv_i64 arg1, TCGv_i64 arg2)
218 #ifndef CONFIG_USER_ONLY
219 if (s->tb->flags & FLAG_MASK_PER) {
220 TCGLabel *lab = gen_new_label();
221 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
223 tcg_gen_movi_i64(gbea, s->pc);
224 gen_helper_per_branch(cpu_env, gbea, psw_addr);
226 gen_set_label(lab);
227 } else {
228 TCGv_i64 pc = tcg_const_i64(s->pc);
229 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
230 tcg_temp_free_i64(pc);
232 #endif
235 static void per_breaking_event(DisasContext *s)
237 tcg_gen_movi_i64(gbea, s->pc);
240 static void update_cc_op(DisasContext *s)
242 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
243 tcg_gen_movi_i32(cc_op, s->cc_op);
247 static void potential_page_fault(DisasContext *s)
249 update_psw_addr(s);
250 update_cc_op(s);
253 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
255 return (uint64_t)cpu_lduw_code(env, pc);
258 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
260 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
263 static int get_mem_index(DisasContext *s)
265 switch (s->tb->flags & FLAG_MASK_ASC) {
266 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
267 return 0;
268 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
269 return 1;
270 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
271 return 2;
272 default:
273 tcg_abort();
274 break;
278 static void gen_exception(int excp)
280 TCGv_i32 tmp = tcg_const_i32(excp);
281 gen_helper_exception(cpu_env, tmp);
282 tcg_temp_free_i32(tmp);
285 static void gen_program_exception(DisasContext *s, int code)
287 TCGv_i32 tmp;
289 /* Remember what pgm exeption this was. */
290 tmp = tcg_const_i32(code);
291 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
292 tcg_temp_free_i32(tmp);
294 tmp = tcg_const_i32(s->ilen);
295 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
296 tcg_temp_free_i32(tmp);
298 /* update the psw */
299 update_psw_addr(s);
301 /* Save off cc. */
302 update_cc_op(s);
304 /* Trigger exception. */
305 gen_exception(EXCP_PGM);
308 static inline void gen_illegal_opcode(DisasContext *s)
310 gen_program_exception(s, PGM_OPERATION);
313 static inline void gen_trap(DisasContext *s)
315 TCGv_i32 t;
317 /* Set DXC to 0xff. */
318 t = tcg_temp_new_i32();
319 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
320 tcg_gen_ori_i32(t, t, 0xff00);
321 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
322 tcg_temp_free_i32(t);
324 gen_program_exception(s, PGM_DATA);
327 #ifndef CONFIG_USER_ONLY
328 static void check_privileged(DisasContext *s)
330 if (s->tb->flags & FLAG_MASK_PSTATE) {
331 gen_program_exception(s, PGM_PRIVILEGED);
334 #endif
336 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
338 TCGv_i64 tmp = tcg_temp_new_i64();
339 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
341 /* Note that d2 is limited to 20 bits, signed. If we crop negative
342 displacements early we create larger immedate addends. */
344 /* Note that addi optimizes the imm==0 case. */
345 if (b2 && x2) {
346 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
347 tcg_gen_addi_i64(tmp, tmp, d2);
348 } else if (b2) {
349 tcg_gen_addi_i64(tmp, regs[b2], d2);
350 } else if (x2) {
351 tcg_gen_addi_i64(tmp, regs[x2], d2);
352 } else {
353 if (need_31) {
354 d2 &= 0x7fffffff;
355 need_31 = false;
357 tcg_gen_movi_i64(tmp, d2);
359 if (need_31) {
360 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
363 return tmp;
366 static inline bool live_cc_data(DisasContext *s)
368 return (s->cc_op != CC_OP_DYNAMIC
369 && s->cc_op != CC_OP_STATIC
370 && s->cc_op > 3);
373 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
375 if (live_cc_data(s)) {
376 tcg_gen_discard_i64(cc_src);
377 tcg_gen_discard_i64(cc_dst);
378 tcg_gen_discard_i64(cc_vr);
380 s->cc_op = CC_OP_CONST0 + val;
383 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
385 if (live_cc_data(s)) {
386 tcg_gen_discard_i64(cc_src);
387 tcg_gen_discard_i64(cc_vr);
389 tcg_gen_mov_i64(cc_dst, dst);
390 s->cc_op = op;
393 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
394 TCGv_i64 dst)
396 if (live_cc_data(s)) {
397 tcg_gen_discard_i64(cc_vr);
399 tcg_gen_mov_i64(cc_src, src);
400 tcg_gen_mov_i64(cc_dst, dst);
401 s->cc_op = op;
404 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
405 TCGv_i64 dst, TCGv_i64 vr)
407 tcg_gen_mov_i64(cc_src, src);
408 tcg_gen_mov_i64(cc_dst, dst);
409 tcg_gen_mov_i64(cc_vr, vr);
410 s->cc_op = op;
413 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
415 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
418 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
420 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
423 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
425 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
428 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
430 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
433 /* CC value is in env->cc_op */
434 static void set_cc_static(DisasContext *s)
436 if (live_cc_data(s)) {
437 tcg_gen_discard_i64(cc_src);
438 tcg_gen_discard_i64(cc_dst);
439 tcg_gen_discard_i64(cc_vr);
441 s->cc_op = CC_OP_STATIC;
444 /* calculates cc into cc_op */
445 static void gen_op_calc_cc(DisasContext *s)
447 TCGv_i32 local_cc_op;
448 TCGv_i64 dummy;
450 TCGV_UNUSED_I32(local_cc_op);
451 TCGV_UNUSED_I64(dummy);
452 switch (s->cc_op) {
453 default:
454 dummy = tcg_const_i64(0);
455 /* FALLTHRU */
456 case CC_OP_ADD_64:
457 case CC_OP_ADDU_64:
458 case CC_OP_ADDC_64:
459 case CC_OP_SUB_64:
460 case CC_OP_SUBU_64:
461 case CC_OP_SUBB_64:
462 case CC_OP_ADD_32:
463 case CC_OP_ADDU_32:
464 case CC_OP_ADDC_32:
465 case CC_OP_SUB_32:
466 case CC_OP_SUBU_32:
467 case CC_OP_SUBB_32:
468 local_cc_op = tcg_const_i32(s->cc_op);
469 break;
470 case CC_OP_CONST0:
471 case CC_OP_CONST1:
472 case CC_OP_CONST2:
473 case CC_OP_CONST3:
474 case CC_OP_STATIC:
475 case CC_OP_DYNAMIC:
476 break;
479 switch (s->cc_op) {
480 case CC_OP_CONST0:
481 case CC_OP_CONST1:
482 case CC_OP_CONST2:
483 case CC_OP_CONST3:
484 /* s->cc_op is the cc value */
485 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
486 break;
487 case CC_OP_STATIC:
488 /* env->cc_op already is the cc value */
489 break;
490 case CC_OP_NZ:
491 case CC_OP_ABS_64:
492 case CC_OP_NABS_64:
493 case CC_OP_ABS_32:
494 case CC_OP_NABS_32:
495 case CC_OP_LTGT0_32:
496 case CC_OP_LTGT0_64:
497 case CC_OP_COMP_32:
498 case CC_OP_COMP_64:
499 case CC_OP_NZ_F32:
500 case CC_OP_NZ_F64:
501 case CC_OP_FLOGR:
502 /* 1 argument */
503 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
504 break;
505 case CC_OP_ICM:
506 case CC_OP_LTGT_32:
507 case CC_OP_LTGT_64:
508 case CC_OP_LTUGTU_32:
509 case CC_OP_LTUGTU_64:
510 case CC_OP_TM_32:
511 case CC_OP_TM_64:
512 case CC_OP_SLA_32:
513 case CC_OP_SLA_64:
514 case CC_OP_NZ_F128:
515 /* 2 arguments */
516 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
517 break;
518 case CC_OP_ADD_64:
519 case CC_OP_ADDU_64:
520 case CC_OP_ADDC_64:
521 case CC_OP_SUB_64:
522 case CC_OP_SUBU_64:
523 case CC_OP_SUBB_64:
524 case CC_OP_ADD_32:
525 case CC_OP_ADDU_32:
526 case CC_OP_ADDC_32:
527 case CC_OP_SUB_32:
528 case CC_OP_SUBU_32:
529 case CC_OP_SUBB_32:
530 /* 3 arguments */
531 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
532 break;
533 case CC_OP_DYNAMIC:
534 /* unknown operation - assume 3 arguments and cc_op in env */
535 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
536 break;
537 default:
538 tcg_abort();
541 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
542 tcg_temp_free_i32(local_cc_op);
544 if (!TCGV_IS_UNUSED_I64(dummy)) {
545 tcg_temp_free_i64(dummy);
548 /* We now have cc in cc_op as constant */
549 set_cc_static(s);
552 static bool use_exit_tb(DisasContext *s)
554 return (s->singlestep_enabled ||
555 (s->tb->cflags & CF_LAST_IO) ||
556 (s->tb->flags & FLAG_MASK_PER));
559 static bool use_goto_tb(DisasContext *s, uint64_t dest)
561 if (unlikely(use_exit_tb(s))) {
562 return false;
564 #ifndef CONFIG_USER_ONLY
565 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
566 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
567 #else
568 return true;
569 #endif
572 static void account_noninline_branch(DisasContext *s, int cc_op)
574 #ifdef DEBUG_INLINE_BRANCHES
575 inline_branch_miss[cc_op]++;
576 #endif
579 static void account_inline_branch(DisasContext *s, int cc_op)
581 #ifdef DEBUG_INLINE_BRANCHES
582 inline_branch_hit[cc_op]++;
583 #endif
586 /* Table of mask values to comparison codes, given a comparison as input.
587 For such, CC=3 should not be possible. */
588 static const TCGCond ltgt_cond[16] = {
589 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
590 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
591 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
592 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
593 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
594 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
595 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
596 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
599 /* Table of mask values to comparison codes, given a logic op as input.
600 For such, only CC=0 and CC=1 should be possible. */
601 static const TCGCond nz_cond[16] = {
602 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
603 TCG_COND_NEVER, TCG_COND_NEVER,
604 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
605 TCG_COND_NE, TCG_COND_NE,
606 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
607 TCG_COND_EQ, TCG_COND_EQ,
608 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
609 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
612 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
613 details required to generate a TCG comparison. */
614 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
616 TCGCond cond;
617 enum cc_op old_cc_op = s->cc_op;
619 if (mask == 15 || mask == 0) {
620 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
621 c->u.s32.a = cc_op;
622 c->u.s32.b = cc_op;
623 c->g1 = c->g2 = true;
624 c->is_64 = false;
625 return;
628 /* Find the TCG condition for the mask + cc op. */
629 switch (old_cc_op) {
630 case CC_OP_LTGT0_32:
631 case CC_OP_LTGT0_64:
632 case CC_OP_LTGT_32:
633 case CC_OP_LTGT_64:
634 cond = ltgt_cond[mask];
635 if (cond == TCG_COND_NEVER) {
636 goto do_dynamic;
638 account_inline_branch(s, old_cc_op);
639 break;
641 case CC_OP_LTUGTU_32:
642 case CC_OP_LTUGTU_64:
643 cond = tcg_unsigned_cond(ltgt_cond[mask]);
644 if (cond == TCG_COND_NEVER) {
645 goto do_dynamic;
647 account_inline_branch(s, old_cc_op);
648 break;
650 case CC_OP_NZ:
651 cond = nz_cond[mask];
652 if (cond == TCG_COND_NEVER) {
653 goto do_dynamic;
655 account_inline_branch(s, old_cc_op);
656 break;
658 case CC_OP_TM_32:
659 case CC_OP_TM_64:
660 switch (mask) {
661 case 8:
662 cond = TCG_COND_EQ;
663 break;
664 case 4 | 2 | 1:
665 cond = TCG_COND_NE;
666 break;
667 default:
668 goto do_dynamic;
670 account_inline_branch(s, old_cc_op);
671 break;
673 case CC_OP_ICM:
674 switch (mask) {
675 case 8:
676 cond = TCG_COND_EQ;
677 break;
678 case 4 | 2 | 1:
679 case 4 | 2:
680 cond = TCG_COND_NE;
681 break;
682 default:
683 goto do_dynamic;
685 account_inline_branch(s, old_cc_op);
686 break;
688 case CC_OP_FLOGR:
689 switch (mask & 0xa) {
690 case 8: /* src == 0 -> no one bit found */
691 cond = TCG_COND_EQ;
692 break;
693 case 2: /* src != 0 -> one bit found */
694 cond = TCG_COND_NE;
695 break;
696 default:
697 goto do_dynamic;
699 account_inline_branch(s, old_cc_op);
700 break;
702 case CC_OP_ADDU_32:
703 case CC_OP_ADDU_64:
704 switch (mask) {
705 case 8 | 2: /* vr == 0 */
706 cond = TCG_COND_EQ;
707 break;
708 case 4 | 1: /* vr != 0 */
709 cond = TCG_COND_NE;
710 break;
711 case 8 | 4: /* no carry -> vr >= src */
712 cond = TCG_COND_GEU;
713 break;
714 case 2 | 1: /* carry -> vr < src */
715 cond = TCG_COND_LTU;
716 break;
717 default:
718 goto do_dynamic;
720 account_inline_branch(s, old_cc_op);
721 break;
723 case CC_OP_SUBU_32:
724 case CC_OP_SUBU_64:
725 /* Note that CC=0 is impossible; treat it as dont-care. */
726 switch (mask & 7) {
727 case 2: /* zero -> op1 == op2 */
728 cond = TCG_COND_EQ;
729 break;
730 case 4 | 1: /* !zero -> op1 != op2 */
731 cond = TCG_COND_NE;
732 break;
733 case 4: /* borrow (!carry) -> op1 < op2 */
734 cond = TCG_COND_LTU;
735 break;
736 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
737 cond = TCG_COND_GEU;
738 break;
739 default:
740 goto do_dynamic;
742 account_inline_branch(s, old_cc_op);
743 break;
745 default:
746 do_dynamic:
747 /* Calculate cc value. */
748 gen_op_calc_cc(s);
749 /* FALLTHRU */
751 case CC_OP_STATIC:
752 /* Jump based on CC. We'll load up the real cond below;
753 the assignment here merely avoids a compiler warning. */
754 account_noninline_branch(s, old_cc_op);
755 old_cc_op = CC_OP_STATIC;
756 cond = TCG_COND_NEVER;
757 break;
760 /* Load up the arguments of the comparison. */
761 c->is_64 = true;
762 c->g1 = c->g2 = false;
763 switch (old_cc_op) {
764 case CC_OP_LTGT0_32:
765 c->is_64 = false;
766 c->u.s32.a = tcg_temp_new_i32();
767 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
768 c->u.s32.b = tcg_const_i32(0);
769 break;
770 case CC_OP_LTGT_32:
771 case CC_OP_LTUGTU_32:
772 case CC_OP_SUBU_32:
773 c->is_64 = false;
774 c->u.s32.a = tcg_temp_new_i32();
775 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
776 c->u.s32.b = tcg_temp_new_i32();
777 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
778 break;
780 case CC_OP_LTGT0_64:
781 case CC_OP_NZ:
782 case CC_OP_FLOGR:
783 c->u.s64.a = cc_dst;
784 c->u.s64.b = tcg_const_i64(0);
785 c->g1 = true;
786 break;
787 case CC_OP_LTGT_64:
788 case CC_OP_LTUGTU_64:
789 case CC_OP_SUBU_64:
790 c->u.s64.a = cc_src;
791 c->u.s64.b = cc_dst;
792 c->g1 = c->g2 = true;
793 break;
795 case CC_OP_TM_32:
796 case CC_OP_TM_64:
797 case CC_OP_ICM:
798 c->u.s64.a = tcg_temp_new_i64();
799 c->u.s64.b = tcg_const_i64(0);
800 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
801 break;
803 case CC_OP_ADDU_32:
804 c->is_64 = false;
805 c->u.s32.a = tcg_temp_new_i32();
806 c->u.s32.b = tcg_temp_new_i32();
807 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
808 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
809 tcg_gen_movi_i32(c->u.s32.b, 0);
810 } else {
811 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
813 break;
815 case CC_OP_ADDU_64:
816 c->u.s64.a = cc_vr;
817 c->g1 = true;
818 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
819 c->u.s64.b = tcg_const_i64(0);
820 } else {
821 c->u.s64.b = cc_src;
822 c->g2 = true;
824 break;
826 case CC_OP_STATIC:
827 c->is_64 = false;
828 c->u.s32.a = cc_op;
829 c->g1 = true;
830 switch (mask) {
831 case 0x8 | 0x4 | 0x2: /* cc != 3 */
832 cond = TCG_COND_NE;
833 c->u.s32.b = tcg_const_i32(3);
834 break;
835 case 0x8 | 0x4 | 0x1: /* cc != 2 */
836 cond = TCG_COND_NE;
837 c->u.s32.b = tcg_const_i32(2);
838 break;
839 case 0x8 | 0x2 | 0x1: /* cc != 1 */
840 cond = TCG_COND_NE;
841 c->u.s32.b = tcg_const_i32(1);
842 break;
843 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
844 cond = TCG_COND_EQ;
845 c->g1 = false;
846 c->u.s32.a = tcg_temp_new_i32();
847 c->u.s32.b = tcg_const_i32(0);
848 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
849 break;
850 case 0x8 | 0x4: /* cc < 2 */
851 cond = TCG_COND_LTU;
852 c->u.s32.b = tcg_const_i32(2);
853 break;
854 case 0x8: /* cc == 0 */
855 cond = TCG_COND_EQ;
856 c->u.s32.b = tcg_const_i32(0);
857 break;
858 case 0x4 | 0x2 | 0x1: /* cc != 0 */
859 cond = TCG_COND_NE;
860 c->u.s32.b = tcg_const_i32(0);
861 break;
862 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
863 cond = TCG_COND_NE;
864 c->g1 = false;
865 c->u.s32.a = tcg_temp_new_i32();
866 c->u.s32.b = tcg_const_i32(0);
867 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
868 break;
869 case 0x4: /* cc == 1 */
870 cond = TCG_COND_EQ;
871 c->u.s32.b = tcg_const_i32(1);
872 break;
873 case 0x2 | 0x1: /* cc > 1 */
874 cond = TCG_COND_GTU;
875 c->u.s32.b = tcg_const_i32(1);
876 break;
877 case 0x2: /* cc == 2 */
878 cond = TCG_COND_EQ;
879 c->u.s32.b = tcg_const_i32(2);
880 break;
881 case 0x1: /* cc == 3 */
882 cond = TCG_COND_EQ;
883 c->u.s32.b = tcg_const_i32(3);
884 break;
885 default:
886 /* CC is masked by something else: (8 >> cc) & mask. */
887 cond = TCG_COND_NE;
888 c->g1 = false;
889 c->u.s32.a = tcg_const_i32(8);
890 c->u.s32.b = tcg_const_i32(0);
891 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
892 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
893 break;
895 break;
897 default:
898 abort();
900 c->cond = cond;
903 static void free_compare(DisasCompare *c)
905 if (!c->g1) {
906 if (c->is_64) {
907 tcg_temp_free_i64(c->u.s64.a);
908 } else {
909 tcg_temp_free_i32(c->u.s32.a);
912 if (!c->g2) {
913 if (c->is_64) {
914 tcg_temp_free_i64(c->u.s64.b);
915 } else {
916 tcg_temp_free_i32(c->u.s32.b);
921 /* ====================================================================== */
922 /* Define the insn format enumeration. */
923 #define F0(N) FMT_##N,
924 #define F1(N, X1) F0(N)
925 #define F2(N, X1, X2) F0(N)
926 #define F3(N, X1, X2, X3) F0(N)
927 #define F4(N, X1, X2, X3, X4) F0(N)
928 #define F5(N, X1, X2, X3, X4, X5) F0(N)
930 typedef enum {
931 #include "insn-format.def"
932 } DisasFormat;
934 #undef F0
935 #undef F1
936 #undef F2
937 #undef F3
938 #undef F4
939 #undef F5
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* Compact storage slots; fields sharing a slot never coexist in one
   instruction format. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
1001 struct DisasFields {
1002 uint64_t raw_insn;
1003 unsigned op:8;
1004 unsigned op2:8;
1005 unsigned presentC:16;
1006 unsigned int presentO;
1007 int c[NUM_C_FIELD];
1010 /* This is the way fields are to be accessed out of DisasFields. */
1011 #define have_field(S, F) have_field1((S), FLD_O_##F)
1012 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1014 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1016 return (f->presentO >> c) & 1;
1019 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1020 enum DisasFieldIndexC c)
1022 assert(have_field1(f, o));
1023 return f->c[c];
1026 /* Describe the layout of each field in each format. */
1027 typedef struct DisasField {
1028 unsigned int beg:8;
1029 unsigned int size:8;
1030 unsigned int type:2;
1031 unsigned int indexC:6;
1032 enum DisasFieldIndexO indexO:8;
1033 } DisasField;
1035 typedef struct DisasFormatInfo {
1036 DisasField op[NUM_C_FIELD];
1037 } DisasFormatInfo;
1039 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1040 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1041 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1042 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1043 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1044 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1045 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1046 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1047 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1048 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1049 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1050 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1051 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1052 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1054 #define F0(N) { { } },
1055 #define F1(N, X1) { { X1 } },
1056 #define F2(N, X1, X2) { { X1, X2 } },
1057 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1058 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1059 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1061 static const DisasFormatInfo format_info[] = {
1062 #include "insn-format.def"
1065 #undef F0
1066 #undef F1
1067 #undef F2
1068 #undef F3
1069 #undef F4
1070 #undef F5
1071 #undef R
1072 #undef M
1073 #undef BD
1074 #undef BXD
1075 #undef BDL
1076 #undef BXDL
1077 #undef I
1078 #undef L
1080 /* Generally, we'll extract operands into this structures, operate upon
1081 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1082 of routines below for more details. */
1083 typedef struct {
1084 bool g_out, g_out2, g_in1, g_in2;
1085 TCGv_i64 out, out2, in1, in2;
1086 TCGv_i64 addr1;
1087 } DisasOps;
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated.  To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0.  To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>.  */

#define SPEC_r1_even    1
#define SPEC_r2_even    2
#define SPEC_r3_even    4
#define SPEC_r1_f128    8
#define SPEC_r2_f128    16
/* Return values from translate_one, indicating the state of the TB.  */
typedef enum {
    /* Continue the TB.  */
    NO_EXIT,
    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,
    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,
    /* We have updated the PC and CC values.  */
    EXIT_PC_CC_UPDATED,
    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,
    /* We are exiting the TB to the main loop.  */
    EXIT_PC_STALE_NOCHAIN,
    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
1123 struct DisasInsn {
1124 unsigned opc:16;
1125 DisasFormat fmt:8;
1126 unsigned fac:8;
1127 unsigned spec:8;
1129 const char *name;
1131 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1132 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1133 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1134 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1135 void (*help_cout)(DisasContext *, DisasOps *);
1136 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1138 uint64_t data;
1141 /* ====================================================================== */
1142 /* Miscellaneous helpers, used by several operations. */
1144 static void help_l2_shift(DisasContext *s, DisasFields *f,
1145 DisasOps *o, int mask)
1147 int b2 = get_field(f, b2);
1148 int d2 = get_field(f, d2);
1150 if (b2 == 0) {
1151 o->in2 = tcg_const_i64(d2 & mask);
1152 } else {
1153 o->in2 = get_address(s, 0, b2, d2);
1154 tcg_gen_andi_i64(o->in2, o->in2, mask);
1158 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1160 if (dest == s->next_pc) {
1161 per_branch(s, true);
1162 return NO_EXIT;
1164 if (use_goto_tb(s, dest)) {
1165 update_cc_op(s);
1166 per_breaking_event(s);
1167 tcg_gen_goto_tb(0);
1168 tcg_gen_movi_i64(psw_addr, dest);
1169 tcg_gen_exit_tb((uintptr_t)s->tb);
1170 return EXIT_GOTO_TB;
1171 } else {
1172 tcg_gen_movi_i64(psw_addr, dest);
1173 per_branch(s, false);
1174 return EXIT_PC_UPDATED;
1178 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1179 bool is_imm, int imm, TCGv_i64 cdest)
1181 ExitStatus ret;
1182 uint64_t dest = s->pc + 2 * imm;
1183 TCGLabel *lab;
1185 /* Take care of the special cases first. */
1186 if (c->cond == TCG_COND_NEVER) {
1187 ret = NO_EXIT;
1188 goto egress;
1190 if (is_imm) {
1191 if (dest == s->next_pc) {
1192 /* Branch to next. */
1193 per_branch(s, true);
1194 ret = NO_EXIT;
1195 goto egress;
1197 if (c->cond == TCG_COND_ALWAYS) {
1198 ret = help_goto_direct(s, dest);
1199 goto egress;
1201 } else {
1202 if (TCGV_IS_UNUSED_I64(cdest)) {
1203 /* E.g. bcr %r0 -> no branch. */
1204 ret = NO_EXIT;
1205 goto egress;
1207 if (c->cond == TCG_COND_ALWAYS) {
1208 tcg_gen_mov_i64(psw_addr, cdest);
1209 per_branch(s, false);
1210 ret = EXIT_PC_UPDATED;
1211 goto egress;
1215 if (use_goto_tb(s, s->next_pc)) {
1216 if (is_imm && use_goto_tb(s, dest)) {
1217 /* Both exits can use goto_tb. */
1218 update_cc_op(s);
1220 lab = gen_new_label();
1221 if (c->is_64) {
1222 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1223 } else {
1224 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1227 /* Branch not taken. */
1228 tcg_gen_goto_tb(0);
1229 tcg_gen_movi_i64(psw_addr, s->next_pc);
1230 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1232 /* Branch taken. */
1233 gen_set_label(lab);
1234 per_breaking_event(s);
1235 tcg_gen_goto_tb(1);
1236 tcg_gen_movi_i64(psw_addr, dest);
1237 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1239 ret = EXIT_GOTO_TB;
1240 } else {
1241 /* Fallthru can use goto_tb, but taken branch cannot. */
1242 /* Store taken branch destination before the brcond. This
1243 avoids having to allocate a new local temp to hold it.
1244 We'll overwrite this in the not taken case anyway. */
1245 if (!is_imm) {
1246 tcg_gen_mov_i64(psw_addr, cdest);
1249 lab = gen_new_label();
1250 if (c->is_64) {
1251 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1252 } else {
1253 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1256 /* Branch not taken. */
1257 update_cc_op(s);
1258 tcg_gen_goto_tb(0);
1259 tcg_gen_movi_i64(psw_addr, s->next_pc);
1260 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1262 gen_set_label(lab);
1263 if (is_imm) {
1264 tcg_gen_movi_i64(psw_addr, dest);
1266 per_breaking_event(s);
1267 ret = EXIT_PC_UPDATED;
1269 } else {
1270 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1271 Most commonly we're single-stepping or some other condition that
1272 disables all use of goto_tb. Just update the PC and exit. */
1274 TCGv_i64 next = tcg_const_i64(s->next_pc);
1275 if (is_imm) {
1276 cdest = tcg_const_i64(dest);
1279 if (c->is_64) {
1280 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1281 cdest, next);
1282 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1283 } else {
1284 TCGv_i32 t0 = tcg_temp_new_i32();
1285 TCGv_i64 t1 = tcg_temp_new_i64();
1286 TCGv_i64 z = tcg_const_i64(0);
1287 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1288 tcg_gen_extu_i32_i64(t1, t0);
1289 tcg_temp_free_i32(t0);
1290 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1291 per_branch_cond(s, TCG_COND_NE, t1, z);
1292 tcg_temp_free_i64(t1);
1293 tcg_temp_free_i64(z);
1296 if (is_imm) {
1297 tcg_temp_free_i64(cdest);
1299 tcg_temp_free_i64(next);
1301 ret = EXIT_PC_UPDATED;
1304 egress:
1305 free_compare(c);
1306 return ret;
1309 /* ====================================================================== */
1310 /* The operations. These perform the bulk of the work for any insn,
1311 usually after the operands have been loaded and output initialized. */
/* 64-bit absolute value, computed branch-free: out = (in2 < 0) ? -in2 : in2.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* Select the negated value when in2 is negative.  */
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* Floating-point absolute value: clear the sign bit.  The 32-bit value
   lives in the low 32 bits of the 64-bit register (mask clears bit 31).  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}

static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}

/* 128-bit: clear the sign in the high doubleword (in1), pass the low
   doubleword (in2) through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* 64-bit integer addition; CC computation is handled by the insn table.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Add with carry: out = in1 + in2 + (carry extracted from the current CC).  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the 0/1 result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* Floating-point addition via helpers (32-, 64-, 128-bit operand widths).  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* 128-bit add: helper returns the high half; low half comes back via
   return_low128().  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Bitwise AND of the full 64-bit operands.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* AND-immediate into a sub-field of the register.  insn->data packs the
   field position (low byte = shift) and width (high bits = size).  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Move the immediate into position and set all bits outside the
       field so they are preserved by the AND.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Branch-and-save: store the link information, then branch to the address
   in in2 unless the target register operand was 0 (in2 unused).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}

/* Branch-and-save with immediate displacement (halfword-scaled i2).  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* Branch on condition.  The no-branch BCR forms with R2 = 0 double as
   serialization instructions (mask 14/15); otherwise the condition from
   mask m1 is handed to the common branch helper.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch-on-count (32-bit): decrement the low half of r1 and branch while
   the decremented value is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch-on-count on the HIGH 32 bits of r1 (always immediate target).  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}

/* Branch-on-count (64-bit): decrement r1 in place; the compare operates
   on the global register directly (g1 = true).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch-on-index (32-bit): r1 += r3, branch comparing the sum against the
   odd register of the r3 pair.  insn->data selects BXLE (<=) vs BXH (>).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}

/* Branch-on-index (64-bit).  When r1 aliases the comparand register
   (r1 == r3|1) the comparand must be copied before r1 is updated.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Compare-and-branch: compare in1 against in2 with the condition selected
   by m3 (made unsigned when insn->data is set), branching to either the
   immediate i4 or the computed b4/d4 address.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* Floating-point compare via helpers; the helper writes the CC value,
   which set_cc_static() then latches.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Float to integer conversions via helpers.  Naming: c{f,g}{e,d,x}b are
   float->signed (32/64-bit result), cl{f,g}{e,d,x}b are float->unsigned.
   m3 is the rounding-mode modifier passed through to the helper; CC is
   derived from the source value by gen_set_cc_nz_f*().  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}

static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* Integer to float conversions via helpers; c{e,d,x}gb take a signed
   source, c{e,d,x}lgb an unsigned one.  The 128-bit results additionally
   fetch the low doubleword via return_low128().  No CC is set here.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}

static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Checksum: the helper computes the checksum and the number of bytes
   consumed; afterwards the r2/r2+1 address/length pair is advanced by
   that amount.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the source address and decrement the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* Compare logical (storage-to-storage).  Power-of-two lengths up to 8 are
   inlined as two loads plus the LTUGTU comparison; everything else goes
   through the clc helper, which sets the CC itself.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* Arbitrary length: let the helper do the byte loop and set CC.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* Compare-logical-long family.  All three use even/odd register pairs and
   raise a specification exception for odd register numbers before calling
   the respective helper, which sets the CC.  */
static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* Compare logical under mask: low 32 bits of in1 are compared against
   memory bytes selected by mask m3; the helper sets the CC.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Compare string: helper takes the terminator from regs[0], returns the
   updated first address in in1 and the second via return_low128.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Copy sign: out = sign bit of in1 combined with magnitude of in2.  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* Compare-and-swap, implemented with an atomic cmpxchg.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* Compare-double-and-swap (128-bit), delegated entirely to the helper,
   which also computes the CC.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
/* Compare-and-swap-and-store via helper; the helper sets the CC.  */
static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* Compare-and-swap-and-purge (privileged): atomic cmpxchg on the aligned
   address, then a TLB purge when the swap succeeded and bit 0 of R2 is set.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Align the address down to the operand size.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return NO_EXIT;
}
#endif
/* Convert to decimal: the helper converts the low 32 bits of in1 and the
   8-byte result is stored at the address in in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* Compare-and-trap: branch around the trap when the INVERTED condition
   holds; i.e. trap exactly when the m3 condition is satisfied.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        /* Logical (unsigned) variant.  */
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap. */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
/* Unicode conversion family (CU12/CU14/CU21/CU24/CU41/CU42); insn->data
   encodes source/destination formats.  R1 and R2 are register pairs and
   must be even; m3 (well-formedness check flag) is only honored with the
   ETF3 enhancement facility.  */
static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even. */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): PSW address and CC are synchronized before the
   helper runs, since the helper may inspect or alter machine state.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
#endif
/* Integer and floating-point division, all via helpers.  For the integer
   forms the helper returns the remainder directly and the quotient through
   return_low128().  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

/* 128/64 unsigned divide: dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}

static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Extract access register r2 into the output.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}

static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided. */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}

/* Extract the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* Extract PSW: high half of psw_mask to r1, low half to r2 (if nonzero).  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the target instruction with its second byte modified by
   bits of R1.  Nested EXECUTE raises an exception; otherwise state is
   synchronized and the helper installs the modified instruction.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed. */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* R1 == 0 means "no modification"; pass a zero constant.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
/* Load FP integer (round to integral value) via helpers; m3 carries the
   rounding-mode modifier.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}

static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* Find leftmost one: R1 = leading-zero count (64 when input is 0),
   R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64. */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2297 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2299 int m3 = get_field(s->fields, m3);
2300 int pos, len, base = s->insn->data;
2301 TCGv_i64 tmp = tcg_temp_new_i64();
2302 uint64_t ccm;
2304 switch (m3) {
2305 case 0xf:
2306 /* Effectively a 32-bit load. */
2307 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2308 len = 32;
2309 goto one_insert;
2311 case 0xc:
2312 case 0x6:
2313 case 0x3:
2314 /* Effectively a 16-bit load. */
2315 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2316 len = 16;
2317 goto one_insert;
2319 case 0x8:
2320 case 0x4:
2321 case 0x2:
2322 case 0x1:
2323 /* Effectively an 8-bit load. */
2324 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2325 len = 8;
2326 goto one_insert;
2328 one_insert:
2329 pos = base + ctz32(m3) * 8;
2330 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2331 ccm = ((1ull << len) - 1) << pos;
2332 break;
2334 default:
2335 /* This is going to be a sequence of loads and inserts. */
2336 pos = base + 32 - 8;
2337 ccm = 0;
2338 while (m3) {
2339 if (m3 & 0x8) {
2340 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2341 tcg_gen_addi_i64(o->in2, o->in2, 1);
2342 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2343 ccm |= 0xff << pos;
2345 m3 = (m3 << 1) & 0xf;
2346 pos -= 8;
2348 break;
2351 tcg_gen_movi_i64(tmp, ccm);
2352 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2353 tcg_temp_free_i64(tmp);
2354 return NO_EXIT;
/* Insert immediate into a register field; insn->data packs shift (low
   byte) and size, as in op_andi.  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* Insert program mask: build bits 24-31 of the output from the program
   mask (psw_mask bits) and the current CC, preserving the rest of out.  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Program mask: bits extracted from psw_mask into position 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* CC into bits 28-29 (position 28 after the shift).  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
#ifndef CONFIG_USER_ONLY
/* Invalidate DAT table entry (privileged).  m4 is only passed through when
   the local-TLB-clearing facility is present; otherwise forced to 0.  */
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* Invalidate page table entry (privileged), same m4 handling as above.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}

/* Insert storage key extended (privileged), via helper.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
#endif
/* Compare-and-signal (FP compare that traps on QNaN) via helpers;
   the helper writes the CC.  */
static ExitStatus op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* Load-and-<op> family: perform the operation atomically in memory,
   returning the ORIGINAL memory value in in2, then recompute the result
   separately into out so the CC machinery can use it.  */
static ExitStatus op_laa(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lan(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lao(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_lax(DisasContext *s, DisasOps *o)
{
    /* The real output is indeed the original value in memory.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* LOAD LENGTHENED (short BFP to long).  */
static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (long BFP to short).  */
static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
{
    gen_helper_ledb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (extended BFP to long): in1/in2 hold the 128-bit input.  */
static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD ROUNDED (extended BFP to short).  */
static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
{
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* LOAD LENGTHENED (long BFP to extended): 128-bit result comes back
   in out plus the helper's low 64 bits (return_low128).  */
static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* LOAD LENGTHENED (short BFP to extended).  */
static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* LOAD LOGICAL THIRTY ONE BITS: keep only bits 33-63 of the source.  */
static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return NO_EXIT;
}

/* Memory loads of various widths; in2 is the address, the result is
   sign- or zero-extended to 64 bits as the name indicates.  */
static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* LOAD AND TRAP: store the loaded value, then trap if it is zero.  */
static ExitStatus op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD AND TRAP (64-bit).  */
static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD HIGH AND TRAP: writes the high half of r1.  */
static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL AND TRAP (32 -> 64 zero-extend).  */
static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}

/* LOAD LOGICAL THIRTY ONE BITS AND TRAP.  */
static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return NO_EXIT;
}
2629 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2631 DisasCompare c;
2633 disas_jcc(s, &c, get_field(s->fields, m3));
2635 if (c.is_64) {
2636 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2637 o->in2, o->in1);
2638 free_compare(&c);
2639 } else {
2640 TCGv_i32 t32 = tcg_temp_new_i32();
2641 TCGv_i64 t, z;
2643 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2644 free_compare(&c);
2646 t = tcg_temp_new_i64();
2647 tcg_gen_extu_i32_i64(t, t32);
2648 tcg_temp_free_i32(t32);
2650 z = tcg_const_i64(0);
2651 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2652 tcg_temp_free_i64(t);
2653 tcg_temp_free_i64(z);
2656 return NO_EXIT;
2659 #ifndef CONFIG_USER_ONLY
/* LOAD CONTROL (32-bit): privileged; helper loads control registers
   r1..r3 from the address in in2.  */
static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* LOAD CONTROL (64-bit).  */
static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* LOAD REAL ADDRESS: privileged; the helper also sets the CC.  */
static ExitStatus op_lra(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD PROGRAM PARAMETER: privileged; store in2 into env->pp.  */
static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);

    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return NO_EXIT;
}
/* LOAD PSW (short, 64-bit format): privileged; loads mask and address
   as two 32-bit words and installs the new PSW.  Never returns to the
   current TB.  */
static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}

/* LOAD PSW EXTENDED (two 64-bit words): privileged.  */
static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    check_privileged(s);
    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return EXIT_NORETURN;
}
2735 #endif
/* LOAD ACCESS MULTIPLE: helper loads access registers r1..r3 from the
   address in in2.  */
static ExitStatus op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
2747 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2749 int r1 = get_field(s->fields, r1);
2750 int r3 = get_field(s->fields, r3);
2751 TCGv_i64 t1, t2;
2753 /* Only one register to read. */
2754 t1 = tcg_temp_new_i64();
2755 if (unlikely(r1 == r3)) {
2756 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2757 store_reg32_i64(r1, t1);
2758 tcg_temp_free(t1);
2759 return NO_EXIT;
2762 /* First load the values of the first and last registers to trigger
2763 possible page faults. */
2764 t2 = tcg_temp_new_i64();
2765 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2766 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2767 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2768 store_reg32_i64(r1, t1);
2769 store_reg32_i64(r3, t2);
2771 /* Only two registers to read. */
2772 if (((r1 + 1) & 15) == r3) {
2773 tcg_temp_free(t2);
2774 tcg_temp_free(t1);
2775 return NO_EXIT;
2778 /* Then load the remaining registers. Page fault can't occur. */
2779 r3 = (r3 - 1) & 15;
2780 tcg_gen_movi_i64(t2, 4);
2781 while (r1 != r3) {
2782 r1 = (r1 + 1) & 15;
2783 tcg_gen_add_i64(o->in2, o->in2, t2);
2784 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2785 store_reg32_i64(r1, t1);
2787 tcg_temp_free(t2);
2788 tcg_temp_free(t1);
2790 return NO_EXIT;
2793 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2795 int r1 = get_field(s->fields, r1);
2796 int r3 = get_field(s->fields, r3);
2797 TCGv_i64 t1, t2;
2799 /* Only one register to read. */
2800 t1 = tcg_temp_new_i64();
2801 if (unlikely(r1 == r3)) {
2802 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2803 store_reg32h_i64(r1, t1);
2804 tcg_temp_free(t1);
2805 return NO_EXIT;
2808 /* First load the values of the first and last registers to trigger
2809 possible page faults. */
2810 t2 = tcg_temp_new_i64();
2811 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2812 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2813 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2814 store_reg32h_i64(r1, t1);
2815 store_reg32h_i64(r3, t2);
2817 /* Only two registers to read. */
2818 if (((r1 + 1) & 15) == r3) {
2819 tcg_temp_free(t2);
2820 tcg_temp_free(t1);
2821 return NO_EXIT;
2824 /* Then load the remaining registers. Page fault can't occur. */
2825 r3 = (r3 - 1) & 15;
2826 tcg_gen_movi_i64(t2, 4);
2827 while (r1 != r3) {
2828 r1 = (r1 + 1) & 15;
2829 tcg_gen_add_i64(o->in2, o->in2, t2);
2830 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2831 store_reg32h_i64(r1, t1);
2833 tcg_temp_free(t2);
2834 tcg_temp_free(t1);
2836 return NO_EXIT;
2839 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2841 int r1 = get_field(s->fields, r1);
2842 int r3 = get_field(s->fields, r3);
2843 TCGv_i64 t1, t2;
2845 /* Only one register to read. */
2846 if (unlikely(r1 == r3)) {
2847 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2848 return NO_EXIT;
2851 /* First load the values of the first and last registers to trigger
2852 possible page faults. */
2853 t1 = tcg_temp_new_i64();
2854 t2 = tcg_temp_new_i64();
2855 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2856 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2857 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2858 tcg_gen_mov_i64(regs[r1], t1);
2859 tcg_temp_free(t2);
2861 /* Only two registers to read. */
2862 if (((r1 + 1) & 15) == r3) {
2863 tcg_temp_free(t1);
2864 return NO_EXIT;
2867 /* Then load the remaining registers. Page fault can't occur. */
2868 r3 = (r3 - 1) & 15;
2869 tcg_gen_movi_i64(t1, 8);
2870 while (r1 != r3) {
2871 r1 = (r1 + 1) & 15;
2872 tcg_gen_add_i64(o->in2, o->in2, t1);
2873 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2875 tcg_temp_free(t1);
2877 return NO_EXIT;
/* LOAD PAIR DISJOINT: two interlocked loads from independent
   addresses.  In a parallel context the interlock cannot be emulated
   inline, so fall back to stop-the-world single stepping.  */
static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    TCGMemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (parallel_cpus) {
        potential_page_fault(s);
        gen_exception(EXCP_ATOMIC);
        return EXIT_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* LOAD PAIR FROM QUADWORD: helper returns the 128-bit value split
   across out and the low-128 return slot.  */
static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
{
    gen_helper_lpq(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
2912 #ifndef CONFIG_USER_ONLY
/* LOAD USING REAL ADDRESS (32-bit): privileged.  */
static ExitStatus op_lura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lura(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* LOAD USING REAL ADDRESS (64-bit): privileged.  */
static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2928 #endif
/* LOAD AND ZERO RIGHTMOST BYTE: clear the low 8 bits of the source.  */
static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return NO_EXIT;
}

/* Plain register/value move: steal in2 as the output rather than
   copying, and mark in2 unused so it is not double-freed.  */
static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;
    return NO_EXIT;
}
/* Move with update of access register 1 (used by MVCDK/MVCSK-style
   insns): the moved value is in2; ar1 is derived from the current
   address-space-control mode of the PSW and stored into aregs[1].  */
static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    TCGv ar1 = tcg_temp_new_i64();

    /* Steal in2 as the output, as in op_mov2.  */
    o->out = o->in2;
    o->g_out = o->g_in2;
    TCGV_UNUSED_I64(o->in2);
    o->g_in2 = false;

    switch (s->tb->flags & FLAG_MASK_ASC) {
    case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 0);
        break;
    case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 1);
        break;
    case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
        /* In secondary mode, propagate the base register's access
           register (b2 == 0 means no base register).  */
        if (b2) {
            tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
        } else {
            tcg_gen_movi_i64(ar1, 0);
        }
        break;
    case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
        tcg_gen_movi_i64(ar1, 2);
        break;
    }

    tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
    tcg_temp_free_i64(ar1);

    return NO_EXIT;
}
/* Move a 128-bit pair: steal in1/in2 as out/out2 and mark the inputs
   unused so they are not double-freed.  */
static ExitStatus op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    TCGV_UNUSED_I64(o->in1);
    TCGV_UNUSED_I64(o->in2);
    o->g_in1 = o->g_in2 = false;
    return NO_EXIT;
}
/* MOVE (character): copy l1+1 bytes from in2 to addr1 via helper.  */
static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE INVERSE.  */
static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MOVE LONG: register pairs r1/r1+1 and r2/r2+1 describe the operands,
   so the register numbers must be even.  */
static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_mvcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG EXTENDED: pad byte comes in via in2.  */
static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE LONG UNICODE.  */
static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* MOVE WITH OPTIONAL SPECIFICATIONS: r3 carries the operand-access
   control word.  */
static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return NO_EXIT;
}
3079 #ifndef CONFIG_USER_ONLY
/* MOVE TO PRIMARY: privileged.  NOTE(review): the SS-format l1 field
   holds the R1 register number for this insn, hence reading it as r1.  */
static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE TO SECONDARY: privileged; see op_mvcp for the l1/r1 field use.  */
static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    check_privileged(s);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
3097 #endif
/* MOVE NUMERICS.  */
static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE WITH OFFSET.  */
static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* MOVE PAGE: r0 carries the access specification.  */
static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* MOVE STRING: r0 holds the terminating byte; the helper returns the
   updated first-operand address, and the updated second-operand
   address comes back via the low-128 slot.  */
static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}

/* MOVE ZONES.  */
static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* MULTIPLY (low 64 bits of the product).  */
static ExitStatus op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY LOGICAL: full 128-bit product in out (high) / out2 (low).  */
static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP).  */
static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (short BFP operands, long BFP result).  */
static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP).  */
static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* MULTIPLY (extended BFP): 128-bit first operand in out/out2.  */
static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY (long BFP operands, extended BFP result).  */
static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (short BFP): out = in1 * in2 + f[r3].  */
static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND ADD (long BFP).  */
static ExitStatus op_madb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (short BFP).  */
static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return NO_EXIT;
}

/* MULTIPLY AND SUBTRACT (long BFP).  */
static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
    return NO_EXIT;
}
/* LOAD NEGATIVE: out = -|in2|, via a conditional negate.  */
static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* If in2 >= 0, take the negated copy, else in2 is already negative. */
    tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}

/* LOAD NEGATIVE (short BFP): force the sign bit on.  */
static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (long BFP).  */
static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD NEGATIVE (extended BFP): sign lives in the high doubleword.  */
static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* AND (character): memory-to-memory AND over l1+1 bytes.  */
static ExitStatus op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* LOAD COMPLEMENT.  */
static ExitStatus op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (short BFP): flip the sign bit.  */
static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (long BFP).  */
static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return NO_EXIT;
}

/* LOAD COMPLEMENT (extended BFP): sign lives in the high doubleword.  */
static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* OR (character): memory-to-memory OR over l1+1 bytes.  */
static ExitStatus op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* OR (register).  */
static ExitStatus op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* OR IMMEDIATE: insn->data encodes field size (high byte) and shift
   (low byte) of the immediate within the register.  */
static ExitStatus op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* PACK.  */
static ExitStatus op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* PACK ASCII.  */
static ExitStatus op_pka(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes. */
    if (l2 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pka(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* PACK UNICODE.  */
static ExitStatus op_pku(DisasContext *s, DisasOps *o)
{
    int l2 = get_field(s->fields, l2) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes. */
    if ((l2 & 1) || (l2 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l2);
    gen_helper_pku(cpu_env, o->addr1, o->in2, l);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}
/* POPULATION COUNT.  */
static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return NO_EXIT;
}
3354 #ifndef CONFIG_USER_ONLY
/* PURGE TLB: privileged.  */
static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_ptlb(cpu_env);
    return NO_EXIT;
}
3361 #endif
/* ROTATE THEN INSERT SELECTED BITS (risbg/risbhg/risblg): rotate in2
   left by i5 and insert bits i3..i4 of the result into in1 (the high
   or low word only for the hg/lg variants).  Where possible this is
   lowered to a single extract or deposit op.  */
static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;         /* zero-remaining-bits flag */
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        abort();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        if (s->fields->op2 == 0x55) {
            imask = 0;
        } else {
            imask = ~pmask;
        }
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return NO_EXIT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        /* General case: mask the rotated source, keep the IMASK bits of
           the destination, and merge. */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return NO_EXIT;
}
/* ROTATE THEN <AND|OR|XOR> SELECTED BITS: rotate in2 left by i5 and
   apply the boolean op to bits i3..i4 of out; only those bits feed
   the CC.  */
static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    uint64_t mask;

    /* If this is a test-only form, arrange to discard the result. */
    if (i3 & 0x80) {
        o->out = tcg_temp_new_i64();
        o->g_out = false;
    }

    i3 &= 63;
    i4 &= 63;
    i5 &= 63;

    /* MASK is the set of bits to be operated on from R2.
       Take care for I3/I4 wraparound. */
    mask = ~0ull >> i3;
    if (i3 <= i4) {
        mask ^= ~0ull >> i4 >> 1;
    } else {
        mask |= ~(~0ull >> i4 >> 1);
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, i5);

    /* Operate.  Bits outside MASK are made neutral for the given op
       (1s for AND, 0s for OR/XOR) so the rest of out is untouched. */
    switch (s->fields->op2) {
    case 0x55: /* AND */
        tcg_gen_ori_i64(o->in2, o->in2, ~mask);
        tcg_gen_and_i64(o->out, o->out, o->in2);
        break;
    case 0x56: /* OR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
        break;
    case 0x57: /* XOR */
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_xor_i64(o->out, o->out, o->in2);
        break;
    default:
        abort();
    }

    /* Set the CC. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Byte-swap the low 16 bits.  */
static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap the low 32 bits.  */
static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return NO_EXIT;
}

/* Byte-swap all 64 bits.  */
static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return NO_EXIT;
}
3527 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3529 TCGv_i32 t1 = tcg_temp_new_i32();
3530 TCGv_i32 t2 = tcg_temp_new_i32();
3531 TCGv_i32 to = tcg_temp_new_i32();
3532 tcg_gen_extrl_i64_i32(t1, o->in1);
3533 tcg_gen_extrl_i64_i32(t2, o->in2);
3534 tcg_gen_rotl_i32(to, t1, t2);
3535 tcg_gen_extu_i32_i64(o->out, to);
3536 tcg_temp_free_i32(t1);
3537 tcg_temp_free_i32(t2);
3538 tcg_temp_free_i32(to);
3539 return NO_EXIT;
/* ROTATE LEFT SINGLE LOGICAL (64-bit).  */
static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
3548 #ifndef CONFIG_USER_ONLY
/* RESET REFERENCE BIT EXTENDED: privileged; helper sets the CC.  */
static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET ADDRESS SPACE CONTROL (FAST): privileged.  */
static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return EXIT_PC_STALE;
}
3564 #endif
/* SET ADDRESSING MODE (sam24/sam31/sam64, selected by insn->data):
   truncate the next PC to the new mode's address width and update the
   PSW addressing-mode bits.  */
static ExitStatus op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;          /* 24-bit mode */
        break;
    case 1:
        mask = 0x7fffffff;        /* 31-bit mode */
        break;
    default:
        mask = -1;                /* 64-bit mode */
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed.  Thus the PoO
       documents that Bad Things Happen two bytes before the end.  */
    if (s->pc & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    s->next_pc &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return EXIT_PC_STALE;
}
/* SET ACCESS: store the low 32 bits of in2 into access register r1.  */
static ExitStatus op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return NO_EXIT;
}

/* SUBTRACT (short BFP).  */
static ExitStatus op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (long BFP).  */
static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT (extended BFP): 128-bit first operand in out/out2.  */
static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}

/* SQUARE ROOT (short BFP).  */
static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (long BFP).  */
static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return NO_EXIT;
}

/* SQUARE ROOT (extended BFP).  */
static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
3646 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL: privileged; helper sets the CC.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged; helper sets the CC.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
3666 #endif
/* STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data):
   store r1 to memory only when the m3 condition holds.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high half of r1.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic).  insn->data is the index of the sign
   bit (31 for the 32-bit form, 63 for the 64-bit form) and also selects
   the matching CC_OP.  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE LOGICAL.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SET FPC: install a new floating-point control register.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
3757 static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
3759 int b2 = get_field(s->fields, b2);
3760 int d2 = get_field(s->fields, d2);
3761 TCGv_i64 t1 = tcg_temp_new_i64();
3762 TCGv_i64 t2 = tcg_temp_new_i64();
3763 int mask, pos, len;
3765 switch (s->fields->op2) {
3766 case 0x99: /* SRNM */
3767 pos = 0, len = 2;
3768 break;
3769 case 0xb8: /* SRNMB */
3770 pos = 0, len = 3;
3771 break;
3772 case 0xb9: /* SRNMT */
3773 pos = 4, len = 3;
3774 break;
3775 default:
3776 tcg_abort();
3778 mask = (1 << len) - 1;
3780 /* Insert the value into the appropriate field of the FPC. */
3781 if (b2 == 0) {
3782 tcg_gen_movi_i64(t1, d2 & mask);
3783 } else {
3784 tcg_gen_addi_i64(t1, regs[b2], d2);
3785 tcg_gen_andi_i64(t1, t1, mask);
3787 tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
3788 tcg_gen_deposit_i64(t2, t2, t1, pos, len);
3789 tcg_temp_free_i64(t1);
3791 /* Then install the new FPC to set the rounding mode in fpu_status. */
3792 gen_helper_sfpc(cpu_env, t2);
3793 tcg_temp_free_i64(t2);
3794 return NO_EXIT;
3797 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS.  Privileged; bits of in2 are moved into the
   key field of the PSW mask.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED.  Privileged; storage-key bookkeeping is in
   the helper.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK.  Privileged; replaces the top byte of the PSW mask.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE CPU ADDRESS.  Privileged.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    /* ??? Surely cpu address != cpu number.  In any case the previous
       version of this stored more than the required half-word, so it
       is unlikely this has ever been tested. */
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
    return NO_EXIT;
}

/* STORE CLOCK.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}

/* STORE CLOCK EXTENDED: 16-byte store of the extended TOD value.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR.  Privileged.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR.  Privileged.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}

/* STORE CONTROL (64-bit, STCTG).  Privileged; stores cregs r1..r3.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit, STCTL).  Privileged.  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CPU ID.  Privileged; aligned 8-byte store of env->cpuid.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    return NO_EXIT;
}

/* SET CPU TIMER.  Privileged.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST.  Privileged; stores to the prefix area.  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER.  Privileged.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION.  Privileged; regs[0]/regs[1] carry the
   function code and selectors, the helper sets cc.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX.  Privileged.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}

/* The channel-subsystem instructions below are all privileged; where a
   subchannel is addressed it is taken from regs[1], and each helper
   sets the condition code.  */

/* CANCEL SUBCHANNEL.  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL.  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL.  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL: in2 addresses the SCHIB operand.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH.  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL.  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL: in2 addresses the ORB operand.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL: in2 addresses the SCHIB destination.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL: in2 addresses the IRB destination.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL: in2 addresses the command block.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX.  Privileged; masks env->psa to the architected bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}

/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM otherwise).  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}

/* STORE USING REAL ADDRESS (32-bit).  Privileged.  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit).  Privileged.  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
4081 #endif
/* STORE FACILITY LIST EXTENDED (not privileged); helper sets cc.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    potential_page_fault(s);
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Generic byte store: in1 -> mem[in2].  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Generic halfword store.  */
static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Generic word store.  */
static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* Generic doubleword store.  */
static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

/* STORE ACCESS MULTIPLE: store access registers r1..r3.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK.  insn->data is the bit base within the
   register (selects low or high word variant); contiguous masks become
   a single sized store, anything else a byte-by-byte sequence.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Position of the lowest selected byte within the register.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                /* Note: advances o->in2 in place for the next byte.  */
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE (STM/STMG).  insn->data is the element size (4 or 8);
   registers r1..r3 (wrapping at 15) go to consecutive slots.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        /* Advance the address in place and wrap the register number.  */
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
4198 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4200 int r1 = get_field(s->fields, r1);
4201 int r3 = get_field(s->fields, r3);
4202 TCGv_i64 t = tcg_temp_new_i64();
4203 TCGv_i64 t4 = tcg_const_i64(4);
4204 TCGv_i64 t32 = tcg_const_i64(32);
4206 while (1) {
4207 tcg_gen_shl_i64(t, regs[r1], t32);
4208 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4209 if (r1 == r3) {
4210 break;
4212 tcg_gen_add_i64(o->in2, o->in2, t4);
4213 r1 = (r1 + 1) & 15;
4216 tcg_temp_free_i64(t);
4217 tcg_temp_free_i64(t4);
4218 tcg_temp_free_i64(t32);
4219 return NO_EXIT;
/* STORE PAIR TO QUADWORD: 16-byte store of out/out2 via the helper.  */
static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{
    gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    return NO_EXIT;
}

/* SEARCH STRING: helper updates r1/r2 and sets cc.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SEARCH STRING UNICODE: as SRST, but on 2-byte characters.  */
static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* SUBTRACT: plain difference; cc is handled by the cout hooks.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: subtract in2, then subtract the borrow derived
   from the current condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the svc code and instruction length, then
   raise EXCP_SVC.  Never returns to the translated code.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}

/* TEST ADDRESSING MODE: cc encodes the 64-bit and 31-bit mode flags,
   which are known at translation time from tb->flags.  */
static ExitStatus op_tam(DisasContext *s, DisasOps *o)
{
    int cc = 0;

    cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
    cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
    gen_op_movi_cc(s, cc);
    return NO_EXIT;
}
/* TEST DATA CLASS (short BFP); helper sets cc.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (long BFP).  */
static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST DATA CLASS (extended BFP): out/out2 hold the 128-bit operand.  */
static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4336 #ifndef CONFIG_USER_ONLY
/* TEST BLOCK.  Privileged; helper sets cc.  */
static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST PROTECTION; helper sets cc.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4353 #endif
/* TEST DECIMAL: l1 is a length-1 field, hence the +1.  */
static ExitStatus op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE: table lookup over the first operand, in the helper.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED: helper returns the updated register pair and cc.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST.  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST REVERSE.  */
static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low opcode
   bits select source/destination character sizes.  The test character
   in regs[0] is honored unless m3 bit 0 disables it (m3 only valid
   with the ETF2-enhancement facility).  */
static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* Test-character comparison disabled: use an impossible value.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        /* Truncate the test character to the source character size.  */
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST AND SET: atomically exchange the byte with 0xff; cc comes from
   bit 7 (the leftmost bit) of the old byte value.  */
static ExitStatus op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return NO_EXIT;
}
/* UNPACK: performed entirely in the helper.  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* UNPACK ASCII: first-operand length is architecturally limited.  */
static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK UNICODE: length must be even (2-byte characters).  */
static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character): x XOR x == 0, so the common "clear storage"
   idiom (both operands identical) is expanded inline as stores of zero;
   everything else goes through the helper.  l is the length-1 field.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        l++;
        /* Emit the largest stores first, advancing only while bytes
           remain so the final address is not incremented past the end.  */
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (register/memory forms).  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE: insn->data packs (field size << 8) | shift,
   placing the immediate within the 64-bit register.  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}

/* Produce a zero output.  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

/* Produce a zero output pair; out2 aliases out, so mark it "global" to
   keep the generic code from freeing the same temp twice.  */
static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
/* ====================================================================== */
/* The "Cc OUTput" generators.  Given the generated output (and in some cases
   the original inputs), update the various cc data structures in order to
   be able to compute the new condition code.  Each hook merely records a
   CC_OP and its inputs; the condition code itself is computed lazily.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

static void cout_nz32(DisasContext *s, DisasOps *o)
{
    /* Only the low 32 bits participate in the test.  */
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
/* ====================================================================== */
/* The "PREParation" generators.  These initialize the DisasOps.OUT fields
   with the TCG register to which we will write.  Used in combination with
   the "wout" generators, in some cases we need a new temporary, and in
   some cases we can write to a TCG global.  The g_out/g_out2 flags mark
   globals so the generic code does not free them.  */

static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly to the even/odd register pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write directly to the 128-bit FP register pair f1/f1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
/* ====================================================================== */
/* The "Write OUTput" generators.  These generally perform some non-trivial
   copy of data to TCG globals, or to main memory.  The trivial cases are
   generally handled by having a "prep" generator install the TCG global
   as the destination of the operation.  */

static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Replace only the low 8 bits of r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Replace only the low 16 bits of r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store a 32-bit pair into the even/odd register pair r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split a 64-bit value across the even/odd pair: low half to r1+1,
   high half to r1.  Note this clobbers o->out via the shift.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4867 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4869 int f1 = get_field(s->fields, r1);
4870 store_freg(f1, o->out);
4871 store_freg(f1 + 2, o->out2);
4873 #define SPEC_wout_x1 SPEC_r1_f128
/* Conditional write-back: skip the store when r1 == r2 (the operation
   already wrote through the shared register).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Stores of the output to the first-operand address, by size.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* 32-bit store to the second-operand address.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Write the (possibly updated) second input back to r1.  */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
/* ====================================================================== */
/* The "INput 1" generators.  These load the first operand to an insn.
   The _o variants alias the register global directly (g_in1 set so it
   is not freed); the sized variants load into a fresh temporary.  */

static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High half of r1, shifted down into the low 32 bits.  */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair r1/r1+1.  */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* Concatenate the 32-bit pair r1 (high) / r1+1 (low) into 64 bits.  */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0
5024 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5026 o->in1 = tcg_temp_new_i64();
5027 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5029 #define SPEC_in1_r3_32s 0
5031 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5033 o->in1 = tcg_temp_new_i64();
5034 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5036 #define SPEC_in1_r3_32u 0
5038 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5040 int r3 = get_field(f, r3);
5041 o->in1 = tcg_temp_new_i64();
5042 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5044 #define SPEC_in1_r3_D32 SPEC_r3_even
5046 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5048 o->in1 = load_freg32_i64(get_field(f, r1));
5050 #define SPEC_in1_e1 0
5052 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5054 o->in1 = fregs[get_field(f, r1)];
5055 o->g_in1 = true;
5057 #define SPEC_in1_f1_o 0
5059 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5061 int r1 = get_field(f, r1);
5062 o->out = fregs[r1];
5063 o->out2 = fregs[r1 + 2];
5064 o->g_out = o->g_out2 = true;
5066 #define SPEC_in1_x1_o SPEC_r1_f128
5068 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5070 o->in1 = fregs[get_field(f, r3)];
5071 o->g_in1 = true;
5073 #define SPEC_in1_f3_o 0
5075 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5077 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5079 #define SPEC_in1_la1 0
5081 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5083 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5084 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5086 #define SPEC_in1_la2 0
5088 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5090 in1_la1(s, f, o);
5091 o->in1 = tcg_temp_new_i64();
5092 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5094 #define SPEC_in1_m1_8u 0
5096 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5098 in1_la1(s, f, o);
5099 o->in1 = tcg_temp_new_i64();
5100 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5102 #define SPEC_in1_m1_16s 0
5104 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5106 in1_la1(s, f, o);
5107 o->in1 = tcg_temp_new_i64();
5108 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5110 #define SPEC_in1_m1_16u 0
5112 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5114 in1_la1(s, f, o);
5115 o->in1 = tcg_temp_new_i64();
5116 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5118 #define SPEC_in1_m1_32s 0
5120 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5122 in1_la1(s, f, o);
5123 o->in1 = tcg_temp_new_i64();
5124 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5126 #define SPEC_in1_m1_32u 0
5128 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5130 in1_la1(s, f, o);
5131 o->in1 = tcg_temp_new_i64();
5132 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5134 #define SPEC_in1_m1_64 0
5136 /* ====================================================================== */
5137 /* The "INput 2" generators.  These load the second operand to an insn. */
/* Register sources for operand 2; _o variants alias the global TCG
   register (g_in2 set so it is not freed afterwards). */
5139 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5141 o->in2 = regs[get_field(f, r1)];
5142 o->g_in2 = true;
5144 #define SPEC_in2_r1_o 0
5146 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5148 o->in2 = tcg_temp_new_i64();
5149 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5151 #define SPEC_in2_r1_16u 0
5153 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5155 o->in2 = tcg_temp_new_i64();
5156 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5158 #define SPEC_in2_r1_32u 0
/* Concatenate even/odd pair r1/r1+1 (r1+1 = low word) into 64 bits. */
5160 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5162 int r1 = get_field(f, r1);
5163 o->in2 = tcg_temp_new_i64();
5164 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5166 #define SPEC_in2_r1_D32 SPEC_r1_even
5168 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5170 o->in2 = load_reg(get_field(f, r2));
5172 #define SPEC_in2_r2 0
5174 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5176 o->in2 = regs[get_field(f, r2)];
5177 o->g_in2 = true;
5179 #define SPEC_in2_r2_o 0
/* r2 == 0 means "no operand": leave o->in2 unset in that case. */
5181 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5183 int r2 = get_field(f, r2);
5184 if (r2 != 0) {
5185 o->in2 = load_reg(r2);
5188 #define SPEC_in2_r2_nz 0
/* Sign/zero extensions of r2 at various widths. */
5190 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5192 o->in2 = tcg_temp_new_i64();
5193 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5195 #define SPEC_in2_r2_8s 0
5197 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5199 o->in2 = tcg_temp_new_i64();
5200 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5202 #define SPEC_in2_r2_8u 0
5204 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5206 o->in2 = tcg_temp_new_i64();
5207 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5209 #define SPEC_in2_r2_16s 0
5211 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5213 o->in2 = tcg_temp_new_i64();
5214 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5216 #define SPEC_in2_r2_16u 0
5218 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5220 o->in2 = load_reg(get_field(f, r3));
5222 #define SPEC_in2_r3 0
/* High 32 bits of r3 / r2 shifted down into the low half. */
5224 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5226 o->in2 = tcg_temp_new_i64();
5227 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5229 #define SPEC_in2_r3_sr32 0
5231 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5233 o->in2 = tcg_temp_new_i64();
5234 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5236 #define SPEC_in2_r2_32s 0
5238 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5240 o->in2 = tcg_temp_new_i64();
5241 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5243 #define SPEC_in2_r2_32u 0
5245 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5247 o->in2 = tcg_temp_new_i64();
5248 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5250 #define SPEC_in2_r2_sr32 0
/* Floating-point operand 2: short (e2), long (f2), extended (x2). */
5252 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5254 o->in2 = load_freg32_i64(get_field(f, r2));
5256 #define SPEC_in2_e2 0
5258 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5260 o->in2 = fregs[get_field(f, r2)];
5261 o->g_in2 = true;
5263 #define SPEC_in2_f2_o 0
/* 128-bit FP source: the pair fregs[r2]/fregs[r2+2] fills in1 and in2. */
5265 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5267 int r2 = get_field(f, r2);
5268 o->in1 = fregs[r2];
5269 o->in2 = fregs[r2 + 2];
5270 o->g_in1 = o->g_in2 = true;
5272 #define SPEC_in2_x2_o SPEC_r2_f128
/* Address forms: register-as-address (ra2), base+index+disp (a2),
   PC-relative (ri2, immediate counted in halfwords). */
5274 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5276 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5278 #define SPEC_in2_ra2 0
5280 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5282 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5283 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5285 #define SPEC_in2_a2 0
5287 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5289 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5291 #define SPEC_in2_ri2 0
/* Shift amounts, masked to 31 or 63 bits by help_l2_shift. */
5293 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5295 help_l2_shift(s, f, o, 31);
5297 #define SPEC_in2_sh32 0
5299 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5301 help_l2_shift(s, f, o, 63);
5303 #define SPEC_in2_sh64 0
/* in2_m2_*: compute the a2 address, then load through it in place
   (the address in o->in2 is overwritten by the loaded value). */
5305 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5307 in2_a2(s, f, o);
5308 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5310 #define SPEC_in2_m2_8u 0
5312 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5314 in2_a2(s, f, o);
5315 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5317 #define SPEC_in2_m2_16s 0
5319 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5321 in2_a2(s, f, o);
5322 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5324 #define SPEC_in2_m2_16u 0
5326 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5328 in2_a2(s, f, o);
5329 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5331 #define SPEC_in2_m2_32s 0
5333 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5335 in2_a2(s, f, o);
5336 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5338 #define SPEC_in2_m2_32u 0
5340 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5342 in2_a2(s, f, o);
5343 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5345 #define SPEC_in2_m2_64 0
/* in2_mri2_*: same, but through a PC-relative address. */
5347 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5349 in2_ri2(s, f, o);
5350 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5352 #define SPEC_in2_mri2_16u 0
5354 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5356 in2_ri2(s, f, o);
5357 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5359 #define SPEC_in2_mri2_32s 0
5361 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5363 in2_ri2(s, f, o);
5364 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5366 #define SPEC_in2_mri2_32u 0
5368 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5370 in2_ri2(s, f, o);
5371 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5373 #define SPEC_in2_mri2_64 0
/* Immediate operands, at various widths/extensions; _shl variants shift
   the immediate left by the per-insn data amount. */
5375 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5377 o->in2 = tcg_const_i64(get_field(f, i2));
5379 #define SPEC_in2_i2 0
5381 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5383 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5385 #define SPEC_in2_i2_8u 0
5387 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5389 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5391 #define SPEC_in2_i2_16u 0
5393 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5395 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5397 #define SPEC_in2_i2_32u 0
5399 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5401 uint64_t i2 = (uint16_t)get_field(f, i2);
5402 o->in2 = tcg_const_i64(i2 << s->insn->data);
5404 #define SPEC_in2_i2_16u_shl 0
5406 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5408 uint64_t i2 = (uint32_t)get_field(f, i2);
5409 o->in2 = tcg_const_i64(i2 << s->insn->data);
5411 #define SPEC_in2_i2_32u_shl 0
/* System-mode only: pass the raw instruction image (used by PER). */
5413 #ifndef CONFIG_USER_ONLY
5414 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5416 o->in2 = tcg_const_i64(s->fields->raw_insn);
5418 #define SPEC_in2_insn 0
5419 #endif
5421 /* ====================================================================== */
5423 /* Find opc within the table of insns.  This is formulated as a switch
5424    statement so that (1) we get compile-time notice of cut-paste errors
5425    for duplicated opcodes, and (2) the compiler generates the binary
5426    search tree, rather than us having to post-process the table.  */
/* C() is D() with a zero data field; first expansion of insn-data.def
   builds the enum of instruction indices. */
5428 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5429 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5431 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5433 enum DisasInsnEnum {
5434 #include "insn-data.def"
5437 #undef D
/* Second expansion: each D() becomes a DisasInsn initializer wiring the
   in1/in2/prep/wout/cout/op helper functions and SPEC checks together. */
5438 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5439 .opc = OPC, \
5440 .fmt = FMT_##FT, \
5441 .fac = FAC_##FC, \
5442 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5443 .name = #NM, \
5444 .help_in1 = in1_##I1, \
5445 .help_in2 = in2_##I2, \
5446 .help_prep = prep_##P, \
5447 .help_wout = wout_##W, \
5448 .help_cout = cout_##CC, \
5449 .help_op = op_##OP, \
5450 .data = D \
5453 /* Allow 0 to be used for NULL in the table below. */
5454 #define in1_0 NULL
5455 #define in2_0 NULL
5456 #define prep_0 NULL
5457 #define wout_0 NULL
5458 #define cout_0 NULL
5459 #define op_0 NULL
5461 #define SPEC_in1_0 0
5462 #define SPEC_in2_0 0
5463 #define SPEC_prep_0 0
5464 #define SPEC_wout_0 0
5466 /* Give smaller names to the various facilities.  Each FAC_* maps an
   insn-data.def facility tag onto the S390_FEAT_* bit that gates it.
   (Note: SUPPPORT below matches the identifier spelling in the feature
   headers and must stay as-is.) */
5467 #define FAC_Z S390_FEAT_ZARCH
5468 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5469 #define FAC_DFP S390_FEAT_DFP
5470 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5471 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5472 #define FAC_EE S390_FEAT_EXECUTE_EXT
5473 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5474 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5475 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5476 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5477 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5478 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5479 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5480 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5481 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5482 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5483 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5484 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5485 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5486 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5487 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5488 #define FAC_SFLE S390_FEAT_STFLE
5489 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5490 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5491 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5492 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5493 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5494 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5495 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5496 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5497 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
/* The instruction table itself: second expansion of insn-data.def. */
5499 static const DisasInsn insn_info[] = {
5500 #include "insn-data.def"
5503 #undef D
/* Third expansion: D() becomes a switch case mapping the 16-bit
   (op << 8 | op2) key to the table entry; NULL for unknown opcodes. */
5504 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5505 case OPC: return &insn_info[insn_ ## NM];
5507 static const DisasInsn *lookup_opc(uint16_t opc)
5509 switch (opc) {
5510 #include "insn-data.def"
5511 default:
5512 return NULL;
5516 #undef D
5517 #undef C
5519 /* Extract a field from the insn.  The INSN should be left-aligned in
5520    the uint64_t so that we can more easily utilize the big-bit-endian
5521    definitions we extract from the Principles of Operation.  */
5523 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5525 uint32_t r, m;
/* size == 0 marks an unused field slot in the format description. */
5527 if (f->size == 0) {
5528 return;
5531 /* Zero extract the field from the insn. */
5532 r = (insn << f->beg) >> (64 - f->size);
5534 /* Sign-extend, or un-swap the field as necessary. */
5535 switch (f->type) {
5536 case 0: /* unsigned */
5537 break;
5538 case 1: /* signed */
5539 assert(f->size <= 32);
/* Sign-extend via the xor/subtract trick on the top bit. */
5540 m = 1u << (f->size - 1);
5541 r = (r ^ m) - m;
5542 break;
5543 case 2: /* dl+dh split, signed 20 bit. */
/* Reassemble: dh (high 8 bits, signed) above dl (low 12 bits). */
5544 r = ((int8_t)r << 12) | (r >> 8);
5545 break;
5546 default:
5547 abort();
5550 /* Validate that the "compressed" encoding we selected above is valid.
5551    I.e. we haven't made two different original fields overlap.  */
5552 assert(((o->presentC >> f->indexC) & 1) == 0);
5553 o->presentC |= 1 << f->indexC;
5554 o->presentO |= 1 << f->indexO;
5556 o->c[f->indexC] = r;
5559 /* Lookup the insn at the current PC, extracting the operands into O and
5560    returning the info struct for the insn.  Returns NULL for invalid insn.  */
5562 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5563 DisasFields *f)
5565 uint64_t insn, pc = s->pc;
5566 int op, op2, ilen;
5567 const DisasInsn *info;
/* EXECUTE path: the target insn and its length were stashed in
   ex_value by the EX/EXRL helper rather than fetched from memory. */
5569 if (unlikely(s->ex_value)) {
5570 /* Drop the EX data now, so that it's clear on exception paths. */
5571 TCGv_i64 zero = tcg_const_i64(0);
5572 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5573 tcg_temp_free_i64(zero);
5575 /* Extract the values saved by EXECUTE. */
5576 insn = s->ex_value & 0xffffffffffff0000ull;
5577 ilen = s->ex_value & 0xf;
5578 op = insn >> 56;
5579 } else {
/* Normal path: fetch 2 bytes, derive the length from the major
   opcode, then fetch the remainder and left-align into 64 bits. */
5580 insn = ld_code2(env, pc);
5581 op = (insn >> 8) & 0xff;
5582 ilen = get_ilen(op);
5583 switch (ilen) {
5584 case 2:
5585 insn = insn << 48;
5586 break;
5587 case 4:
5588 insn = ld_code4(env, pc) << 32;
5589 break;
5590 case 6:
5591 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5592 break;
5593 default:
5594 g_assert_not_reached();
5597 s->next_pc = s->pc + ilen;
5598 s->ilen = ilen;
5600 /* We can't actually determine the insn format until we've looked up
5601    the full insn opcode.  Which we can't do without locating the
5602    secondary opcode.  Assume by default that OP2 is at bit 40; for
5603    those smaller insns that don't actually have a secondary opcode
5604    this will correctly result in OP2 = 0.  */
5605 switch (op) {
5606 case 0x01: /* E */
5607 case 0x80: /* S */
5608 case 0x82: /* S */
5609 case 0x93: /* S */
5610 case 0xb2: /* S, RRF, RRE, IE */
5611 case 0xb3: /* RRE, RRD, RRF */
5612 case 0xb9: /* RRE, RRF */
5613 case 0xe5: /* SSE, SIL */
/* Secondary opcode is the byte at bits 48..55. */
5614 op2 = (insn << 8) >> 56;
5615 break;
5616 case 0xa5: /* RI */
5617 case 0xa7: /* RI */
5618 case 0xc0: /* RIL */
5619 case 0xc2: /* RIL */
5620 case 0xc4: /* RIL */
5621 case 0xc6: /* RIL */
5622 case 0xc8: /* SSF */
5623 case 0xcc: /* RIL */
/* Secondary opcode is the 4-bit field at bits 48..51. */
5624 op2 = (insn << 12) >> 60;
5625 break;
5626 case 0xc5: /* MII */
5627 case 0xc7: /* SMI */
5628 case 0xd0 ... 0xdf: /* SS */
5629 case 0xe1: /* SS */
5630 case 0xe2: /* SS */
5631 case 0xe8: /* SS */
5632 case 0xe9: /* SS */
5633 case 0xea: /* SS */
5634 case 0xee ... 0xf3: /* SS */
5635 case 0xf8 ... 0xfd: /* SS */
/* No secondary opcode for these formats. */
5636 op2 = 0;
5637 break;
5638 default:
5639 op2 = (insn << 40) >> 56;
5640 break;
5643 memset(f, 0, sizeof(*f));
5644 f->raw_insn = insn;
5645 f->op = op;
5646 f->op2 = op2;
5648 /* Lookup the instruction. */
5649 info = lookup_opc(op << 8 | op2);
5651 /* If we found it, extract the operands. */
5652 if (info != NULL) {
5653 DisasFormat fmt = info->fmt;
5654 int i;
5656 for (i = 0; i < NUM_C_FIELD; ++i) {
5657 extract_field(f, &format_info[fmt].op[i], insn);
5660 return info;
/* Translate a single guest instruction at s->pc into TCG ops.
   Returns the ExitStatus that tells the main loop whether to continue,
   stop, or treat the TB as ended. */
5663 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5665 const DisasInsn *insn;
5666 ExitStatus ret = NO_EXIT;
5667 DisasFields f;
5668 DisasOps o;
5670 /* Search for the insn in the table. */
5671 insn = extract_insn(env, s, &f);
5673 /* Not found means unimplemented/illegal opcode. */
5674 if (insn == NULL) {
5675 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5676 f.op, f.op2);
5677 gen_illegal_opcode(s);
5678 return EXIT_NORETURN;
/* PER (program-event recording) instruction-fetch hook, sysemu only. */
5681 #ifndef CONFIG_USER_ONLY
5682 if (s->tb->flags & FLAG_MASK_PER) {
5683 TCGv_i64 addr = tcg_const_i64(s->pc);
5684 gen_helper_per_ifetch(cpu_env, addr);
5685 tcg_temp_free_i64(addr);
5687 #endif
5689 /* Check for insn specification exceptions. */
5690 if (insn->spec) {
5691 int spec = insn->spec, excp = 0, r;
/* Even-register-pair constraints. */
5693 if (spec & SPEC_r1_even) {
5694 r = get_field(&f, r1);
5695 if (r & 1) {
5696 excp = PGM_SPECIFICATION;
5699 if (spec & SPEC_r2_even) {
5700 r = get_field(&f, r2);
5701 if (r & 1) {
5702 excp = PGM_SPECIFICATION;
5705 if (spec & SPEC_r3_even) {
5706 r = get_field(&f, r3);
5707 if (r & 1) {
5708 excp = PGM_SPECIFICATION;
/* 128-bit FP register constraints: valid pairs require r <= 13. */
5711 if (spec & SPEC_r1_f128) {
5712 r = get_field(&f, r1);
5713 if (r > 13) {
5714 excp = PGM_SPECIFICATION;
5717 if (spec & SPEC_r2_f128) {
5718 r = get_field(&f, r2);
5719 if (r > 13) {
5720 excp = PGM_SPECIFICATION;
5723 if (excp) {
5724 gen_program_exception(s, excp);
5725 return EXIT_NORETURN;
5729 /* Set up the structures we use to communicate with the helpers. */
5730 s->insn = insn;
5731 s->fields = &f;
5732 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5733 TCGV_UNUSED_I64(o.out);
5734 TCGV_UNUSED_I64(o.out2);
5735 TCGV_UNUSED_I64(o.in1);
5736 TCGV_UNUSED_I64(o.in2);
5737 TCGV_UNUSED_I64(o.addr1);
5739 /* Implement the instruction.  Helpers run in a fixed pipeline:
   load inputs, prepare output, perform op, write output, set CC.  */
5740 if (insn->help_in1) {
5741 insn->help_in1(s, &f, &o);
5743 if (insn->help_in2) {
5744 insn->help_in2(s, &f, &o);
5746 if (insn->help_prep) {
5747 insn->help_prep(s, &f, &o);
5749 if (insn->help_op) {
5750 ret = insn->help_op(s, &o);
5752 if (insn->help_wout) {
5753 insn->help_wout(s, &f, &o);
5755 if (insn->help_cout) {
5756 insn->help_cout(s, &o);
5759 /* Free any temporaries created by the helpers.  The g_* flags mark
   slots that alias global TCG registers and must not be freed.  */
5760 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5761 tcg_temp_free_i64(o.out);
5763 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5764 tcg_temp_free_i64(o.out2);
5766 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5767 tcg_temp_free_i64(o.in1);
5769 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5770 tcg_temp_free_i64(o.in2);
5772 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5773 tcg_temp_free_i64(o.addr1);
5776 #ifndef CONFIG_USER_ONLY
5777 if (s->tb->flags & FLAG_MASK_PER) {
5778 /* An exception might be triggered, save PSW if not already done. */
5779 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5780 tcg_gen_movi_i64(psw_addr, s->next_pc);
5783 /* Save off cc. */
5784 update_cc_op(s);
5786 /* Call the helper to check for a possible PER exception. */
5787 gen_helper_per_check_exception(cpu_env);
5789 #endif
5791 /* Advance to the next instruction. */
5792 s->pc = s->next_pc;
5793 return ret;
/* Translate a whole TranslationBlock: loop translate_one() until a
   page boundary, instruction budget, single-step, or a TB-ending insn. */
5796 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
5798 CPUS390XState *env = cs->env_ptr;
5799 DisasContext dc;
5800 target_ulong pc_start;
5801 uint64_t next_page_start;
5802 int num_insns, max_insns;
5803 ExitStatus status;
5804 bool do_debug;
5806 pc_start = tb->pc;
5808 /* 31-bit mode */
5809 if (!(tb->flags & FLAG_MASK_64)) {
5810 pc_start &= 0x7fffffff;
5813 dc.tb = tb;
5814 dc.pc = pc_start;
5815 dc.cc_op = CC_OP_DYNAMIC;
/* cs_base carries the EXECUTE target, if any (see extract_insn). */
5816 dc.ex_value = tb->cs_base;
5817 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5819 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5821 num_insns = 0;
5822 max_insns = tb->cflags & CF_COUNT_MASK;
5823 if (max_insns == 0) {
5824 max_insns = CF_COUNT_MASK;
5826 if (max_insns > TCG_MAX_INSNS) {
5827 max_insns = TCG_MAX_INSNS;
5830 gen_tb_start(tb);
5832 do {
5833 tcg_gen_insn_start(dc.pc, dc.cc_op);
5834 num_insns++;
5836 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5837 status = EXIT_PC_STALE;
5838 do_debug = true;
5839 /* The address covered by the breakpoint must be included in
5840    [tb->pc, tb->pc + tb->size) in order for it to be
5841    properly cleared -- thus we increment the PC here so that
5842    the logic setting tb->size below does the right thing. */
5843 dc.pc += 2;
5844 break;
5847 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
5848 gen_io_start();
5851 status = translate_one(env, &dc);
5853 /* If we reach a page boundary, are single stepping,
5854    or exhaust instruction count, stop generation. */
5855 if (status == NO_EXIT
5856 && (dc.pc >= next_page_start
5857 || tcg_op_buf_full()
5858 || num_insns >= max_insns
5859 || singlestep
5860 || cs->singlestep_enabled
5861 || dc.ex_value)) {
5862 status = EXIT_PC_STALE;
5864 } while (status == NO_EXIT);
5866 if (tb->cflags & CF_LAST_IO) {
5867 gen_io_end();
/* Emit the TB epilogue appropriate to how translation stopped. */
5870 switch (status) {
5871 case EXIT_GOTO_TB:
5872 case EXIT_NORETURN:
5873 break;
5874 case EXIT_PC_STALE:
5875 case EXIT_PC_STALE_NOCHAIN:
5876 update_psw_addr(&dc);
5877 /* FALLTHRU */
5878 case EXIT_PC_UPDATED:
5879 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5880    cc op type is in env */
5881 update_cc_op(&dc);
5882 /* FALLTHRU */
5883 case EXIT_PC_CC_UPDATED:
5884 /* Exit the TB, either by raising a debug exception or by return. */
5885 if (do_debug) {
5886 gen_exception(EXCP_DEBUG);
5887 } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
5888 tcg_gen_exit_tb(0);
5889 } else {
5890 tcg_gen_lookup_and_goto_ptr(psw_addr);
5892 break;
5893 default:
5894 g_assert_not_reached();
5897 gen_tb_end(tb, num_insns);
5899 tb->size = dc.pc - pc_start;
5900 tb->icount = num_insns;
5902 #if defined(S390X_DEBUG_DISAS)
5903 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5904 && qemu_log_in_addr_range(pc_start)) {
5905 qemu_log_lock();
5906 if (unlikely(dc.ex_value)) {
5907 /* ??? Unfortunately log_target_disas can't use host memory. */
5908 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
5909 } else {
5910 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5911 log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
5912 qemu_log("\n");
5914 qemu_log_unlock();
5916 #endif
5919 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5920 target_ulong *data)
5922 int cc_op = data[1];
5923 env->psw.addr = data[0];
5924 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5925 env->cc_op = cc_op;