highbank: validate register offset before access
[qemu/ar7.git] / target / s390x / translate.c
blob85d0a6c3aff7f2c735e6a2891dfabe3856c8056d
1 /*
2 * S/390 translation
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2010 Alexander Graf
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 /* #define DEBUG_INLINE_BRANCHES */
22 #define S390X_DEBUG_DISAS
23 /* #define S390X_DEBUG_DISAS_VERBOSE */
25 #ifdef S390X_DEBUG_DISAS_VERBOSE
26 # define LOG_DISAS(...) qemu_log(__VA_ARGS__)
27 #else
28 # define LOG_DISAS(...) do { } while (0)
29 #endif
31 #include "qemu/osdep.h"
32 #include "cpu.h"
33 #include "internal.h"
34 #include "disas/disas.h"
35 #include "exec/exec-all.h"
36 #include "tcg-op.h"
37 #include "qemu/log.h"
38 #include "qemu/host-utils.h"
39 #include "exec/cpu_ldst.h"
40 #include "exec/gen-icount.h"
41 #include "exec/helper-proto.h"
42 #include "exec/helper-gen.h"
44 #include "trace-tcg.h"
45 #include "exec/log.h"
48 /* Information that (most) every instruction needs to manipulate. */
49 typedef struct DisasContext DisasContext;
50 typedef struct DisasInsn DisasInsn;
51 typedef struct DisasFields DisasFields;
53 struct DisasContext {
54 struct TranslationBlock *tb;
55 const DisasInsn *insn;
56 DisasFields *fields;
57 uint64_t ex_value;
58 uint64_t pc, next_pc;
59 uint32_t ilen;
60 enum cc_op cc_op;
61 bool singlestep_enabled;
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;   /* true: compare u.s64, false: compare u.s32 */
    /* g1/g2: operand a/b is a global TCG value (e.g. cc_src) that must
       not be freed; see free_compare() which consults these flags.  */
    bool g1;
    bool g2;
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
76 /* is_jmp field values */
77 #define DISAS_EXCP DISAS_TARGET_0
79 #ifdef DEBUG_INLINE_BRANCHES
80 static uint64_t inline_branch_hit[CC_OP_MAX];
81 static uint64_t inline_branch_miss[CC_OP_MAX];
82 #endif
84 static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
86 if (!(s->tb->flags & FLAG_MASK_64)) {
87 if (s->tb->flags & FLAG_MASK_32) {
88 return pc | 0x80000000;
91 return pc;
94 static TCGv_i64 psw_addr;
95 static TCGv_i64 psw_mask;
96 static TCGv_i64 gbea;
98 static TCGv_i32 cc_op;
99 static TCGv_i64 cc_src;
100 static TCGv_i64 cc_dst;
101 static TCGv_i64 cc_vr;
103 static char cpu_reg_names[32][4];
104 static TCGv_i64 regs[16];
105 static TCGv_i64 fregs[16];
107 void s390x_translate_init(void)
109 int i;
111 psw_addr = tcg_global_mem_new_i64(cpu_env,
112 offsetof(CPUS390XState, psw.addr),
113 "psw_addr");
114 psw_mask = tcg_global_mem_new_i64(cpu_env,
115 offsetof(CPUS390XState, psw.mask),
116 "psw_mask");
117 gbea = tcg_global_mem_new_i64(cpu_env,
118 offsetof(CPUS390XState, gbea),
119 "gbea");
121 cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
122 "cc_op");
123 cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
124 "cc_src");
125 cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
126 "cc_dst");
127 cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
128 "cc_vr");
130 for (i = 0; i < 16; i++) {
131 snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
132 regs[i] = tcg_global_mem_new(cpu_env,
133 offsetof(CPUS390XState, regs[i]),
134 cpu_reg_names[i]);
137 for (i = 0; i < 16; i++) {
138 snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
139 fregs[i] = tcg_global_mem_new(cpu_env,
140 offsetof(CPUS390XState, vregs[i][0].d),
141 cpu_reg_names[i + 16]);
145 static TCGv_i64 load_reg(int reg)
147 TCGv_i64 r = tcg_temp_new_i64();
148 tcg_gen_mov_i64(r, regs[reg]);
149 return r;
152 static TCGv_i64 load_freg32_i64(int reg)
154 TCGv_i64 r = tcg_temp_new_i64();
155 tcg_gen_shri_i64(r, fregs[reg], 32);
156 return r;
159 static void store_reg(int reg, TCGv_i64 v)
161 tcg_gen_mov_i64(regs[reg], v);
164 static void store_freg(int reg, TCGv_i64 v)
166 tcg_gen_mov_i64(fregs[reg], v);
169 static void store_reg32_i64(int reg, TCGv_i64 v)
171 /* 32 bit register writes keep the upper half */
172 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
175 static void store_reg32h_i64(int reg, TCGv_i64 v)
177 tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
180 static void store_freg32_i64(int reg, TCGv_i64 v)
182 tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
185 static void return_low128(TCGv_i64 dest)
187 tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
190 static void update_psw_addr(DisasContext *s)
192 /* psw.addr */
193 tcg_gen_movi_i64(psw_addr, s->pc);
196 static void per_branch(DisasContext *s, bool to_next)
198 #ifndef CONFIG_USER_ONLY
199 tcg_gen_movi_i64(gbea, s->pc);
201 if (s->tb->flags & FLAG_MASK_PER) {
202 TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
203 gen_helper_per_branch(cpu_env, gbea, next_pc);
204 if (to_next) {
205 tcg_temp_free_i64(next_pc);
208 #endif
211 static void per_branch_cond(DisasContext *s, TCGCond cond,
212 TCGv_i64 arg1, TCGv_i64 arg2)
214 #ifndef CONFIG_USER_ONLY
215 if (s->tb->flags & FLAG_MASK_PER) {
216 TCGLabel *lab = gen_new_label();
217 tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
219 tcg_gen_movi_i64(gbea, s->pc);
220 gen_helper_per_branch(cpu_env, gbea, psw_addr);
222 gen_set_label(lab);
223 } else {
224 TCGv_i64 pc = tcg_const_i64(s->pc);
225 tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
226 tcg_temp_free_i64(pc);
228 #endif
231 static void per_breaking_event(DisasContext *s)
233 tcg_gen_movi_i64(gbea, s->pc);
236 static void update_cc_op(DisasContext *s)
238 if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
239 tcg_gen_movi_i32(cc_op, s->cc_op);
243 static void potential_page_fault(DisasContext *s)
245 update_psw_addr(s);
246 update_cc_op(s);
249 static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
251 return (uint64_t)cpu_lduw_code(env, pc);
254 static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
256 return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
259 static int get_mem_index(DisasContext *s)
261 switch (s->tb->flags & FLAG_MASK_ASC) {
262 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
263 return 0;
264 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
265 return 1;
266 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
267 return 2;
268 default:
269 tcg_abort();
270 break;
274 static void gen_exception(int excp)
276 TCGv_i32 tmp = tcg_const_i32(excp);
277 gen_helper_exception(cpu_env, tmp);
278 tcg_temp_free_i32(tmp);
281 static void gen_program_exception(DisasContext *s, int code)
283 TCGv_i32 tmp;
285 /* Remember what pgm exeption this was. */
286 tmp = tcg_const_i32(code);
287 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
288 tcg_temp_free_i32(tmp);
290 tmp = tcg_const_i32(s->ilen);
291 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
292 tcg_temp_free_i32(tmp);
294 /* update the psw */
295 update_psw_addr(s);
297 /* Save off cc. */
298 update_cc_op(s);
300 /* Trigger exception. */
301 gen_exception(EXCP_PGM);
304 static inline void gen_illegal_opcode(DisasContext *s)
306 gen_program_exception(s, PGM_OPERATION);
309 static inline void gen_trap(DisasContext *s)
311 TCGv_i32 t;
313 /* Set DXC to 0xff. */
314 t = tcg_temp_new_i32();
315 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
316 tcg_gen_ori_i32(t, t, 0xff00);
317 tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
318 tcg_temp_free_i32(t);
320 gen_program_exception(s, PGM_DATA);
323 #ifndef CONFIG_USER_ONLY
324 static void check_privileged(DisasContext *s)
326 if (s->tb->flags & FLAG_MASK_PSTATE) {
327 gen_program_exception(s, PGM_PRIVILEGED);
330 #endif
332 static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
334 TCGv_i64 tmp = tcg_temp_new_i64();
335 bool need_31 = !(s->tb->flags & FLAG_MASK_64);
337 /* Note that d2 is limited to 20 bits, signed. If we crop negative
338 displacements early we create larger immedate addends. */
340 /* Note that addi optimizes the imm==0 case. */
341 if (b2 && x2) {
342 tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
343 tcg_gen_addi_i64(tmp, tmp, d2);
344 } else if (b2) {
345 tcg_gen_addi_i64(tmp, regs[b2], d2);
346 } else if (x2) {
347 tcg_gen_addi_i64(tmp, regs[x2], d2);
348 } else {
349 if (need_31) {
350 d2 &= 0x7fffffff;
351 need_31 = false;
353 tcg_gen_movi_i64(tmp, d2);
355 if (need_31) {
356 tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
359 return tmp;
362 static inline bool live_cc_data(DisasContext *s)
364 return (s->cc_op != CC_OP_DYNAMIC
365 && s->cc_op != CC_OP_STATIC
366 && s->cc_op > 3);
369 static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
371 if (live_cc_data(s)) {
372 tcg_gen_discard_i64(cc_src);
373 tcg_gen_discard_i64(cc_dst);
374 tcg_gen_discard_i64(cc_vr);
376 s->cc_op = CC_OP_CONST0 + val;
379 static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
381 if (live_cc_data(s)) {
382 tcg_gen_discard_i64(cc_src);
383 tcg_gen_discard_i64(cc_vr);
385 tcg_gen_mov_i64(cc_dst, dst);
386 s->cc_op = op;
389 static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
390 TCGv_i64 dst)
392 if (live_cc_data(s)) {
393 tcg_gen_discard_i64(cc_vr);
395 tcg_gen_mov_i64(cc_src, src);
396 tcg_gen_mov_i64(cc_dst, dst);
397 s->cc_op = op;
400 static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
401 TCGv_i64 dst, TCGv_i64 vr)
403 tcg_gen_mov_i64(cc_src, src);
404 tcg_gen_mov_i64(cc_dst, dst);
405 tcg_gen_mov_i64(cc_vr, vr);
406 s->cc_op = op;
409 static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
411 gen_op_update1_cc_i64(s, CC_OP_NZ, val);
414 static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
416 gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
419 static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
421 gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
424 static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
426 gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
429 /* CC value is in env->cc_op */
430 static void set_cc_static(DisasContext *s)
432 if (live_cc_data(s)) {
433 tcg_gen_discard_i64(cc_src);
434 tcg_gen_discard_i64(cc_dst);
435 tcg_gen_discard_i64(cc_vr);
437 s->cc_op = CC_OP_STATIC;
440 /* calculates cc into cc_op */
441 static void gen_op_calc_cc(DisasContext *s)
443 TCGv_i32 local_cc_op;
444 TCGv_i64 dummy;
446 TCGV_UNUSED_I32(local_cc_op);
447 TCGV_UNUSED_I64(dummy);
448 switch (s->cc_op) {
449 default:
450 dummy = tcg_const_i64(0);
451 /* FALLTHRU */
452 case CC_OP_ADD_64:
453 case CC_OP_ADDU_64:
454 case CC_OP_ADDC_64:
455 case CC_OP_SUB_64:
456 case CC_OP_SUBU_64:
457 case CC_OP_SUBB_64:
458 case CC_OP_ADD_32:
459 case CC_OP_ADDU_32:
460 case CC_OP_ADDC_32:
461 case CC_OP_SUB_32:
462 case CC_OP_SUBU_32:
463 case CC_OP_SUBB_32:
464 local_cc_op = tcg_const_i32(s->cc_op);
465 break;
466 case CC_OP_CONST0:
467 case CC_OP_CONST1:
468 case CC_OP_CONST2:
469 case CC_OP_CONST3:
470 case CC_OP_STATIC:
471 case CC_OP_DYNAMIC:
472 break;
475 switch (s->cc_op) {
476 case CC_OP_CONST0:
477 case CC_OP_CONST1:
478 case CC_OP_CONST2:
479 case CC_OP_CONST3:
480 /* s->cc_op is the cc value */
481 tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
482 break;
483 case CC_OP_STATIC:
484 /* env->cc_op already is the cc value */
485 break;
486 case CC_OP_NZ:
487 case CC_OP_ABS_64:
488 case CC_OP_NABS_64:
489 case CC_OP_ABS_32:
490 case CC_OP_NABS_32:
491 case CC_OP_LTGT0_32:
492 case CC_OP_LTGT0_64:
493 case CC_OP_COMP_32:
494 case CC_OP_COMP_64:
495 case CC_OP_NZ_F32:
496 case CC_OP_NZ_F64:
497 case CC_OP_FLOGR:
498 /* 1 argument */
499 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
500 break;
501 case CC_OP_ICM:
502 case CC_OP_LTGT_32:
503 case CC_OP_LTGT_64:
504 case CC_OP_LTUGTU_32:
505 case CC_OP_LTUGTU_64:
506 case CC_OP_TM_32:
507 case CC_OP_TM_64:
508 case CC_OP_SLA_32:
509 case CC_OP_SLA_64:
510 case CC_OP_NZ_F128:
511 /* 2 arguments */
512 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
513 break;
514 case CC_OP_ADD_64:
515 case CC_OP_ADDU_64:
516 case CC_OP_ADDC_64:
517 case CC_OP_SUB_64:
518 case CC_OP_SUBU_64:
519 case CC_OP_SUBB_64:
520 case CC_OP_ADD_32:
521 case CC_OP_ADDU_32:
522 case CC_OP_ADDC_32:
523 case CC_OP_SUB_32:
524 case CC_OP_SUBU_32:
525 case CC_OP_SUBB_32:
526 /* 3 arguments */
527 gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
528 break;
529 case CC_OP_DYNAMIC:
530 /* unknown operation - assume 3 arguments and cc_op in env */
531 gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
532 break;
533 default:
534 tcg_abort();
537 if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
538 tcg_temp_free_i32(local_cc_op);
540 if (!TCGV_IS_UNUSED_I64(dummy)) {
541 tcg_temp_free_i64(dummy);
544 /* We now have cc in cc_op as constant */
545 set_cc_static(s);
548 static bool use_exit_tb(DisasContext *s)
550 return (s->singlestep_enabled ||
551 (tb_cflags(s->tb) & CF_LAST_IO) ||
552 (s->tb->flags & FLAG_MASK_PER));
555 static bool use_goto_tb(DisasContext *s, uint64_t dest)
557 if (unlikely(use_exit_tb(s))) {
558 return false;
560 #ifndef CONFIG_USER_ONLY
561 return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
562 (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
563 #else
564 return true;
565 #endif
568 static void account_noninline_branch(DisasContext *s, int cc_op)
570 #ifdef DEBUG_INLINE_BRANCHES
571 inline_branch_miss[cc_op]++;
572 #endif
575 static void account_inline_branch(DisasContext *s, int cc_op)
577 #ifdef DEBUG_INLINE_BRANCHES
578 inline_branch_hit[cc_op]++;
579 #endif
582 /* Table of mask values to comparison codes, given a comparison as input.
583 For such, CC=3 should not be possible. */
584 static const TCGCond ltgt_cond[16] = {
585 TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
586 TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
587 TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
588 TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
589 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
590 TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
591 TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
592 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
595 /* Table of mask values to comparison codes, given a logic op as input.
596 For such, only CC=0 and CC=1 should be possible. */
597 static const TCGCond nz_cond[16] = {
598 TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
599 TCG_COND_NEVER, TCG_COND_NEVER,
600 TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
601 TCG_COND_NE, TCG_COND_NE,
602 TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
603 TCG_COND_EQ, TCG_COND_EQ,
604 TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
605 TCG_COND_ALWAYS, TCG_COND_ALWAYS,
608 /* Interpret MASK in terms of S->CC_OP, and fill in C with all the
609 details required to generate a TCG comparison. */
610 static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
612 TCGCond cond;
613 enum cc_op old_cc_op = s->cc_op;
615 if (mask == 15 || mask == 0) {
616 c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
617 c->u.s32.a = cc_op;
618 c->u.s32.b = cc_op;
619 c->g1 = c->g2 = true;
620 c->is_64 = false;
621 return;
624 /* Find the TCG condition for the mask + cc op. */
625 switch (old_cc_op) {
626 case CC_OP_LTGT0_32:
627 case CC_OP_LTGT0_64:
628 case CC_OP_LTGT_32:
629 case CC_OP_LTGT_64:
630 cond = ltgt_cond[mask];
631 if (cond == TCG_COND_NEVER) {
632 goto do_dynamic;
634 account_inline_branch(s, old_cc_op);
635 break;
637 case CC_OP_LTUGTU_32:
638 case CC_OP_LTUGTU_64:
639 cond = tcg_unsigned_cond(ltgt_cond[mask]);
640 if (cond == TCG_COND_NEVER) {
641 goto do_dynamic;
643 account_inline_branch(s, old_cc_op);
644 break;
646 case CC_OP_NZ:
647 cond = nz_cond[mask];
648 if (cond == TCG_COND_NEVER) {
649 goto do_dynamic;
651 account_inline_branch(s, old_cc_op);
652 break;
654 case CC_OP_TM_32:
655 case CC_OP_TM_64:
656 switch (mask) {
657 case 8:
658 cond = TCG_COND_EQ;
659 break;
660 case 4 | 2 | 1:
661 cond = TCG_COND_NE;
662 break;
663 default:
664 goto do_dynamic;
666 account_inline_branch(s, old_cc_op);
667 break;
669 case CC_OP_ICM:
670 switch (mask) {
671 case 8:
672 cond = TCG_COND_EQ;
673 break;
674 case 4 | 2 | 1:
675 case 4 | 2:
676 cond = TCG_COND_NE;
677 break;
678 default:
679 goto do_dynamic;
681 account_inline_branch(s, old_cc_op);
682 break;
684 case CC_OP_FLOGR:
685 switch (mask & 0xa) {
686 case 8: /* src == 0 -> no one bit found */
687 cond = TCG_COND_EQ;
688 break;
689 case 2: /* src != 0 -> one bit found */
690 cond = TCG_COND_NE;
691 break;
692 default:
693 goto do_dynamic;
695 account_inline_branch(s, old_cc_op);
696 break;
698 case CC_OP_ADDU_32:
699 case CC_OP_ADDU_64:
700 switch (mask) {
701 case 8 | 2: /* vr == 0 */
702 cond = TCG_COND_EQ;
703 break;
704 case 4 | 1: /* vr != 0 */
705 cond = TCG_COND_NE;
706 break;
707 case 8 | 4: /* no carry -> vr >= src */
708 cond = TCG_COND_GEU;
709 break;
710 case 2 | 1: /* carry -> vr < src */
711 cond = TCG_COND_LTU;
712 break;
713 default:
714 goto do_dynamic;
716 account_inline_branch(s, old_cc_op);
717 break;
719 case CC_OP_SUBU_32:
720 case CC_OP_SUBU_64:
721 /* Note that CC=0 is impossible; treat it as dont-care. */
722 switch (mask & 7) {
723 case 2: /* zero -> op1 == op2 */
724 cond = TCG_COND_EQ;
725 break;
726 case 4 | 1: /* !zero -> op1 != op2 */
727 cond = TCG_COND_NE;
728 break;
729 case 4: /* borrow (!carry) -> op1 < op2 */
730 cond = TCG_COND_LTU;
731 break;
732 case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
733 cond = TCG_COND_GEU;
734 break;
735 default:
736 goto do_dynamic;
738 account_inline_branch(s, old_cc_op);
739 break;
741 default:
742 do_dynamic:
743 /* Calculate cc value. */
744 gen_op_calc_cc(s);
745 /* FALLTHRU */
747 case CC_OP_STATIC:
748 /* Jump based on CC. We'll load up the real cond below;
749 the assignment here merely avoids a compiler warning. */
750 account_noninline_branch(s, old_cc_op);
751 old_cc_op = CC_OP_STATIC;
752 cond = TCG_COND_NEVER;
753 break;
756 /* Load up the arguments of the comparison. */
757 c->is_64 = true;
758 c->g1 = c->g2 = false;
759 switch (old_cc_op) {
760 case CC_OP_LTGT0_32:
761 c->is_64 = false;
762 c->u.s32.a = tcg_temp_new_i32();
763 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
764 c->u.s32.b = tcg_const_i32(0);
765 break;
766 case CC_OP_LTGT_32:
767 case CC_OP_LTUGTU_32:
768 case CC_OP_SUBU_32:
769 c->is_64 = false;
770 c->u.s32.a = tcg_temp_new_i32();
771 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
772 c->u.s32.b = tcg_temp_new_i32();
773 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
774 break;
776 case CC_OP_LTGT0_64:
777 case CC_OP_NZ:
778 case CC_OP_FLOGR:
779 c->u.s64.a = cc_dst;
780 c->u.s64.b = tcg_const_i64(0);
781 c->g1 = true;
782 break;
783 case CC_OP_LTGT_64:
784 case CC_OP_LTUGTU_64:
785 case CC_OP_SUBU_64:
786 c->u.s64.a = cc_src;
787 c->u.s64.b = cc_dst;
788 c->g1 = c->g2 = true;
789 break;
791 case CC_OP_TM_32:
792 case CC_OP_TM_64:
793 case CC_OP_ICM:
794 c->u.s64.a = tcg_temp_new_i64();
795 c->u.s64.b = tcg_const_i64(0);
796 tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
797 break;
799 case CC_OP_ADDU_32:
800 c->is_64 = false;
801 c->u.s32.a = tcg_temp_new_i32();
802 c->u.s32.b = tcg_temp_new_i32();
803 tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
804 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
805 tcg_gen_movi_i32(c->u.s32.b, 0);
806 } else {
807 tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
809 break;
811 case CC_OP_ADDU_64:
812 c->u.s64.a = cc_vr;
813 c->g1 = true;
814 if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
815 c->u.s64.b = tcg_const_i64(0);
816 } else {
817 c->u.s64.b = cc_src;
818 c->g2 = true;
820 break;
822 case CC_OP_STATIC:
823 c->is_64 = false;
824 c->u.s32.a = cc_op;
825 c->g1 = true;
826 switch (mask) {
827 case 0x8 | 0x4 | 0x2: /* cc != 3 */
828 cond = TCG_COND_NE;
829 c->u.s32.b = tcg_const_i32(3);
830 break;
831 case 0x8 | 0x4 | 0x1: /* cc != 2 */
832 cond = TCG_COND_NE;
833 c->u.s32.b = tcg_const_i32(2);
834 break;
835 case 0x8 | 0x2 | 0x1: /* cc != 1 */
836 cond = TCG_COND_NE;
837 c->u.s32.b = tcg_const_i32(1);
838 break;
839 case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
840 cond = TCG_COND_EQ;
841 c->g1 = false;
842 c->u.s32.a = tcg_temp_new_i32();
843 c->u.s32.b = tcg_const_i32(0);
844 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
845 break;
846 case 0x8 | 0x4: /* cc < 2 */
847 cond = TCG_COND_LTU;
848 c->u.s32.b = tcg_const_i32(2);
849 break;
850 case 0x8: /* cc == 0 */
851 cond = TCG_COND_EQ;
852 c->u.s32.b = tcg_const_i32(0);
853 break;
854 case 0x4 | 0x2 | 0x1: /* cc != 0 */
855 cond = TCG_COND_NE;
856 c->u.s32.b = tcg_const_i32(0);
857 break;
858 case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
859 cond = TCG_COND_NE;
860 c->g1 = false;
861 c->u.s32.a = tcg_temp_new_i32();
862 c->u.s32.b = tcg_const_i32(0);
863 tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
864 break;
865 case 0x4: /* cc == 1 */
866 cond = TCG_COND_EQ;
867 c->u.s32.b = tcg_const_i32(1);
868 break;
869 case 0x2 | 0x1: /* cc > 1 */
870 cond = TCG_COND_GTU;
871 c->u.s32.b = tcg_const_i32(1);
872 break;
873 case 0x2: /* cc == 2 */
874 cond = TCG_COND_EQ;
875 c->u.s32.b = tcg_const_i32(2);
876 break;
877 case 0x1: /* cc == 3 */
878 cond = TCG_COND_EQ;
879 c->u.s32.b = tcg_const_i32(3);
880 break;
881 default:
882 /* CC is masked by something else: (8 >> cc) & mask. */
883 cond = TCG_COND_NE;
884 c->g1 = false;
885 c->u.s32.a = tcg_const_i32(8);
886 c->u.s32.b = tcg_const_i32(0);
887 tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
888 tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
889 break;
891 break;
893 default:
894 abort();
896 c->cond = cond;
899 static void free_compare(DisasCompare *c)
901 if (!c->g1) {
902 if (c->is_64) {
903 tcg_temp_free_i64(c->u.s64.a);
904 } else {
905 tcg_temp_free_i32(c->u.s32.a);
908 if (!c->g2) {
909 if (c->is_64) {
910 tcg_temp_free_i64(c->u.s64.b);
911 } else {
912 tcg_temp_free_i32(c->u.s32.b);
917 /* ====================================================================== */
918 /* Define the insn format enumeration. */
919 #define F0(N) FMT_##N,
920 #define F1(N, X1) F0(N)
921 #define F2(N, X1, X2) F0(N)
922 #define F3(N, X1, X2, X3) F0(N)
923 #define F4(N, X1, X2, X3, X4) F0(N)
924 #define F5(N, X1, X2, X3, X4, X5) F0(N)
926 typedef enum {
927 #include "insn-format.def"
928 } DisasFormat;
930 #undef F0
931 #undef F1
932 #undef F2
933 #undef F3
934 #undef F4
935 #undef F5
937 /* Define a structure to hold the decoded fields. We'll store each inside
938 an array indexed by an enum. In order to conserve memory, we'll arrange
939 for fields that do not exist at the same time to overlap, thus the "C"
940 for compact. For checking purposes there is an "O" for original index
941 as well that will be applied to availability bitmaps. */
/* "Original" operand-field indices, used as bit positions in the
   presence bitmap.  (Restored the closing brace dropped by the
   extraction.)  */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5
};
/* "Compact" storage slots: fields that never coexist in one format
   share a slot, so seven ints suffice.  (Restored the closing brace
   dropped by the extraction.)  */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,

    FLD_C_d2 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,

    NUM_C_FIELD = 7
};
997 struct DisasFields {
998 uint64_t raw_insn;
999 unsigned op:8;
1000 unsigned op2:8;
1001 unsigned presentC:16;
1002 unsigned int presentO;
1003 int c[NUM_C_FIELD];
1006 /* This is the way fields are to be accessed out of DisasFields. */
1007 #define have_field(S, F) have_field1((S), FLD_O_##F)
1008 #define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
1010 static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
1012 return (f->presentO >> c) & 1;
1015 static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
1016 enum DisasFieldIndexC c)
1018 assert(have_field1(f, o));
1019 return f->c[c];
1022 /* Describe the layout of each field in each format. */
1023 typedef struct DisasField {
1024 unsigned int beg:8;
1025 unsigned int size:8;
1026 unsigned int type:2;
1027 unsigned int indexC:6;
1028 enum DisasFieldIndexO indexO:8;
1029 } DisasField;
1031 typedef struct DisasFormatInfo {
1032 DisasField op[NUM_C_FIELD];
1033 } DisasFormatInfo;
1035 #define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
1036 #define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
1037 #define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1038 { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
1039 #define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1040 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1041 { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
1042 #define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1043 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1044 #define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
1045 { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
1046 { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
1047 #define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
1048 #define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1050 #define F0(N) { { } },
1051 #define F1(N, X1) { { X1 } },
1052 #define F2(N, X1, X2) { { X1, X2 } },
1053 #define F3(N, X1, X2, X3) { { X1, X2, X3 } },
1054 #define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
1055 #define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
1057 static const DisasFormatInfo format_info[] = {
1058 #include "insn-format.def"
1061 #undef F0
1062 #undef F1
1063 #undef F2
1064 #undef F3
1065 #undef F4
1066 #undef F5
1067 #undef R
1068 #undef M
1069 #undef BD
1070 #undef BXD
1071 #undef BDL
1072 #undef BXDL
1073 #undef I
1074 #undef L
1076 /* Generally, we'll extract operands into this structures, operate upon
1077 them, and store them back. See the "in1", "in2", "prep", "wout" sets
1078 of routines below for more details. */
1079 typedef struct {
1080 bool g_out, g_out2, g_in1, g_in2;
1081 TCGv_i64 out, out2, in1, in2;
1082 TCGv_i64 addr1;
1083 } DisasOps;
1085 /* Instructions can place constraints on their operands, raising specification
1086 exceptions if they are violated. To make this easy to automate, each "in1",
1087 "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
1088 of the following, or 0. To make this easy to document, we'll put the
1089 SPEC_<name> defines next to <name>. */
1091 #define SPEC_r1_even 1
1092 #define SPEC_r2_even 2
1093 #define SPEC_r3_even 4
1094 #define SPEC_r1_f128 8
1095 #define SPEC_r2_f128 16
1097 /* Return values from translate_one, indicating the state of the TB. */
1098 typedef enum {
1099 /* Continue the TB. */
1100 NO_EXIT,
1101 /* We have emitted one or more goto_tb. No fixup required. */
1102 EXIT_GOTO_TB,
1103 /* We are not using a goto_tb (for whatever reason), but have updated
1104 the PC (for whatever reason), so there's no need to do it again on
1105 exiting the TB. */
1106 EXIT_PC_UPDATED,
1107 /* We have updated the PC and CC values. */
1108 EXIT_PC_CC_UPDATED,
1109 /* We are exiting the TB, but have neither emitted a goto_tb, nor
1110 updated the PC for the next instruction to be executed. */
1111 EXIT_PC_STALE,
1112 /* We are exiting the TB to the main loop. */
1113 EXIT_PC_STALE_NOCHAIN,
1114 /* We are ending the TB with a noreturn function call, e.g. longjmp.
1115 No following code will be executed. */
1116 EXIT_NORETURN,
1117 } ExitStatus;
1119 struct DisasInsn {
1120 unsigned opc:16;
1121 DisasFormat fmt:8;
1122 unsigned fac:8;
1123 unsigned spec:8;
1125 const char *name;
1127 void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
1128 void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
1129 void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
1130 void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
1131 void (*help_cout)(DisasContext *, DisasOps *);
1132 ExitStatus (*help_op)(DisasContext *, DisasOps *);
1134 uint64_t data;
1137 /* ====================================================================== */
1138 /* Miscellaneous helpers, used by several operations. */
1140 static void help_l2_shift(DisasContext *s, DisasFields *f,
1141 DisasOps *o, int mask)
1143 int b2 = get_field(f, b2);
1144 int d2 = get_field(f, d2);
1146 if (b2 == 0) {
1147 o->in2 = tcg_const_i64(d2 & mask);
1148 } else {
1149 o->in2 = get_address(s, 0, b2, d2);
1150 tcg_gen_andi_i64(o->in2, o->in2, mask);
1154 static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
1156 if (dest == s->next_pc) {
1157 per_branch(s, true);
1158 return NO_EXIT;
1160 if (use_goto_tb(s, dest)) {
1161 update_cc_op(s);
1162 per_breaking_event(s);
1163 tcg_gen_goto_tb(0);
1164 tcg_gen_movi_i64(psw_addr, dest);
1165 tcg_gen_exit_tb((uintptr_t)s->tb);
1166 return EXIT_GOTO_TB;
1167 } else {
1168 tcg_gen_movi_i64(psw_addr, dest);
1169 per_branch(s, false);
1170 return EXIT_PC_UPDATED;
1174 static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
1175 bool is_imm, int imm, TCGv_i64 cdest)
1177 ExitStatus ret;
1178 uint64_t dest = s->pc + 2 * imm;
1179 TCGLabel *lab;
1181 /* Take care of the special cases first. */
1182 if (c->cond == TCG_COND_NEVER) {
1183 ret = NO_EXIT;
1184 goto egress;
1186 if (is_imm) {
1187 if (dest == s->next_pc) {
1188 /* Branch to next. */
1189 per_branch(s, true);
1190 ret = NO_EXIT;
1191 goto egress;
1193 if (c->cond == TCG_COND_ALWAYS) {
1194 ret = help_goto_direct(s, dest);
1195 goto egress;
1197 } else {
1198 if (TCGV_IS_UNUSED_I64(cdest)) {
1199 /* E.g. bcr %r0 -> no branch. */
1200 ret = NO_EXIT;
1201 goto egress;
1203 if (c->cond == TCG_COND_ALWAYS) {
1204 tcg_gen_mov_i64(psw_addr, cdest);
1205 per_branch(s, false);
1206 ret = EXIT_PC_UPDATED;
1207 goto egress;
1211 if (use_goto_tb(s, s->next_pc)) {
1212 if (is_imm && use_goto_tb(s, dest)) {
1213 /* Both exits can use goto_tb. */
1214 update_cc_op(s);
1216 lab = gen_new_label();
1217 if (c->is_64) {
1218 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1219 } else {
1220 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1223 /* Branch not taken. */
1224 tcg_gen_goto_tb(0);
1225 tcg_gen_movi_i64(psw_addr, s->next_pc);
1226 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1228 /* Branch taken. */
1229 gen_set_label(lab);
1230 per_breaking_event(s);
1231 tcg_gen_goto_tb(1);
1232 tcg_gen_movi_i64(psw_addr, dest);
1233 tcg_gen_exit_tb((uintptr_t)s->tb + 1);
1235 ret = EXIT_GOTO_TB;
1236 } else {
1237 /* Fallthru can use goto_tb, but taken branch cannot. */
1238 /* Store taken branch destination before the brcond. This
1239 avoids having to allocate a new local temp to hold it.
1240 We'll overwrite this in the not taken case anyway. */
1241 if (!is_imm) {
1242 tcg_gen_mov_i64(psw_addr, cdest);
1245 lab = gen_new_label();
1246 if (c->is_64) {
1247 tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
1248 } else {
1249 tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
1252 /* Branch not taken. */
1253 update_cc_op(s);
1254 tcg_gen_goto_tb(0);
1255 tcg_gen_movi_i64(psw_addr, s->next_pc);
1256 tcg_gen_exit_tb((uintptr_t)s->tb + 0);
1258 gen_set_label(lab);
1259 if (is_imm) {
1260 tcg_gen_movi_i64(psw_addr, dest);
1262 per_breaking_event(s);
1263 ret = EXIT_PC_UPDATED;
1265 } else {
1266 /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
1267 Most commonly we're single-stepping or some other condition that
1268 disables all use of goto_tb. Just update the PC and exit. */
1270 TCGv_i64 next = tcg_const_i64(s->next_pc);
1271 if (is_imm) {
1272 cdest = tcg_const_i64(dest);
1275 if (c->is_64) {
1276 tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
1277 cdest, next);
1278 per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
1279 } else {
1280 TCGv_i32 t0 = tcg_temp_new_i32();
1281 TCGv_i64 t1 = tcg_temp_new_i64();
1282 TCGv_i64 z = tcg_const_i64(0);
1283 tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
1284 tcg_gen_extu_i32_i64(t1, t0);
1285 tcg_temp_free_i32(t0);
1286 tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
1287 per_branch_cond(s, TCG_COND_NE, t1, z);
1288 tcg_temp_free_i64(t1);
1289 tcg_temp_free_i64(z);
1292 if (is_imm) {
1293 tcg_temp_free_i64(cdest);
1295 tcg_temp_free_i64(next);
1297 ret = EXIT_PC_UPDATED;
1300 egress:
1301 free_compare(c);
1302 return ret;
1305 /* ====================================================================== */
1306 /* The operations. These perform the bulk of the work for any insn,
1307 usually after the operands have been loaded and output initialized. */
/* Load Positive: out = |in2|, computed branchlessly with movcond.  */
static ExitStatus op_abs(DisasContext *s, DisasOps *o)
{
    TCGv_i64 z, n;
    z = tcg_const_i64(0);
    n = tcg_temp_new_i64();
    tcg_gen_neg_i64(n, o->in2);
    /* out = (in2 < 0) ? -in2 : in2 */
    tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
    tcg_temp_free_i64(n);
    tcg_temp_free_i64(z);
    return NO_EXIT;
}
/* Load Positive (short BFP): clear the sign bit of the 32-bit value.  */
static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return NO_EXIT;
}
/* Load Positive (long BFP): clear the sign bit of the 64-bit value.  */
static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return NO_EXIT;
}
/* Load Positive (extended BFP): clear the sign bit in the high half,
   pass the low half through unchanged.  */
static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return NO_EXIT;
}
/* Integer add; CC, when required, is produced by the insn's cc hook.  */
static ExitStatus op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* Add Logical with Carry: out = in1 + in2 + carry-from-CC.  */
static ExitStatus op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3.  Feeding the generated comparison to
       setcond produces the carry flag that we desire.  */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: widen the setcond result to 64 bits.  */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return NO_EXIT;
}
/* AEB: short BFP add via the out-of-line helper.  */
static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* ADB: long BFP add via the out-of-line helper.  */
static ExitStatus op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* AXB: extended (128-bit) BFP add; low half of the result comes back
   through the helper's low-128 return slot.  */
static ExitStatus op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* Bitwise AND of the two inputs.  */
static ExitStatus op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* AND-immediate on a sub-field of the register (e.g. NIHH/NILL family).
   insn->data encodes the field: low byte = bit shift, high bits = width.  */
static ExitStatus op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    /* Position the immediate and set all bits outside the field so that
       the AND leaves the rest of the register unchanged.  */
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Branch and Save: store the link info, then branch if a target was
   supplied (in2 unset means no branch, e.g. BASR with R2 = 0).  */
static ExitStatus op_bas(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    if (!TCGV_IS_UNUSED_I64(o->in2)) {
        tcg_gen_mov_i64(psw_addr, o->in2);
        per_branch(s, false);
        return EXIT_PC_UPDATED;
    } else {
        return NO_EXIT;
    }
}
/* Branch Relative and Save: link, then direct branch to pc + 2*i2.  */
static ExitStatus op_basi(DisasContext *s, DisasOps *o)
{
    tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
    return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
}
/* Branch on Condition (BC/BCR/BRC).  A register form with R2 = 0 never
   branches; masks 14/15 in that case act as serialization points.  */
static ExitStatus op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return NO_EXIT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on Count (32-bit): decrement the low half of R1 and branch
   while the result is non-zero.  */
static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on Count High: decrement the high half of R1 and branch while
   the result is non-zero; target is always relative-immediate.  */
static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Extract, decrement, and write back the high 32 bits of R1.  */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
/* Branch on Count (64-bit): decrement R1 in place and branch while
   non-zero.  The comparison reads the global register directly (g1).  */
static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on Index (32-bit, BXH/BXLE): add R3 to R1, compare against the
   odd register of the R3 pair.  insn->data selects LE (BXLE) vs GT (BXH).  */
static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* The comparand is R3 if R3 is odd, else R3+1.  */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Branch on Index (64-bit, BXHG/BXLEG).  If R1 aliases the comparand
   register, snapshot it before the add clobbers it.  */
static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    if (r1 == (r3 | 1)) {
        /* R1 is the comparand itself; copy its pre-add value.  */
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
/* Compare and Branch (CRJ/CGRJ/CIJ/... family): compare in1 against in2
   with the relation selected by m3; insn->data selects unsigned compare.  */
static ExitStatus op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        /* Indirect form: branch target computed from b4/d4.  */
        imm = 0;
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
/* CEB: short BFP compare; the helper computes the CC value.  */
static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CDB: long BFP compare; the helper computes the CC value.  */
static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CXB: extended BFP compare; the helper computes the CC value.  */
static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CFEB: convert short BFP to 32-bit int; m3 is the rounding mode.  */
static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CFDB: convert long BFP to 32-bit int; m3 is the rounding mode.  */
static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CFXB: convert extended BFP to 32-bit int; m3 is the rounding mode.  */
static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CGEB: convert short BFP to 64-bit int; m3 is the rounding mode.  */
static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CGDB: convert long BFP to 64-bit int; m3 is the rounding mode.  */
static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CGXB: convert extended BFP to 64-bit int; m3 is the rounding mode.  */
static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CLFEB: convert short BFP to 32-bit unsigned int; m3 = rounding mode.  */
static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CLFDB: convert long BFP to 32-bit unsigned int; m3 = rounding mode.  */
static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CLFXB: convert extended BFP to 32-bit unsigned int; m3 = rounding mode.  */
static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CLGEB: convert short BFP to 64-bit unsigned int; m3 = rounding mode.  */
static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f32(s, o->in2);
    return NO_EXIT;
}
/* CLGDB: convert long BFP to 64-bit unsigned int; m3 = rounding mode.  */
static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f64(s, o->in2);
    return NO_EXIT;
}
/* CLGXB: convert extended BFP to 64-bit unsigned int; m3 = rounding mode.  */
static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
    tcg_temp_free_i32(m3);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return NO_EXIT;
}
/* CEGB: convert 64-bit int to short BFP; m3 = rounding mode; no CC.  */
static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cegb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CDGB: convert 64-bit int to long BFP; m3 = rounding mode; no CC.  */
static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CXGB: convert 64-bit int to extended BFP; low half via return_low128.  */
static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CELGB: convert 64-bit unsigned int to short BFP; m3 = rounding mode.  */
static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_celgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CDLGB: convert 64-bit unsigned int to long BFP; m3 = rounding mode.  */
static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CXLGB: convert 64-bit unsigned int to extended BFP.  */
static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return_low128(o->out2);
    return NO_EXIT;
}
/* CKSM: checksum.  The helper returns the number of bytes consumed,
   which is used to advance the R2/R2+1 address/length register pair.  */
static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    /* Advance the address and shrink the remaining length.  */
    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return NO_EXIT;
}
/* CLC: compare logical of storage operands.  Power-of-two lengths up to 8
   are inlined as two loads plus an unsigned compare; anything else goes
   through the byte-loop helper.  */
static ExitStatus op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    /* l is the length minus one, hence the +1 here.  */
    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        /* General case: helper computes the CC directly.  */
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return NO_EXIT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return NO_EXIT;
}
/* CLCL: compare logical long.  Operands are even/odd register pairs, so
   odd register numbers raise a specification exception.  */
static ExitStatus op_clcl(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 t1, t2;

    /* r1 and r2 must be even. */
    if (r1 & 1 || r2 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t2 = tcg_const_i32(r2);
    gen_helper_clcl(cc_op, cpu_env, t1, t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
    set_cc_static(s);
    return NO_EXIT;
}
/* CLCLE: compare logical long extended; register pairs must be even.  */
static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* CLCLU: compare logical long unicode; register pairs must be even.  */
static ExitStatus op_clclu(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t1, t3;

    /* r1 and r3 must be even. */
    if (r1 & 1 || r3 & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }

    t1 = tcg_const_i32(r1);
    t3 = tcg_const_i32(r3);
    gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t3);
    set_cc_static(s);
    return NO_EXIT;
}
/* CLM: compare logical characters under mask; the helper does the
   byte-by-byte compare of the selected register bytes against storage.  */
static ExitStatus op_clm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    TCGv_i32 t1 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t1, o->in1);
    gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
    set_cc_static(s);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* CLST: compare logical string; R0 supplies the terminator character.  */
static ExitStatus op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return NO_EXIT;
}
/* Copy Sign: out = sign-bit(in1) | magnitude(in2).  */
static ExitStatus op_cps(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t = tcg_temp_new_i64();
    tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* Compare and Swap (CS/CSG): atomic cmpxchg at b2+d2; CC reflects
   whether the expected and memory values matched.  */
static ExitStatus op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return NO_EXIT;
}
/* CDSG: 128-bit compare double and swap; the parallel variant is used
   when the TB runs under multi-threaded TCG.  */
static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
/* CSST: compare and swap and store; parallel variant under MTTCG.  */
static ExitStatus op_csst(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    TCGv_i32 t_r3 = tcg_const_i32(r3);

    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->in1, o->in2);
    } else {
        gen_helper_csst(cc_op, cpu_env, t_r3, o->in1, o->in2);
    }
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return NO_EXIT;
}
1992 #ifndef CONFIG_USER_ONLY
/* CSP: compare and swap and purge (privileged).  Performs an aligned
   cmpxchg; on success with the low bit of R2 set, purges the TLB.  */
static ExitStatus op_csp(DisasContext *s, DisasOps *o)
{
    TCGMemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    check_privileged(s);
    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask the address down to the operand's natural alignment.  */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return NO_EXIT;
}
2036 #endif
/* CVD: convert binary to decimal and store the 8-byte result at in2.  */
static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i32 t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, o->in1);
    gen_helper_cvd(t1, t2);
    tcg_temp_free_i32(t2);
    tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
/* Compare and Trap: trap unless the m3-selected relation holds, by
   branching around the trap on the inverted condition.  */
static ExitStatus op_ct(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    TCGLabel *lab = gen_new_label();
    TCGCond c;

    /* Invert so the branch skips the trap when the compare succeeds.  */
    c = tcg_invert_cond(ltgt_cond[m3]);
    if (s->insn->data) {
        c = tcg_unsigned_cond(c);
    }
    tcg_gen_brcond_i64(c, o->in1, o->in2, lab);

    /* Trap.  */
    gen_trap(s);

    gen_set_label(lab);
    return NO_EXIT;
}
/* CU12/CU14/CU21/CU24/CU41/CU42: Unicode conversions, dispatched by
   insn->data.  R1 and R2 are register pairs and must be even; the m3
   well-formedness check is only honored with the ETF3 enhancement.  */
static ExitStatus op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return NO_EXIT;
}
2119 #ifndef CONFIG_USER_ONLY
/* DIAGNOSE (privileged): PSW and CC are synced before the helper since
   the hypercall may inspect or alter full CPU state.  */
static ExitStatus op_diag(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));

    check_privileged(s);
    update_psw_addr(s);
    gen_op_calc_cc(s);

    gen_helper_diag(cpu_env, r1, r3, func_code);

    tcg_temp_free_i32(func_code);
    tcg_temp_free_i32(r3);
    tcg_temp_free_i32(r1);
    return NO_EXIT;
}
2137 #endif
/* 32-bit signed divide; quotient/remainder pair via helper + low128.  */
static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* 32-bit unsigned divide; quotient/remainder pair via helper + low128.  */
static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* 64-bit signed divide; quotient/remainder pair via helper + low128.  */
static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* 128/64-bit unsigned divide: dividend is the out:out2 pair.  */
static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return NO_EXIT;
}
/* DEB: short BFP divide via helper.  */
static ExitStatus op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DDB: long BFP divide via helper.  */
static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return NO_EXIT;
}
/* DXB: extended BFP divide; low half returned via return_low128.  */
static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return NO_EXIT;
}
/* EAR: extract access register r2 into the output register.  */
static ExitStatus op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return NO_EXIT;
}
/* ECAG: extract CPU attribute.  */
static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return NO_EXIT;
}
/* EFPC: extract the floating-point control register.  */
static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return NO_EXIT;
}
/* EPSW: store the high PSW word into R1 and, if R2 != 0, the low word
   into R2.  */
static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i64 t = tcg_temp_new_i64();

    /* Note the "subsequently" in the PoO, which implies a defined result
       if r1 == r2.  Thus we cannot defer these writes to an output hook.  */
    tcg_gen_shri_i64(t, psw_mask, 32);
    store_reg32_i64(r1, t);
    if (r2 != 0) {
        store_reg32_i64(r2, psw_mask);
    }

    tcg_temp_free_i64(t);
    return NO_EXIT;
}
/* EXECUTE: run the target instruction with its second byte OR'd with
   bits from R1.  Nesting an EXECUTE inside one is forbidden.  */
static ExitStatus op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return EXIT_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* R1 = 0 means no modification of the executed instruction.  */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return EXIT_PC_CC_UPDATED;
}
/* FIEB: round short BFP to integer-valued float; m3 = rounding mode.  */
static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fieb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIDB: round long BFP to integer-valued float; m3 = rounding mode.  */
static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fidb(o->out, cpu_env, o->in2, m3);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FIXB: round extended BFP to integer-valued float; m3 = rounding mode.  */
static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
    return_low128(o->out2);
    tcg_temp_free_i32(m3);
    return NO_EXIT;
}
/* FLOGR: find leftmost one.  R1 = count of leading zeros (64 if input
   is zero); R1+1 = input with the found bit cleared.  */
static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return NO_EXIT;
}
2301 static ExitStatus op_icm(DisasContext *s, DisasOps *o)
2303 int m3 = get_field(s->fields, m3);
2304 int pos, len, base = s->insn->data;
2305 TCGv_i64 tmp = tcg_temp_new_i64();
2306 uint64_t ccm;
2308 switch (m3) {
2309 case 0xf:
2310 /* Effectively a 32-bit load. */
2311 tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
2312 len = 32;
2313 goto one_insert;
2315 case 0xc:
2316 case 0x6:
2317 case 0x3:
2318 /* Effectively a 16-bit load. */
2319 tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
2320 len = 16;
2321 goto one_insert;
2323 case 0x8:
2324 case 0x4:
2325 case 0x2:
2326 case 0x1:
2327 /* Effectively an 8-bit load. */
2328 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2329 len = 8;
2330 goto one_insert;
2332 one_insert:
2333 pos = base + ctz32(m3) * 8;
2334 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
2335 ccm = ((1ull << len) - 1) << pos;
2336 break;
2338 default:
2339 /* This is going to be a sequence of loads and inserts. */
2340 pos = base + 32 - 8;
2341 ccm = 0;
2342 while (m3) {
2343 if (m3 & 0x8) {
2344 tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
2345 tcg_gen_addi_i64(o->in2, o->in2, 1);
2346 tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
2347 ccm |= 0xff << pos;
2349 m3 = (m3 << 1) & 0xf;
2350 pos -= 8;
2352 break;
2355 tcg_gen_movi_i64(tmp, ccm);
2356 gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
2357 tcg_temp_free_i64(tmp);
2358 return NO_EXIT;
/* Insert-immediate family: deposit in2 into in1 at the bit field encoded
   in insn->data (low byte = shift, high bits = size).  */
static ExitStatus op_insi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
    return NO_EXIT;
}
/* IPM: insert program mask and CC into bits 24-31 of the register
   (program mask from the PSW, CC from cc_op).  */
static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1;

    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);

    /* Extract the program mask from the PSW into bits 24-27.  */
    t1 = tcg_temp_new_i64();
    tcg_gen_shli_i64(t1, psw_mask, 20);
    tcg_gen_shri_i64(t1, t1, 36);
    tcg_gen_or_i64(o->out, o->out, t1);

    /* Insert the CC value into bits 28-29.  */
    tcg_gen_extu_i32_i64(t1, cc_op);
    tcg_gen_shli_i64(t1, t1, 28);
    tcg_gen_or_i64(o->out, o->out, t1);
    tcg_temp_free_i64(t1);
    return NO_EXIT;
}
2388 #ifndef CONFIG_USER_ONLY
/* IDTE (privileged): invalidate DAT table entry; m4 is only meaningful
   with the local-TLB-clearing facility.  */
static ExitStatus op_idte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_idte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}
/* IPTE (privileged): invalidate page table entry; m4 is only meaningful
   with the local-TLB-clearing facility.  */
static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m4;

    check_privileged(s);
    if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) {
        m4 = tcg_const_i32(get_field(s->fields, m4));
    } else {
        m4 = tcg_const_i32(0);
    }
    gen_helper_ipte(cpu_env, o->in1, o->in2, m4);
    tcg_temp_free_i32(m4);
    return NO_EXIT;
}
/* ISKE (privileged): insert storage key extended.  */
static ExitStatus op_iske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_iske(o->out, cpu_env, o->in2);
    return NO_EXIT;
}
2425 #endif
/* Message-security-assist instructions (KM, KMC, KIMD, PPNO, ...).
   insn->data carries the facility type; each group of types enforces
   its own even/non-zero register-pair requirements via fallthrough.  */
static ExitStatus op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return EXIT_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    }

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return NO_EXIT;
}
2479 static ExitStatus op_keb(DisasContext *s, DisasOps *o)
2481 gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
2482 set_cc_static(s);
2483 return NO_EXIT;
2486 static ExitStatus op_kdb(DisasContext *s, DisasOps *o)
2488 gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
2489 set_cc_static(s);
2490 return NO_EXIT;
2493 static ExitStatus op_kxb(DisasContext *s, DisasOps *o)
2495 gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
2496 set_cc_static(s);
2497 return NO_EXIT;
2500 static ExitStatus op_laa(DisasContext *s, DisasOps *o)
2502 /* The real output is indeed the original value in memory;
2503 recompute the addition for the computation of CC. */
2504 tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2505 s->insn->data | MO_ALIGN);
2506 /* However, we need to recompute the addition for setting CC. */
2507 tcg_gen_add_i64(o->out, o->in1, o->in2);
2508 return NO_EXIT;
2511 static ExitStatus op_lan(DisasContext *s, DisasOps *o)
2513 /* The real output is indeed the original value in memory;
2514 recompute the addition for the computation of CC. */
2515 tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2516 s->insn->data | MO_ALIGN);
2517 /* However, we need to recompute the operation for setting CC. */
2518 tcg_gen_and_i64(o->out, o->in1, o->in2);
2519 return NO_EXIT;
2522 static ExitStatus op_lao(DisasContext *s, DisasOps *o)
2524 /* The real output is indeed the original value in memory;
2525 recompute the addition for the computation of CC. */
2526 tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2527 s->insn->data | MO_ALIGN);
2528 /* However, we need to recompute the operation for setting CC. */
2529 tcg_gen_or_i64(o->out, o->in1, o->in2);
2530 return NO_EXIT;
2533 static ExitStatus op_lax(DisasContext *s, DisasOps *o)
2535 /* The real output is indeed the original value in memory;
2536 recompute the addition for the computation of CC. */
2537 tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
2538 s->insn->data | MO_ALIGN);
2539 /* However, we need to recompute the operation for setting CC. */
2540 tcg_gen_xor_i64(o->out, o->in1, o->in2);
2541 return NO_EXIT;
2544 static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
2546 gen_helper_ldeb(o->out, cpu_env, o->in2);
2547 return NO_EXIT;
2550 static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
2552 gen_helper_ledb(o->out, cpu_env, o->in2);
2553 return NO_EXIT;
2556 static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
2558 gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
2559 return NO_EXIT;
2562 static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
2564 gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
2565 return NO_EXIT;
2568 static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
2570 gen_helper_lxdb(o->out, cpu_env, o->in2);
2571 return_low128(o->out2);
2572 return NO_EXIT;
2575 static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
2577 gen_helper_lxeb(o->out, cpu_env, o->in2);
2578 return_low128(o->out2);
2579 return NO_EXIT;
2582 static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
2584 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2585 return NO_EXIT;
2588 static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
2590 tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
2591 return NO_EXIT;
2594 static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
2596 tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
2597 return NO_EXIT;
2600 static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
2602 tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
2603 return NO_EXIT;
2606 static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
2608 tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
2609 return NO_EXIT;
2612 static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
2614 tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
2615 return NO_EXIT;
2618 static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
2620 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2621 return NO_EXIT;
2624 static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
2626 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2627 return NO_EXIT;
2630 static ExitStatus op_lat(DisasContext *s, DisasOps *o)
2632 TCGLabel *lab = gen_new_label();
2633 store_reg32_i64(get_field(s->fields, r1), o->in2);
2634 /* The value is stored even in case of trap. */
2635 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2636 gen_trap(s);
2637 gen_set_label(lab);
2638 return NO_EXIT;
2641 static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
2643 TCGLabel *lab = gen_new_label();
2644 tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
2645 /* The value is stored even in case of trap. */
2646 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2647 gen_trap(s);
2648 gen_set_label(lab);
2649 return NO_EXIT;
2652 static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
2654 TCGLabel *lab = gen_new_label();
2655 store_reg32h_i64(get_field(s->fields, r1), o->in2);
2656 /* The value is stored even in case of trap. */
2657 tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
2658 gen_trap(s);
2659 gen_set_label(lab);
2660 return NO_EXIT;
2663 static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
2665 TCGLabel *lab = gen_new_label();
2666 tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
2667 /* The value is stored even in case of trap. */
2668 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2669 gen_trap(s);
2670 gen_set_label(lab);
2671 return NO_EXIT;
2674 static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
2676 TCGLabel *lab = gen_new_label();
2677 tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
2678 /* The value is stored even in case of trap. */
2679 tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
2680 gen_trap(s);
2681 gen_set_label(lab);
2682 return NO_EXIT;
2685 static ExitStatus op_loc(DisasContext *s, DisasOps *o)
2687 DisasCompare c;
2689 disas_jcc(s, &c, get_field(s->fields, m3));
2691 if (c.is_64) {
2692 tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
2693 o->in2, o->in1);
2694 free_compare(&c);
2695 } else {
2696 TCGv_i32 t32 = tcg_temp_new_i32();
2697 TCGv_i64 t, z;
2699 tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
2700 free_compare(&c);
2702 t = tcg_temp_new_i64();
2703 tcg_gen_extu_i32_i64(t, t32);
2704 tcg_temp_free_i32(t32);
2706 z = tcg_const_i64(0);
2707 tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
2708 tcg_temp_free_i64(t);
2709 tcg_temp_free_i64(z);
2712 return NO_EXIT;
2715 #ifndef CONFIG_USER_ONLY
2716 static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
2718 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2719 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2720 check_privileged(s);
2721 gen_helper_lctl(cpu_env, r1, o->in2, r3);
2722 tcg_temp_free_i32(r1);
2723 tcg_temp_free_i32(r3);
2724 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2725 return EXIT_PC_STALE_NOCHAIN;
2728 static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
2730 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2731 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2732 check_privileged(s);
2733 gen_helper_lctlg(cpu_env, r1, o->in2, r3);
2734 tcg_temp_free_i32(r1);
2735 tcg_temp_free_i32(r3);
2736 /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
2737 return EXIT_PC_STALE_NOCHAIN;
2740 static ExitStatus op_lra(DisasContext *s, DisasOps *o)
2742 check_privileged(s);
2743 gen_helper_lra(o->out, cpu_env, o->in2);
2744 set_cc_static(s);
2745 return NO_EXIT;
2748 static ExitStatus op_lpp(DisasContext *s, DisasOps *o)
2750 check_privileged(s);
2752 tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
2753 return NO_EXIT;
2756 static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
2758 TCGv_i64 t1, t2;
2760 check_privileged(s);
2761 per_breaking_event(s);
2763 t1 = tcg_temp_new_i64();
2764 t2 = tcg_temp_new_i64();
2765 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2766 tcg_gen_addi_i64(o->in2, o->in2, 4);
2767 tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
2768 /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
2769 tcg_gen_shli_i64(t1, t1, 32);
2770 gen_helper_load_psw(cpu_env, t1, t2);
2771 tcg_temp_free_i64(t1);
2772 tcg_temp_free_i64(t2);
2773 return EXIT_NORETURN;
2776 static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
2778 TCGv_i64 t1, t2;
2780 check_privileged(s);
2781 per_breaking_event(s);
2783 t1 = tcg_temp_new_i64();
2784 t2 = tcg_temp_new_i64();
2785 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2786 tcg_gen_addi_i64(o->in2, o->in2, 8);
2787 tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
2788 gen_helper_load_psw(cpu_env, t1, t2);
2789 tcg_temp_free_i64(t1);
2790 tcg_temp_free_i64(t2);
2791 return EXIT_NORETURN;
2793 #endif
2795 static ExitStatus op_lam(DisasContext *s, DisasOps *o)
2797 TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
2798 TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
2799 gen_helper_lam(cpu_env, r1, o->in2, r3);
2800 tcg_temp_free_i32(r1);
2801 tcg_temp_free_i32(r3);
2802 return NO_EXIT;
2805 static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
2807 int r1 = get_field(s->fields, r1);
2808 int r3 = get_field(s->fields, r3);
2809 TCGv_i64 t1, t2;
2811 /* Only one register to read. */
2812 t1 = tcg_temp_new_i64();
2813 if (unlikely(r1 == r3)) {
2814 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2815 store_reg32_i64(r1, t1);
2816 tcg_temp_free(t1);
2817 return NO_EXIT;
2820 /* First load the values of the first and last registers to trigger
2821 possible page faults. */
2822 t2 = tcg_temp_new_i64();
2823 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2824 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2825 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2826 store_reg32_i64(r1, t1);
2827 store_reg32_i64(r3, t2);
2829 /* Only two registers to read. */
2830 if (((r1 + 1) & 15) == r3) {
2831 tcg_temp_free(t2);
2832 tcg_temp_free(t1);
2833 return NO_EXIT;
2836 /* Then load the remaining registers. Page fault can't occur. */
2837 r3 = (r3 - 1) & 15;
2838 tcg_gen_movi_i64(t2, 4);
2839 while (r1 != r3) {
2840 r1 = (r1 + 1) & 15;
2841 tcg_gen_add_i64(o->in2, o->in2, t2);
2842 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2843 store_reg32_i64(r1, t1);
2845 tcg_temp_free(t2);
2846 tcg_temp_free(t1);
2848 return NO_EXIT;
2851 static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
2853 int r1 = get_field(s->fields, r1);
2854 int r3 = get_field(s->fields, r3);
2855 TCGv_i64 t1, t2;
2857 /* Only one register to read. */
2858 t1 = tcg_temp_new_i64();
2859 if (unlikely(r1 == r3)) {
2860 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2861 store_reg32h_i64(r1, t1);
2862 tcg_temp_free(t1);
2863 return NO_EXIT;
2866 /* First load the values of the first and last registers to trigger
2867 possible page faults. */
2868 t2 = tcg_temp_new_i64();
2869 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2870 tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
2871 tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
2872 store_reg32h_i64(r1, t1);
2873 store_reg32h_i64(r3, t2);
2875 /* Only two registers to read. */
2876 if (((r1 + 1) & 15) == r3) {
2877 tcg_temp_free(t2);
2878 tcg_temp_free(t1);
2879 return NO_EXIT;
2882 /* Then load the remaining registers. Page fault can't occur. */
2883 r3 = (r3 - 1) & 15;
2884 tcg_gen_movi_i64(t2, 4);
2885 while (r1 != r3) {
2886 r1 = (r1 + 1) & 15;
2887 tcg_gen_add_i64(o->in2, o->in2, t2);
2888 tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
2889 store_reg32h_i64(r1, t1);
2891 tcg_temp_free(t2);
2892 tcg_temp_free(t1);
2894 return NO_EXIT;
2897 static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
2899 int r1 = get_field(s->fields, r1);
2900 int r3 = get_field(s->fields, r3);
2901 TCGv_i64 t1, t2;
2903 /* Only one register to read. */
2904 if (unlikely(r1 == r3)) {
2905 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2906 return NO_EXIT;
2909 /* First load the values of the first and last registers to trigger
2910 possible page faults. */
2911 t1 = tcg_temp_new_i64();
2912 t2 = tcg_temp_new_i64();
2913 tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
2914 tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
2915 tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
2916 tcg_gen_mov_i64(regs[r1], t1);
2917 tcg_temp_free(t2);
2919 /* Only two registers to read. */
2920 if (((r1 + 1) & 15) == r3) {
2921 tcg_temp_free(t1);
2922 return NO_EXIT;
2925 /* Then load the remaining registers. Page fault can't occur. */
2926 r3 = (r3 - 1) & 15;
2927 tcg_gen_movi_i64(t1, 8);
2928 while (r1 != r3) {
2929 r1 = (r1 + 1) & 15;
2930 tcg_gen_add_i64(o->in2, o->in2, t1);
2931 tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
2933 tcg_temp_free(t1);
2935 return NO_EXIT;
2938 static ExitStatus op_lpd(DisasContext *s, DisasOps *o)
2940 TCGv_i64 a1, a2;
2941 TCGMemOp mop = s->insn->data;
2943 /* In a parallel context, stop the world and single step. */
2944 if (tb_cflags(s->tb) & CF_PARALLEL) {
2945 potential_page_fault(s);
2946 gen_exception(EXCP_ATOMIC);
2947 return EXIT_NORETURN;
2950 /* In a serial context, perform the two loads ... */
2951 a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
2952 a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
2953 tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
2954 tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
2955 tcg_temp_free_i64(a1);
2956 tcg_temp_free_i64(a2);
2958 /* ... and indicate that we performed them while interlocked. */
2959 gen_op_movi_cc(s, 0);
2960 return NO_EXIT;
2963 static ExitStatus op_lpq(DisasContext *s, DisasOps *o)
2965 if (tb_cflags(s->tb) & CF_PARALLEL) {
2966 gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
2967 } else {
2968 gen_helper_lpq(o->out, cpu_env, o->in2);
2970 return_low128(o->out2);
2971 return NO_EXIT;
2974 #ifndef CONFIG_USER_ONLY
2975 static ExitStatus op_lura(DisasContext *s, DisasOps *o)
2977 check_privileged(s);
2978 gen_helper_lura(o->out, cpu_env, o->in2);
2979 return NO_EXIT;
2982 static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
2984 check_privileged(s);
2985 gen_helper_lurag(o->out, cpu_env, o->in2);
2986 return NO_EXIT;
2988 #endif
2990 static ExitStatus op_lzrb(DisasContext *s, DisasOps *o)
2992 tcg_gen_andi_i64(o->out, o->in2, -256);
2993 return NO_EXIT;
2996 static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
2998 o->out = o->in2;
2999 o->g_out = o->g_in2;
3000 TCGV_UNUSED_I64(o->in2);
3001 o->g_in2 = false;
3002 return NO_EXIT;
3005 static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
3007 int b2 = get_field(s->fields, b2);
3008 TCGv ar1 = tcg_temp_new_i64();
3010 o->out = o->in2;
3011 o->g_out = o->g_in2;
3012 TCGV_UNUSED_I64(o->in2);
3013 o->g_in2 = false;
3015 switch (s->tb->flags & FLAG_MASK_ASC) {
3016 case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
3017 tcg_gen_movi_i64(ar1, 0);
3018 break;
3019 case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT:
3020 tcg_gen_movi_i64(ar1, 1);
3021 break;
3022 case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT:
3023 if (b2) {
3024 tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
3025 } else {
3026 tcg_gen_movi_i64(ar1, 0);
3028 break;
3029 case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT:
3030 tcg_gen_movi_i64(ar1, 2);
3031 break;
3034 tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
3035 tcg_temp_free_i64(ar1);
3037 return NO_EXIT;
3040 static ExitStatus op_movx(DisasContext *s, DisasOps *o)
3042 o->out = o->in1;
3043 o->out2 = o->in2;
3044 o->g_out = o->g_in1;
3045 o->g_out2 = o->g_in2;
3046 TCGV_UNUSED_I64(o->in1);
3047 TCGV_UNUSED_I64(o->in2);
3048 o->g_in1 = o->g_in2 = false;
3049 return NO_EXIT;
3052 static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
3054 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3055 gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
3056 tcg_temp_free_i32(l);
3057 return NO_EXIT;
3060 static ExitStatus op_mvcin(DisasContext *s, DisasOps *o)
3062 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3063 gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
3064 tcg_temp_free_i32(l);
3065 return NO_EXIT;
3068 static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
3070 int r1 = get_field(s->fields, r1);
3071 int r2 = get_field(s->fields, r2);
3072 TCGv_i32 t1, t2;
3074 /* r1 and r2 must be even. */
3075 if (r1 & 1 || r2 & 1) {
3076 gen_program_exception(s, PGM_SPECIFICATION);
3077 return EXIT_NORETURN;
3080 t1 = tcg_const_i32(r1);
3081 t2 = tcg_const_i32(r2);
3082 gen_helper_mvcl(cc_op, cpu_env, t1, t2);
3083 tcg_temp_free_i32(t1);
3084 tcg_temp_free_i32(t2);
3085 set_cc_static(s);
3086 return NO_EXIT;
3089 static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
3091 int r1 = get_field(s->fields, r1);
3092 int r3 = get_field(s->fields, r3);
3093 TCGv_i32 t1, t3;
3095 /* r1 and r3 must be even. */
3096 if (r1 & 1 || r3 & 1) {
3097 gen_program_exception(s, PGM_SPECIFICATION);
3098 return EXIT_NORETURN;
3101 t1 = tcg_const_i32(r1);
3102 t3 = tcg_const_i32(r3);
3103 gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3);
3104 tcg_temp_free_i32(t1);
3105 tcg_temp_free_i32(t3);
3106 set_cc_static(s);
3107 return NO_EXIT;
3110 static ExitStatus op_mvclu(DisasContext *s, DisasOps *o)
3112 int r1 = get_field(s->fields, r1);
3113 int r3 = get_field(s->fields, r3);
3114 TCGv_i32 t1, t3;
3116 /* r1 and r3 must be even. */
3117 if (r1 & 1 || r3 & 1) {
3118 gen_program_exception(s, PGM_SPECIFICATION);
3119 return EXIT_NORETURN;
3122 t1 = tcg_const_i32(r1);
3123 t3 = tcg_const_i32(r3);
3124 gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3);
3125 tcg_temp_free_i32(t1);
3126 tcg_temp_free_i32(t3);
3127 set_cc_static(s);
3128 return NO_EXIT;
3131 static ExitStatus op_mvcos(DisasContext *s, DisasOps *o)
3133 int r3 = get_field(s->fields, r3);
3134 gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
3135 set_cc_static(s);
3136 return NO_EXIT;
3139 #ifndef CONFIG_USER_ONLY
3140 static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
3142 int r1 = get_field(s->fields, l1);
3143 check_privileged(s);
3144 gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3145 set_cc_static(s);
3146 return NO_EXIT;
3149 static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
3151 int r1 = get_field(s->fields, l1);
3152 check_privileged(s);
3153 gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
3154 set_cc_static(s);
3155 return NO_EXIT;
3157 #endif
3159 static ExitStatus op_mvn(DisasContext *s, DisasOps *o)
3161 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3162 gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
3163 tcg_temp_free_i32(l);
3164 return NO_EXIT;
3167 static ExitStatus op_mvo(DisasContext *s, DisasOps *o)
3169 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3170 gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
3171 tcg_temp_free_i32(l);
3172 return NO_EXIT;
3175 static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
3177 gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
3178 set_cc_static(s);
3179 return NO_EXIT;
3182 static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
3184 gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
3185 set_cc_static(s);
3186 return_low128(o->in2);
3187 return NO_EXIT;
3190 static ExitStatus op_mvz(DisasContext *s, DisasOps *o)
3192 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3193 gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
3194 tcg_temp_free_i32(l);
3195 return NO_EXIT;
3198 static ExitStatus op_mul(DisasContext *s, DisasOps *o)
3200 tcg_gen_mul_i64(o->out, o->in1, o->in2);
3201 return NO_EXIT;
3204 static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
3206 tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
3207 return NO_EXIT;
3210 static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
3212 gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
3213 return NO_EXIT;
3216 static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
3218 gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
3219 return NO_EXIT;
3222 static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
3224 gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
3225 return NO_EXIT;
3228 static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
3230 gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3231 return_low128(o->out2);
3232 return NO_EXIT;
3235 static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
3237 gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
3238 return_low128(o->out2);
3239 return NO_EXIT;
3242 static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
3244 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3245 gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
3246 tcg_temp_free_i64(r3);
3247 return NO_EXIT;
3250 static ExitStatus op_madb(DisasContext *s, DisasOps *o)
3252 int r3 = get_field(s->fields, r3);
3253 gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3254 return NO_EXIT;
3257 static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
3259 TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
3260 gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
3261 tcg_temp_free_i64(r3);
3262 return NO_EXIT;
3265 static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
3267 int r3 = get_field(s->fields, r3);
3268 gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
3269 return NO_EXIT;
3272 static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
3274 TCGv_i64 z, n;
3275 z = tcg_const_i64(0);
3276 n = tcg_temp_new_i64();
3277 tcg_gen_neg_i64(n, o->in2);
3278 tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
3279 tcg_temp_free_i64(n);
3280 tcg_temp_free_i64(z);
3281 return NO_EXIT;
3284 static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
3286 tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
3287 return NO_EXIT;
3290 static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
3292 tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
3293 return NO_EXIT;
3296 static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
3298 tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
3299 tcg_gen_mov_i64(o->out2, o->in2);
3300 return NO_EXIT;
3303 static ExitStatus op_nc(DisasContext *s, DisasOps *o)
3305 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3306 gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
3307 tcg_temp_free_i32(l);
3308 set_cc_static(s);
3309 return NO_EXIT;
3312 static ExitStatus op_neg(DisasContext *s, DisasOps *o)
3314 tcg_gen_neg_i64(o->out, o->in2);
3315 return NO_EXIT;
3318 static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
3320 tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
3321 return NO_EXIT;
3324 static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
3326 tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
3327 return NO_EXIT;
3330 static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
3332 tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
3333 tcg_gen_mov_i64(o->out2, o->in2);
3334 return NO_EXIT;
3337 static ExitStatus op_oc(DisasContext *s, DisasOps *o)
3339 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3340 gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
3341 tcg_temp_free_i32(l);
3342 set_cc_static(s);
3343 return NO_EXIT;
3346 static ExitStatus op_or(DisasContext *s, DisasOps *o)
3348 tcg_gen_or_i64(o->out, o->in1, o->in2);
3349 return NO_EXIT;
3352 static ExitStatus op_ori(DisasContext *s, DisasOps *o)
3354 int shift = s->insn->data & 0xff;
3355 int size = s->insn->data >> 8;
3356 uint64_t mask = ((1ull << size) - 1) << shift;
3358 assert(!o->g_in2);
3359 tcg_gen_shli_i64(o->in2, o->in2, shift);
3360 tcg_gen_or_i64(o->out, o->in1, o->in2);
3362 /* Produce the CC from only the bits manipulated. */
3363 tcg_gen_andi_i64(cc_dst, o->out, mask);
3364 set_cc_nz_u64(s, cc_dst);
3365 return NO_EXIT;
3368 static ExitStatus op_pack(DisasContext *s, DisasOps *o)
3370 TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
3371 gen_helper_pack(cpu_env, l, o->addr1, o->in2);
3372 tcg_temp_free_i32(l);
3373 return NO_EXIT;
3376 static ExitStatus op_pka(DisasContext *s, DisasOps *o)
3378 int l2 = get_field(s->fields, l2) + 1;
3379 TCGv_i32 l;
3381 /* The length must not exceed 32 bytes. */
3382 if (l2 > 32) {
3383 gen_program_exception(s, PGM_SPECIFICATION);
3384 return EXIT_NORETURN;
3386 l = tcg_const_i32(l2);
3387 gen_helper_pka(cpu_env, o->addr1, o->in2, l);
3388 tcg_temp_free_i32(l);
3389 return NO_EXIT;
3392 static ExitStatus op_pku(DisasContext *s, DisasOps *o)
3394 int l2 = get_field(s->fields, l2) + 1;
3395 TCGv_i32 l;
3397 /* The length must be even and should not exceed 64 bytes. */
3398 if ((l2 & 1) || (l2 > 64)) {
3399 gen_program_exception(s, PGM_SPECIFICATION);
3400 return EXIT_NORETURN;
3402 l = tcg_const_i32(l2);
3403 gen_helper_pku(cpu_env, o->addr1, o->in2, l);
3404 tcg_temp_free_i32(l);
3405 return NO_EXIT;
3408 static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
3410 gen_helper_popcnt(o->out, o->in2);
3411 return NO_EXIT;
3414 #ifndef CONFIG_USER_ONLY
3415 static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
3417 check_privileged(s);
3418 gen_helper_ptlb(cpu_env);
3419 return NO_EXIT;
3421 #endif
3423 static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
3425 int i3 = get_field(s->fields, i3);
3426 int i4 = get_field(s->fields, i4);
3427 int i5 = get_field(s->fields, i5);
3428 int do_zero = i4 & 0x80;
3429 uint64_t mask, imask, pmask;
3430 int pos, len, rot;
3432 /* Adjust the arguments for the specific insn. */
3433 switch (s->fields->op2) {
3434 case 0x55: /* risbg */
3435 case 0x59: /* risbgn */
3436 i3 &= 63;
3437 i4 &= 63;
3438 pmask = ~0;
3439 break;
3440 case 0x5d: /* risbhg */
3441 i3 &= 31;
3442 i4 &= 31;
3443 pmask = 0xffffffff00000000ull;
3444 break;
3445 case 0x51: /* risblg */
3446 i3 &= 31;
3447 i4 &= 31;
3448 pmask = 0x00000000ffffffffull;
3449 break;
3450 default:
3451 g_assert_not_reached();
3454 /* MASK is the set of bits to be inserted from R2.
3455 Take care for I3/I4 wraparound. */
3456 mask = pmask >> i3;
3457 if (i3 <= i4) {
3458 mask ^= pmask >> i4 >> 1;
3459 } else {
3460 mask |= ~(pmask >> i4 >> 1);
3462 mask &= pmask;
3464 /* IMASK is the set of bits to be kept from R1. In the case of the high/low
3465 insns, we need to keep the other half of the register. */
3466 imask = ~mask | ~pmask;
3467 if (do_zero) {
3468 imask = ~pmask;
3471 len = i4 - i3 + 1;
3472 pos = 63 - i4;
3473 rot = i5 & 63;
3474 if (s->fields->op2 == 0x5d) {
3475 pos += 32;
3478 /* In some cases we can implement this with extract. */
3479 if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
3480 tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
3481 return NO_EXIT;
3484 /* In some cases we can implement this with deposit. */
3485 if (len > 0 && (imask == 0 || ~mask == imask)) {
3486 /* Note that we rotate the bits to be inserted to the lsb, not to
3487 the position as described in the PoO. */
3488 rot = (rot - pos) & 63;
3489 } else {
3490 pos = -1;
3493 /* Rotate the input as necessary. */
3494 tcg_gen_rotli_i64(o->in2, o->in2, rot);
3496 /* Insert the selected bits into the output. */
3497 if (pos >= 0) {
3498 if (imask == 0) {
3499 tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
3500 } else {
3501 tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
3503 } else if (imask == 0) {
3504 tcg_gen_andi_i64(o->out, o->in2, mask);
3505 } else {
3506 tcg_gen_andi_i64(o->in2, o->in2, mask);
3507 tcg_gen_andi_i64(o->out, o->out, imask);
3508 tcg_gen_or_i64(o->out, o->out, o->in2);
3510 return NO_EXIT;
3513 static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
3515 int i3 = get_field(s->fields, i3);
3516 int i4 = get_field(s->fields, i4);
3517 int i5 = get_field(s->fields, i5);
3518 uint64_t mask;
3520 /* If this is a test-only form, arrange to discard the result. */
3521 if (i3 & 0x80) {
3522 o->out = tcg_temp_new_i64();
3523 o->g_out = false;
3526 i3 &= 63;
3527 i4 &= 63;
3528 i5 &= 63;
3530 /* MASK is the set of bits to be operated on from R2.
3531 Take care for I3/I4 wraparound. */
3532 mask = ~0ull >> i3;
3533 if (i3 <= i4) {
3534 mask ^= ~0ull >> i4 >> 1;
3535 } else {
3536 mask |= ~(~0ull >> i4 >> 1);
3539 /* Rotate the input as necessary. */
3540 tcg_gen_rotli_i64(o->in2, o->in2, i5);
3542 /* Operate. */
3543 switch (s->fields->op2) {
3544 case 0x55: /* AND */
3545 tcg_gen_ori_i64(o->in2, o->in2, ~mask);
3546 tcg_gen_and_i64(o->out, o->out, o->in2);
3547 break;
3548 case 0x56: /* OR */
3549 tcg_gen_andi_i64(o->in2, o->in2, mask);
3550 tcg_gen_or_i64(o->out, o->out, o->in2);
3551 break;
3552 case 0x57: /* XOR */
3553 tcg_gen_andi_i64(o->in2, o->in2, mask);
3554 tcg_gen_xor_i64(o->out, o->out, o->in2);
3555 break;
3556 default:
3557 abort();
3560 /* Set the CC. */
3561 tcg_gen_andi_i64(cc_dst, o->out, mask);
3562 set_cc_nz_u64(s, cc_dst);
3563 return NO_EXIT;
3566 static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
3568 tcg_gen_bswap16_i64(o->out, o->in2);
3569 return NO_EXIT;
3572 static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
3574 tcg_gen_bswap32_i64(o->out, o->in2);
3575 return NO_EXIT;
3578 static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
3580 tcg_gen_bswap64_i64(o->out, o->in2);
3581 return NO_EXIT;
3584 static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
3586 TCGv_i32 t1 = tcg_temp_new_i32();
3587 TCGv_i32 t2 = tcg_temp_new_i32();
3588 TCGv_i32 to = tcg_temp_new_i32();
3589 tcg_gen_extrl_i64_i32(t1, o->in1);
3590 tcg_gen_extrl_i64_i32(t2, o->in2);
3591 tcg_gen_rotl_i32(to, t1, t2);
3592 tcg_gen_extu_i32_i64(o->out, to);
3593 tcg_temp_free_i32(t1);
3594 tcg_temp_free_i32(t2);
3595 tcg_temp_free_i32(to);
3596 return NO_EXIT;
3599 static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
3601 tcg_gen_rotl_i64(o->out, o->in1, o->in2);
3602 return NO_EXIT;
3605 #ifndef CONFIG_USER_ONLY
3606 static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
3608 check_privileged(s);
3609 gen_helper_rrbe(cc_op, cpu_env, o->in2);
3610 set_cc_static(s);
3611 return NO_EXIT;
3614 static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
3616 check_privileged(s);
3617 gen_helper_sacf(cpu_env, o->in2);
3618 /* Addressing mode has changed, so end the block. */
3619 return EXIT_PC_STALE;
3621 #endif
3623 static ExitStatus op_sam(DisasContext *s, DisasOps *o)
3625 int sam = s->insn->data;
3626 TCGv_i64 tsam;
3627 uint64_t mask;
3629 switch (sam) {
3630 case 0:
3631 mask = 0xffffff;
3632 break;
3633 case 1:
3634 mask = 0x7fffffff;
3635 break;
3636 default:
3637 mask = -1;
3638 break;
3641 /* Bizarre but true, we check the address of the current insn for the
3642 specification exception, not the next to be executed. Thus the PoO
3643 documents that Bad Things Happen two bytes before the end. */
3644 if (s->pc & ~mask) {
3645 gen_program_exception(s, PGM_SPECIFICATION);
3646 return EXIT_NORETURN;
3648 s->next_pc &= mask;
3650 tsam = tcg_const_i64(sam);
3651 tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
3652 tcg_temp_free_i64(tsam);
3654 /* Always exit the TB, since we (may have) changed execution mode. */
3655 return EXIT_PC_STALE;
3658 static ExitStatus op_sar(DisasContext *s, DisasOps *o)
3660 int r1 = get_field(s->fields, r1);
3661 tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
3662 return NO_EXIT;
3665 static ExitStatus op_seb(DisasContext *s, DisasOps *o)
3667 gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
3668 return NO_EXIT;
3671 static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
3673 gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
3674 return NO_EXIT;
3677 static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
3679 gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
3680 return_low128(o->out2);
3681 return NO_EXIT;
3684 static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
3686 gen_helper_sqeb(o->out, cpu_env, o->in2);
3687 return NO_EXIT;
3690 static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
3692 gen_helper_sqdb(o->out, cpu_env, o->in2);
3693 return NO_EXIT;
3696 static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
3698 gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
3699 return_low128(o->out2);
3700 return NO_EXIT;
3703 #ifndef CONFIG_USER_ONLY
/* SERVICE CALL (SCLP): privileged; the helper may fault on the SCCB,
   hence potential_page_fault.  CC comes back from the helper.  */
static ExitStatus op_servc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return NO_EXIT;
}

/* SIGNAL PROCESSOR: privileged inter-CPU order; CC from the helper.  */
static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
3724 #endif
/* STORE ON CONDITION (STOC/STOCG/STOCFH, selected by insn->data):
   store r1 to memory only if the m3 condition holds.  */
static ExitStatus op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    /* The address is computed here (not via the in2 hook) so that it is
       only evaluated on the taken path.  */
    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high half of r1.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return NO_EXIT;
}
/* SHIFT LEFT SINGLE (arithmetic).  insn->data is the sign-bit position
   (31 for the 32-bit form, 63 for the 64-bit form).  */
static ExitStatus op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged.  */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return NO_EXIT;
}
/* Logical/arithmetic shifts; the in1/in2 hooks have already widened or
   masked the operands as the specific insn requires.  */
static ExitStatus op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

static ExitStatus op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}
/* SET FPC: install a new floating-point control register.  */
static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return NO_EXIT;
}

/* SET FPC AND SIGNAL: like SFPC but may raise a simulated IEEE trap.  */
static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return NO_EXIT;
}
/* SET ROUNDING MODE: SRNM/SRNMB/SRNMT (distinguished by op2) write a
   2- or 3-bit field at a given position within the FPC.  */
static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
{
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    int mask, pos, len;

    switch (s->fields->op2) {
    case 0x99: /* SRNM */
        pos = 0, len = 2;
        break;
    case 0xb8: /* SRNMB */
        pos = 0, len = 3;
        break;
    case 0xb9: /* SRNMT */
        pos = 4, len = 3;
        break;
    default:
        tcg_abort();
    }
    mask = (1 << len) - 1;

    /* Insert the value into the appropriate field of the FPC.  */
    if (b2 == 0) {
        /* No base register: the displacement is the literal value.  */
        tcg_gen_movi_i64(t1, d2 & mask);
    } else {
        tcg_gen_addi_i64(t1, regs[b2], d2);
        tcg_gen_andi_i64(t1, t1, mask);
    }
    tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(t2, t2, t1, pos, len);
    tcg_temp_free_i64(t1);

    /* Then install the new FPC to set the rounding mode in fpu_status.  */
    gen_helper_sfpc(cpu_env, t2);
    tcg_temp_free_i64(t2);
    return NO_EXIT;
}
/* SET PROGRAM MASK: load the CC and the program mask from bits of r1.  */
static ExitStatus op_spm(DisasContext *s, DisasOps *o)
{
    /* CC is in bits 28-29 of the low word of r1.  */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* The 4-bit program mask follows; install it in the PSW.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return NO_EXIT;
}
3866 #ifndef CONFIG_USER_ONLY
/* SET PSW KEY FROM ADDRESS: the key comes from bits of the address.  */
static ExitStatus op_spka(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return NO_EXIT;
}

/* SET STORAGE KEY EXTENDED.  */
static ExitStatus op_sske(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return NO_EXIT;
}

/* SET SYSTEM MASK: replace the high byte of the PSW mask.  */
static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE CPU ADDRESS: the CPU address is the core id.  */
static ExitStatus op_stap(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return NO_EXIT;
}

/* STORE CLOCK.  */
static ExitStatus op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* STORE CLOCK EXTENDED: store the clock as a 128-bit value, with a zero
   epoch index byte and a zero programmable field.  */
static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1.  */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return NO_EXIT;
}
/* SET CLOCK COMPARATOR.  */
static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sckc(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE CLOCK COMPARATOR.  */
static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stckc(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE CONTROL (64-bit): store control registers r1..r3 at the
   second-operand address.  */
static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}

/* STORE CONTROL (32-bit).  */
static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    check_privileged(s);
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CPU ID: store the cpuid doubleword, aligned.  */
static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    return NO_EXIT;
}

/* SET CPU TIMER.  */
static ExitStatus op_spt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spt(cpu_env, o->in2);
    return NO_EXIT;
}

/* STORE FACILITY LIST (into low memory; see the helper).  */
static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stfl(cpu_env);
    return NO_EXIT;
}

/* STORE CPU TIMER.  */
static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stpt(o->out, cpu_env);
    return NO_EXIT;
}
/* STORE SYSTEM INFORMATION: function code and selectors come from
   general registers 0 and 1; CC from the helper.  */
static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* SET PREFIX.  */
static ExitStatus op_spx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_spx(cpu_env, o->in2);
    return NO_EXIT;
}
/* Channel-subsystem I/O instructions.  These all follow the same
   pattern: privileged, the helper may fault on the operand block,
   the subchannel id (where needed) is taken from general register 1,
   and the CC comes back from the helper.  */

/* CANCEL SUBCHANNEL.  */
static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* CLEAR SUBCHANNEL.  */
static ExitStatus op_csch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* HALT SUBCHANNEL.  */
static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* MODIFY SUBCHANNEL: in2 addresses the SCHIB.  */
static ExitStatus op_msch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESET CHANNEL PATH.  */
static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* RESUME SUBCHANNEL.  */
static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return NO_EXIT;
}

/* START SUBCHANNEL: in2 addresses the ORB.  */
static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* STORE SUBCHANNEL: in2 addresses the SCHIB to fill in.  */
static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST SUBCHANNEL: in2 addresses the IRB.  */
static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* CHANNEL SUBSYSTEM CALL: in2 addresses the command block.  */
static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    potential_page_fault(s);
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
/* STORE PREFIX: read the prefix register, masked to its valid bits.  */
static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return NO_EXIT;
}
/* STORE THEN AND/OR SYSTEM MASK (STNSM op 0xac / STOSM): store the
   current system-mask byte, then AND or OR the immediate into it.  */
static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    check_privileged(s);

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        /* STNSM: AND the immediate into the system-mask byte.  */
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        /* STOSM: OR the immediate into the system-mask byte.  */
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return EXIT_PC_STALE_NOCHAIN;
}
/* STORE USING REAL ADDRESS (32-bit).  */
static ExitStatus op_stura(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}

/* STORE USING REAL ADDRESS (64-bit).  */
static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return NO_EXIT;
}
4145 #endif
/* STORE FACILITY LIST EXTENDED: CC from the helper.  */
static ExitStatus op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* Plain stores of 1/2/4/8 bytes; in1 is the value, in2 the address.  */
static ExitStatus op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}

static ExitStatus op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return NO_EXIT;
}
/* STORE ACCESS MULTIPLE: store access registers r1..r3.  */
static ExitStatus op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return NO_EXIT;
}
/* STORE CHARACTERS UNDER MASK: store the bytes of r1 selected by the m3
   mask.  insn->data is the bit position of the highest byte covered by
   the mask (0 for STCM, 32 for STCMH).  Contiguous masks become a single
   wider store; sparse masks fall back to byte stores.  */
static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest selected byte within r1.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return NO_EXIT;
}
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) at in2.
   insn->data is the element size, 4 (STM) or 8 (STMG).  The loop is
   unrolled at translation time since r1/r3 are known constants.  */
static ExitStatus op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return NO_EXIT;
}
4261 static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
4263 int r1 = get_field(s->fields, r1);
4264 int r3 = get_field(s->fields, r3);
4265 TCGv_i64 t = tcg_temp_new_i64();
4266 TCGv_i64 t4 = tcg_const_i64(4);
4267 TCGv_i64 t32 = tcg_const_i64(32);
4269 while (1) {
4270 tcg_gen_shl_i64(t, regs[r1], t32);
4271 tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
4272 if (r1 == r3) {
4273 break;
4275 tcg_gen_add_i64(o->in2, o->in2, t4);
4276 r1 = (r1 + 1) & 15;
4279 tcg_temp_free_i64(t);
4280 tcg_temp_free_i64(t4);
4281 tcg_temp_free_i64(t32);
4282 return NO_EXIT;
/* STORE PAIR TO QUADWORD: use the atomic helper when the TB may run in
   parallel with other CPUs, the plain one otherwise.  */
static ExitStatus op_stpq(DisasContext *s, DisasOps *o)
{
    if (tb_cflags(s->tb) & CF_PARALLEL) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    }
    return NO_EXIT;
}
/* SEARCH STRING: the helper updates r1/r2 and the CC.  */
static ExitStatus op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}

/* SEARCH STRING UNICODE.  */
static ExitStatus op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return NO_EXIT;
}
/* SUBTRACT.  */
static ExitStatus op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow, where the borrow is
   derived from the current (logical-subtract) condition code.  */
static ExitStatus op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return NO_EXIT;
}
/* SUPERVISOR CALL: record the SVC code and insn length for the
   interrupt handler, then raise the exception.  */
static ExitStatus op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* The PSW and cc_op must be up to date before taking the trap.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return EXIT_NORETURN;
}
4372 static ExitStatus op_tam(DisasContext *s, DisasOps *o)
4374 int cc = 0;
4376 cc |= (s->tb->flags & FLAG_MASK_64) ? 2 : 0;
4377 cc |= (s->tb->flags & FLAG_MASK_32) ? 1 : 0;
4378 gen_op_movi_cc(s, cc);
4379 return NO_EXIT;
/* TEST DATA CLASS (short/long/extended BFP): CC from the helper.  */
static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4403 #ifndef CONFIG_USER_ONLY
/* TEST BLOCK: privileged; CC from the helper.  */
static ExitStatus op_testblock(DisasContext *s, DisasOps *o)
{
    check_privileged(s);
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TEST PROTECTION: CC from the helper.  */
static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, o->addr1, o->in2);
    set_cc_static(s);
    return NO_EXIT;
}
4420 #endif
/* TEST DECIMAL: l1 encodes length - 1, hence the +1.  */
static ExitStatus op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE: replace bytes at addr1 using the table at in2.  */
static ExitStatus op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE EXTENDED: r1/r1+1 operands in out/out2, low half back via
   return_low128; CC from the helper.  */
static ExitStatus op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST.  */
static ExitStatus op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* TRANSLATE AND TEST REVERSE.  */
static ExitStatus op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two bits
   of the opcode select source/destination element sizes.  The test byte
   or character comes from r0, unless the ETF2-enhancement m3 bit says
   to ignore it (then tst = -1 disables the test).  */
static ExitStatus op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    /* Without the ETF2-enhancement facility, m3 is ignored.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        tcg_gen_movi_i32(tst, -1);
    } else {
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return NO_EXIT;
}
/* TEST AND SET: atomically exchange the byte with 0xff; the CC is the
   leftmost bit of the old value.  */
static ExitStatus op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return NO_EXIT;
}
/* UNPACK (decimal).  */
static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return NO_EXIT;
}

/* UNPACK ASCII: operand length is l1 + 1, at most 32 bytes.  */
static ExitStatus op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}

/* UNPACK UNICODE: operand length is l1 + 1, even, at most 64 bytes.  */
static ExitStatus op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return EXIT_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR (character): XC.  The common idiom "XC mem,mem" (both
   operands identical) zeroes memory; recognize it and emit inline
   stores of zero instead of calling the helper.  */
static ExitStatus op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* l encodes length - 1; emit 8/4/2/1-byte zero stores, advancing
           the address between them.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR of anything with itself: result zero, CC 0.  */
        gen_op_movi_cc(s, 0);
        return NO_EXIT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return NO_EXIT;
}
/* EXCLUSIVE OR.  */
static ExitStatus op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return NO_EXIT;
}

/* EXCLUSIVE OR IMMEDIATE: insn->data packs the field size (high byte)
   and the shift to the affected field (low byte).  */
static ExitStatus op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return NO_EXIT;
}
/* Produce a constant-zero output (one or two halves).  */
static ExitStatus op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return NO_EXIT;
}

static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    /* Both halves alias the same temp; mark out2 global-ish so the
       common cleanup does not free it twice.  */
    o->out2 = o->out;
    o->g_out2 = true;
    return NO_EXIT;
}
4639 /* ====================================================================== */
4640 /* The "Cc OUTput" generators. Given the generated output (and in some cases
4641 the original inputs), update the various cc data structures in order to
4642 be able to compute the new condition code. */
/* Each cout_* routine records the inputs/output and a CC_OP so the
   condition code can be computed lazily later.  */

static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed/unsigned add, and add-with-carry.  */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed/unsigned comparison.  */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Floating-point results.  */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* Negative-absolute and complement.  */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* Zero/non-zero result.  The 32-bit variant must mask off the
   (possibly stale) high bits first.  */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Sign of the result (compare against zero).  */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Signed/unsigned subtract, and subtract-with-borrow.  */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* TEST UNDER MASK.  */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
4800 /* ====================================================================== */
4801 /* The "PREParation" generators. These initialize the DisasOps.OUT fields
4802 with the TCG register to which we will write. Used in combination with
4803 the "wout" generators, in some cases we need a new temporary, and in
4804 some cases we can write to a TCG global. */
/* Allocate a fresh temporary for the output.  */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate fresh temporaries for both output halves.  */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into general register r1 (a TCG global).  */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write into the even/odd register pair r1/r1+1.  */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Write directly into floating-point register f1.  */
static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = fregs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_f1 0

/* Write into the 128-bit FP register pair f1/f1+2.  */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = fregs[r1];
    o->out2 = fregs[r1 + 2];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_x1 SPEC_r1_f128
4851 /* ====================================================================== */
4852 /* The "Write OUTput" generators. These generally perform some non-trivial
4853 copy of data to TCG globals, or to main memory. The trivial cases are
4854 generally handled by having a "prep" generator install the TCG global
4855 as the destination of the operation. */
/* Store the full output into general register r1.  */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the output into r1.  */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Insert only the low 16 bits of the output into r1.  */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store into the low or high 32-bit half of r1.  */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Store a 32-bit pair into the even/odd registers r1/r1+1.  */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit output across the r1/r1+1 pair (high half to r1).
   Note this clobbers o->out with the shifted value.  */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* Store pairs into the even/odd registers r3/r3+1.  */
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* Store a short (32-bit) or long (64-bit) FP result into f1.  */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
4934 static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
4936 int f1 = get_field(s->fields, r1);
4937 store_freg(f1, o->out);
4938 store_freg(f1 + 2, o->out2);
4940 #define SPEC_wout_x1 SPEC_r1_f128
/* Store only if r1 and r2 name different registers (used by insns that
   would otherwise overwrite their own source, e.g. register moves).  */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the output to memory at addr1, in various widths.  */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

/* Store the output to memory at the in2 address.  */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Copy the (possibly updated) in2 operand into register r1.  */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5000 /* ====================================================================== */
5001 /* The "INput 1" generators. These load the first operand to an insn. */
5003 static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
5005 o->in1 = load_reg(get_field(f, r1));
5007 #define SPEC_in1_r1 0
5009 static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5011 o->in1 = regs[get_field(f, r1)];
5012 o->g_in1 = true;
5014 #define SPEC_in1_r1_o 0
5016 static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5018 o->in1 = tcg_temp_new_i64();
5019 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
5021 #define SPEC_in1_r1_32s 0
5023 static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5025 o->in1 = tcg_temp_new_i64();
5026 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
5028 #define SPEC_in1_r1_32u 0
5030 static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5032 o->in1 = tcg_temp_new_i64();
5033 tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
5035 #define SPEC_in1_r1_sr32 0
5037 static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
5039 o->in1 = load_reg(get_field(f, r1) + 1);
5041 #define SPEC_in1_r1p1 SPEC_r1_even
5043 static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5045 o->in1 = tcg_temp_new_i64();
5046 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
5048 #define SPEC_in1_r1p1_32s SPEC_r1_even
5050 static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5052 o->in1 = tcg_temp_new_i64();
5053 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
5055 #define SPEC_in1_r1p1_32u SPEC_r1_even
5057 static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5059 int r1 = get_field(f, r1);
5060 o->in1 = tcg_temp_new_i64();
5061 tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
5063 #define SPEC_in1_r1_D32 SPEC_r1_even
5065 static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5067 o->in1 = load_reg(get_field(f, r2));
5069 #define SPEC_in1_r2 0
5071 static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5073 o->in1 = tcg_temp_new_i64();
5074 tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
5076 #define SPEC_in1_r2_sr32 0
5078 static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5080 o->in1 = load_reg(get_field(f, r3));
5082 #define SPEC_in1_r3 0
5084 static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5086 o->in1 = regs[get_field(f, r3)];
5087 o->g_in1 = true;
5089 #define SPEC_in1_r3_o 0
5091 static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5093 o->in1 = tcg_temp_new_i64();
5094 tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
5096 #define SPEC_in1_r3_32s 0
5098 static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5100 o->in1 = tcg_temp_new_i64();
5101 tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
5103 #define SPEC_in1_r3_32u 0
5105 static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5107 int r3 = get_field(f, r3);
5108 o->in1 = tcg_temp_new_i64();
5109 tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
5111 #define SPEC_in1_r3_D32 SPEC_r3_even
5113 static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
5115 o->in1 = load_freg32_i64(get_field(f, r1));
5117 #define SPEC_in1_e1 0
5119 static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5121 o->in1 = fregs[get_field(f, r1)];
5122 o->g_in1 = true;
5124 #define SPEC_in1_f1_o 0
5126 static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5128 int r1 = get_field(f, r1);
5129 o->out = fregs[r1];
5130 o->out2 = fregs[r1 + 2];
5131 o->g_out = o->g_out2 = true;
5133 #define SPEC_in1_x1_o SPEC_r1_f128
5135 static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
5137 o->in1 = fregs[get_field(f, r3)];
5138 o->g_in1 = true;
5140 #define SPEC_in1_f3_o 0
5142 static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
5144 o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
5146 #define SPEC_in1_la1 0
5148 static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
5150 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5151 o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5153 #define SPEC_in1_la2 0
5155 static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5157 in1_la1(s, f, o);
5158 o->in1 = tcg_temp_new_i64();
5159 tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
5161 #define SPEC_in1_m1_8u 0
5163 static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5165 in1_la1(s, f, o);
5166 o->in1 = tcg_temp_new_i64();
5167 tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
5169 #define SPEC_in1_m1_16s 0
5171 static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5173 in1_la1(s, f, o);
5174 o->in1 = tcg_temp_new_i64();
5175 tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
5177 #define SPEC_in1_m1_16u 0
5179 static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5181 in1_la1(s, f, o);
5182 o->in1 = tcg_temp_new_i64();
5183 tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
5185 #define SPEC_in1_m1_32s 0
5187 static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5189 in1_la1(s, f, o);
5190 o->in1 = tcg_temp_new_i64();
5191 tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
5193 #define SPEC_in1_m1_32u 0
5195 static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
5197 in1_la1(s, f, o);
5198 o->in1 = tcg_temp_new_i64();
5199 tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
5201 #define SPEC_in1_m1_64 0
5203 /* ====================================================================== */
5204 /* The "INput 2" generators. These load the second operand to an insn. */
5206 static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
5208 o->in2 = regs[get_field(f, r1)];
5209 o->g_in2 = true;
5211 #define SPEC_in2_r1_o 0
5213 static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5215 o->in2 = tcg_temp_new_i64();
5216 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
5218 #define SPEC_in2_r1_16u 0
5220 static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5222 o->in2 = tcg_temp_new_i64();
5223 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
5225 #define SPEC_in2_r1_32u 0
5227 static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
5229 int r1 = get_field(f, r1);
5230 o->in2 = tcg_temp_new_i64();
5231 tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
5233 #define SPEC_in2_r1_D32 SPEC_r1_even
5235 static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
5237 o->in2 = load_reg(get_field(f, r2));
5239 #define SPEC_in2_r2 0
5241 static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5243 o->in2 = regs[get_field(f, r2)];
5244 o->g_in2 = true;
5246 #define SPEC_in2_r2_o 0
5248 static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
5250 int r2 = get_field(f, r2);
5251 if (r2 != 0) {
5252 o->in2 = load_reg(r2);
5255 #define SPEC_in2_r2_nz 0
5257 static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
5259 o->in2 = tcg_temp_new_i64();
5260 tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
5262 #define SPEC_in2_r2_8s 0
5264 static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5266 o->in2 = tcg_temp_new_i64();
5267 tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
5269 #define SPEC_in2_r2_8u 0
5271 static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5273 o->in2 = tcg_temp_new_i64();
5274 tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
5276 #define SPEC_in2_r2_16s 0
5278 static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5280 o->in2 = tcg_temp_new_i64();
5281 tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
5283 #define SPEC_in2_r2_16u 0
5285 static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
5287 o->in2 = load_reg(get_field(f, r3));
5289 #define SPEC_in2_r3 0
5291 static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5293 o->in2 = tcg_temp_new_i64();
5294 tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
5296 #define SPEC_in2_r3_sr32 0
5298 static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5300 o->in2 = tcg_temp_new_i64();
5301 tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
5303 #define SPEC_in2_r2_32s 0
5305 static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5307 o->in2 = tcg_temp_new_i64();
5308 tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
5310 #define SPEC_in2_r2_32u 0
5312 static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
5314 o->in2 = tcg_temp_new_i64();
5315 tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
5317 #define SPEC_in2_r2_sr32 0
5319 static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
5321 o->in2 = load_freg32_i64(get_field(f, r2));
5323 #define SPEC_in2_e2 0
5325 static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5327 o->in2 = fregs[get_field(f, r2)];
5328 o->g_in2 = true;
5330 #define SPEC_in2_f2_o 0
5332 static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
5334 int r2 = get_field(f, r2);
5335 o->in1 = fregs[r2];
5336 o->in2 = fregs[r2 + 2];
5337 o->g_in1 = o->g_in2 = true;
5339 #define SPEC_in2_x2_o SPEC_r2_f128
5341 static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
5343 o->in2 = get_address(s, 0, get_field(f, r2), 0);
5345 #define SPEC_in2_ra2 0
5347 static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
5349 int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
5350 o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
5352 #define SPEC_in2_a2 0
5354 static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
5356 o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
5358 #define SPEC_in2_ri2 0
5360 static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
5362 help_l2_shift(s, f, o, 31);
5364 #define SPEC_in2_sh32 0
5366 static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
5368 help_l2_shift(s, f, o, 63);
5370 #define SPEC_in2_sh64 0
5372 static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5374 in2_a2(s, f, o);
5375 tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
5377 #define SPEC_in2_m2_8u 0
5379 static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
5381 in2_a2(s, f, o);
5382 tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
5384 #define SPEC_in2_m2_16s 0
5386 static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5388 in2_a2(s, f, o);
5389 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5391 #define SPEC_in2_m2_16u 0
5393 static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5395 in2_a2(s, f, o);
5396 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5398 #define SPEC_in2_m2_32s 0
5400 static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5402 in2_a2(s, f, o);
5403 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5405 #define SPEC_in2_m2_32u 0
5407 static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5409 in2_a2(s, f, o);
5410 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5412 #define SPEC_in2_m2_64 0
5414 static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5416 in2_ri2(s, f, o);
5417 tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
5419 #define SPEC_in2_mri2_16u 0
5421 static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
5423 in2_ri2(s, f, o);
5424 tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
5426 #define SPEC_in2_mri2_32s 0
5428 static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5430 in2_ri2(s, f, o);
5431 tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
5433 #define SPEC_in2_mri2_32u 0
5435 static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
5437 in2_ri2(s, f, o);
5438 tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
5440 #define SPEC_in2_mri2_64 0
5442 static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
5444 o->in2 = tcg_const_i64(get_field(f, i2));
5446 #define SPEC_in2_i2 0
5448 static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
5450 o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
5452 #define SPEC_in2_i2_8u 0
5454 static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
5456 o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
5458 #define SPEC_in2_i2_16u 0
5460 static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
5462 o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
5464 #define SPEC_in2_i2_32u 0
5466 static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5468 uint64_t i2 = (uint16_t)get_field(f, i2);
5469 o->in2 = tcg_const_i64(i2 << s->insn->data);
5471 #define SPEC_in2_i2_16u_shl 0
5473 static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
5475 uint64_t i2 = (uint32_t)get_field(f, i2);
5476 o->in2 = tcg_const_i64(i2 << s->insn->data);
5478 #define SPEC_in2_i2_32u_shl 0
5480 #ifndef CONFIG_USER_ONLY
5481 static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
5483 o->in2 = tcg_const_i64(s->fields->raw_insn);
5485 #define SPEC_in2_insn 0
5486 #endif
5488 /* ====================================================================== */
5490 /* Find opc within the table of insns. This is formulated as a switch
5491 statement so that (1) we get compile-time notice of cut-paste errors
5492 for duplicated opcodes, and (2) the compiler generates the binary
5493 search tree, rather than us having to post-process the table. */
5495 #define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
5496 D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
5498 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
5500 enum DisasInsnEnum {
5501 #include "insn-data.def"
5504 #undef D
5505 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
5506 .opc = OPC, \
5507 .fmt = FMT_##FT, \
5508 .fac = FAC_##FC, \
5509 .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
5510 .name = #NM, \
5511 .help_in1 = in1_##I1, \
5512 .help_in2 = in2_##I2, \
5513 .help_prep = prep_##P, \
5514 .help_wout = wout_##W, \
5515 .help_cout = cout_##CC, \
5516 .help_op = op_##OP, \
5517 .data = D \
5520 /* Allow 0 to be used for NULL in the table below. */
5521 #define in1_0 NULL
5522 #define in2_0 NULL
5523 #define prep_0 NULL
5524 #define wout_0 NULL
5525 #define cout_0 NULL
5526 #define op_0 NULL
5528 #define SPEC_in1_0 0
5529 #define SPEC_in2_0 0
5530 #define SPEC_prep_0 0
5531 #define SPEC_wout_0 0
5533 /* Give smaller names to the various facilities. */
5534 #define FAC_Z S390_FEAT_ZARCH
5535 #define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
5536 #define FAC_DFP S390_FEAT_DFP
5537 #define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
5538 #define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
5539 #define FAC_EE S390_FEAT_EXECUTE_EXT
5540 #define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
5541 #define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
5542 #define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
5543 #define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
5544 #define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
5545 #define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
5546 #define FAC_HW S390_FEAT_STFLE_45 /* high-word */
5547 #define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
5548 #define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
5549 #define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
5550 #define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
5551 #define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
5552 #define FAC_LD S390_FEAT_LONG_DISPLACEMENT
5553 #define FAC_PC S390_FEAT_STFLE_45 /* population count */
5554 #define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
5555 #define FAC_SFLE S390_FEAT_STFLE
5556 #define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
5557 #define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
5558 #define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
5559 #define FAC_DAT_ENH S390_FEAT_DAT_ENH
5560 #define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
5561 #define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
5562 #define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
5563 #define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
5564 #define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
5565 #define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
5566 #define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
5567 #define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
5568 #define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
5570 static const DisasInsn insn_info[] = {
5571 #include "insn-data.def"
5574 #undef D
5575 #define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
5576 case OPC: return &insn_info[insn_ ## NM];
5578 static const DisasInsn *lookup_opc(uint16_t opc)
5580 switch (opc) {
5581 #include "insn-data.def"
5582 default:
5583 return NULL;
5587 #undef D
5588 #undef C
5590 /* Extract a field from the insn. The INSN should be left-aligned in
5591 the uint64_t so that we can more easily utilize the big-bit-endian
5592 definitions we extract from the Principals of Operation. */
5594 static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
5596 uint32_t r, m;
5598 if (f->size == 0) {
5599 return;
5602 /* Zero extract the field from the insn. */
5603 r = (insn << f->beg) >> (64 - f->size);
5605 /* Sign-extend, or un-swap the field as necessary. */
5606 switch (f->type) {
5607 case 0: /* unsigned */
5608 break;
5609 case 1: /* signed */
5610 assert(f->size <= 32);
5611 m = 1u << (f->size - 1);
5612 r = (r ^ m) - m;
5613 break;
5614 case 2: /* dl+dh split, signed 20 bit. */
5615 r = ((int8_t)r << 12) | (r >> 8);
5616 break;
5617 default:
5618 abort();
5621 /* Validate that the "compressed" encoding we selected above is valid.
5622 I.e. we havn't make two different original fields overlap. */
5623 assert(((o->presentC >> f->indexC) & 1) == 0);
5624 o->presentC |= 1 << f->indexC;
5625 o->presentO |= 1 << f->indexO;
5627 o->c[f->indexC] = r;
5630 /* Lookup the insn at the current PC, extracting the operands into O and
5631 returning the info struct for the insn. Returns NULL for invalid insn. */
5633 static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
5634 DisasFields *f)
5636 uint64_t insn, pc = s->pc;
5637 int op, op2, ilen;
5638 const DisasInsn *info;
5640 if (unlikely(s->ex_value)) {
5641 /* Drop the EX data now, so that it's clear on exception paths. */
5642 TCGv_i64 zero = tcg_const_i64(0);
5643 tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value));
5644 tcg_temp_free_i64(zero);
5646 /* Extract the values saved by EXECUTE. */
5647 insn = s->ex_value & 0xffffffffffff0000ull;
5648 ilen = s->ex_value & 0xf;
5649 op = insn >> 56;
5650 } else {
5651 insn = ld_code2(env, pc);
5652 op = (insn >> 8) & 0xff;
5653 ilen = get_ilen(op);
5654 switch (ilen) {
5655 case 2:
5656 insn = insn << 48;
5657 break;
5658 case 4:
5659 insn = ld_code4(env, pc) << 32;
5660 break;
5661 case 6:
5662 insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
5663 break;
5664 default:
5665 g_assert_not_reached();
5668 s->next_pc = s->pc + ilen;
5669 s->ilen = ilen;
5671 /* We can't actually determine the insn format until we've looked up
5672 the full insn opcode. Which we can't do without locating the
5673 secondary opcode. Assume by default that OP2 is at bit 40; for
5674 those smaller insns that don't actually have a secondary opcode
5675 this will correctly result in OP2 = 0. */
5676 switch (op) {
5677 case 0x01: /* E */
5678 case 0x80: /* S */
5679 case 0x82: /* S */
5680 case 0x93: /* S */
5681 case 0xb2: /* S, RRF, RRE, IE */
5682 case 0xb3: /* RRE, RRD, RRF */
5683 case 0xb9: /* RRE, RRF */
5684 case 0xe5: /* SSE, SIL */
5685 op2 = (insn << 8) >> 56;
5686 break;
5687 case 0xa5: /* RI */
5688 case 0xa7: /* RI */
5689 case 0xc0: /* RIL */
5690 case 0xc2: /* RIL */
5691 case 0xc4: /* RIL */
5692 case 0xc6: /* RIL */
5693 case 0xc8: /* SSF */
5694 case 0xcc: /* RIL */
5695 op2 = (insn << 12) >> 60;
5696 break;
5697 case 0xc5: /* MII */
5698 case 0xc7: /* SMI */
5699 case 0xd0 ... 0xdf: /* SS */
5700 case 0xe1: /* SS */
5701 case 0xe2: /* SS */
5702 case 0xe8: /* SS */
5703 case 0xe9: /* SS */
5704 case 0xea: /* SS */
5705 case 0xee ... 0xf3: /* SS */
5706 case 0xf8 ... 0xfd: /* SS */
5707 op2 = 0;
5708 break;
5709 default:
5710 op2 = (insn << 40) >> 56;
5711 break;
5714 memset(f, 0, sizeof(*f));
5715 f->raw_insn = insn;
5716 f->op = op;
5717 f->op2 = op2;
5719 /* Lookup the instruction. */
5720 info = lookup_opc(op << 8 | op2);
5722 /* If we found it, extract the operands. */
5723 if (info != NULL) {
5724 DisasFormat fmt = info->fmt;
5725 int i;
5727 for (i = 0; i < NUM_C_FIELD; ++i) {
5728 extract_field(f, &format_info[fmt].op[i], insn);
5731 return info;
5734 static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
5736 const DisasInsn *insn;
5737 ExitStatus ret = NO_EXIT;
5738 DisasFields f;
5739 DisasOps o;
5741 /* Search for the insn in the table. */
5742 insn = extract_insn(env, s, &f);
5744 /* Not found means unimplemented/illegal opcode. */
5745 if (insn == NULL) {
5746 qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
5747 f.op, f.op2);
5748 gen_illegal_opcode(s);
5749 return EXIT_NORETURN;
5752 #ifndef CONFIG_USER_ONLY
5753 if (s->tb->flags & FLAG_MASK_PER) {
5754 TCGv_i64 addr = tcg_const_i64(s->pc);
5755 gen_helper_per_ifetch(cpu_env, addr);
5756 tcg_temp_free_i64(addr);
5758 #endif
5760 /* Check for insn specification exceptions. */
5761 if (insn->spec) {
5762 int spec = insn->spec, excp = 0, r;
5764 if (spec & SPEC_r1_even) {
5765 r = get_field(&f, r1);
5766 if (r & 1) {
5767 excp = PGM_SPECIFICATION;
5770 if (spec & SPEC_r2_even) {
5771 r = get_field(&f, r2);
5772 if (r & 1) {
5773 excp = PGM_SPECIFICATION;
5776 if (spec & SPEC_r3_even) {
5777 r = get_field(&f, r3);
5778 if (r & 1) {
5779 excp = PGM_SPECIFICATION;
5782 if (spec & SPEC_r1_f128) {
5783 r = get_field(&f, r1);
5784 if (r > 13) {
5785 excp = PGM_SPECIFICATION;
5788 if (spec & SPEC_r2_f128) {
5789 r = get_field(&f, r2);
5790 if (r > 13) {
5791 excp = PGM_SPECIFICATION;
5794 if (excp) {
5795 gen_program_exception(s, excp);
5796 return EXIT_NORETURN;
5800 /* Set up the strutures we use to communicate with the helpers. */
5801 s->insn = insn;
5802 s->fields = &f;
5803 o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
5804 TCGV_UNUSED_I64(o.out);
5805 TCGV_UNUSED_I64(o.out2);
5806 TCGV_UNUSED_I64(o.in1);
5807 TCGV_UNUSED_I64(o.in2);
5808 TCGV_UNUSED_I64(o.addr1);
5810 /* Implement the instruction. */
5811 if (insn->help_in1) {
5812 insn->help_in1(s, &f, &o);
5814 if (insn->help_in2) {
5815 insn->help_in2(s, &f, &o);
5817 if (insn->help_prep) {
5818 insn->help_prep(s, &f, &o);
5820 if (insn->help_op) {
5821 ret = insn->help_op(s, &o);
5823 if (insn->help_wout) {
5824 insn->help_wout(s, &f, &o);
5826 if (insn->help_cout) {
5827 insn->help_cout(s, &o);
5830 /* Free any temporaries created by the helpers. */
5831 if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
5832 tcg_temp_free_i64(o.out);
5834 if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
5835 tcg_temp_free_i64(o.out2);
5837 if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
5838 tcg_temp_free_i64(o.in1);
5840 if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
5841 tcg_temp_free_i64(o.in2);
5843 if (!TCGV_IS_UNUSED_I64(o.addr1)) {
5844 tcg_temp_free_i64(o.addr1);
5847 #ifndef CONFIG_USER_ONLY
5848 if (s->tb->flags & FLAG_MASK_PER) {
5849 /* An exception might be triggered, save PSW if not already done. */
5850 if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
5851 tcg_gen_movi_i64(psw_addr, s->next_pc);
5854 /* Save off cc. */
5855 update_cc_op(s);
5857 /* Call the helper to check for a possible PER exception. */
5858 gen_helper_per_check_exception(cpu_env);
5860 #endif
5862 /* Advance to the next instruction. */
5863 s->pc = s->next_pc;
5864 return ret;
5867 void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
5869 CPUS390XState *env = cs->env_ptr;
5870 DisasContext dc;
5871 target_ulong pc_start;
5872 uint64_t next_page_start;
5873 int num_insns, max_insns;
5874 ExitStatus status;
5875 bool do_debug;
5877 pc_start = tb->pc;
5879 /* 31-bit mode */
5880 if (!(tb->flags & FLAG_MASK_64)) {
5881 pc_start &= 0x7fffffff;
5884 dc.tb = tb;
5885 dc.pc = pc_start;
5886 dc.cc_op = CC_OP_DYNAMIC;
5887 dc.ex_value = tb->cs_base;
5888 do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
5890 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
5892 num_insns = 0;
5893 max_insns = tb_cflags(tb) & CF_COUNT_MASK;
5894 if (max_insns == 0) {
5895 max_insns = CF_COUNT_MASK;
5897 if (max_insns > TCG_MAX_INSNS) {
5898 max_insns = TCG_MAX_INSNS;
5901 gen_tb_start(tb);
5903 do {
5904 tcg_gen_insn_start(dc.pc, dc.cc_op);
5905 num_insns++;
5907 if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
5908 status = EXIT_PC_STALE;
5909 do_debug = true;
5910 /* The address covered by the breakpoint must be included in
5911 [tb->pc, tb->pc + tb->size) in order to for it to be
5912 properly cleared -- thus we increment the PC here so that
5913 the logic setting tb->size below does the right thing. */
5914 dc.pc += 2;
5915 break;
5918 if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
5919 gen_io_start();
5922 status = translate_one(env, &dc);
5924 /* If we reach a page boundary, are single stepping,
5925 or exhaust instruction count, stop generation. */
5926 if (status == NO_EXIT
5927 && (dc.pc >= next_page_start
5928 || tcg_op_buf_full()
5929 || num_insns >= max_insns
5930 || singlestep
5931 || cs->singlestep_enabled
5932 || dc.ex_value)) {
5933 status = EXIT_PC_STALE;
5935 } while (status == NO_EXIT);
5937 if (tb_cflags(tb) & CF_LAST_IO) {
5938 gen_io_end();
5941 switch (status) {
5942 case EXIT_GOTO_TB:
5943 case EXIT_NORETURN:
5944 break;
5945 case EXIT_PC_STALE:
5946 case EXIT_PC_STALE_NOCHAIN:
5947 update_psw_addr(&dc);
5948 /* FALLTHRU */
5949 case EXIT_PC_UPDATED:
5950 /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
5951 cc op type is in env */
5952 update_cc_op(&dc);
5953 /* FALLTHRU */
5954 case EXIT_PC_CC_UPDATED:
5955 /* Exit the TB, either by raising a debug exception or by return. */
5956 if (do_debug) {
5957 gen_exception(EXCP_DEBUG);
5958 } else if (use_exit_tb(&dc) || status == EXIT_PC_STALE_NOCHAIN) {
5959 tcg_gen_exit_tb(0);
5960 } else {
5961 tcg_gen_lookup_and_goto_ptr();
5963 break;
5964 default:
5965 g_assert_not_reached();
5968 gen_tb_end(tb, num_insns);
5970 tb->size = dc.pc - pc_start;
5971 tb->icount = num_insns;
5973 #if defined(S390X_DEBUG_DISAS)
5974 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
5975 && qemu_log_in_addr_range(pc_start)) {
5976 qemu_log_lock();
5977 if (unlikely(dc.ex_value)) {
5978 /* ??? Unfortunately log_target_disas can't use host memory. */
5979 qemu_log("IN: EXECUTE %016" PRIx64 "\n", dc.ex_value);
5980 } else {
5981 qemu_log("IN: %s\n", lookup_symbol(pc_start));
5982 log_target_disas(cs, pc_start, dc.pc - pc_start);
5983 qemu_log("\n");
5985 qemu_log_unlock();
5987 #endif
5990 void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
5991 target_ulong *data)
5993 int cc_op = data[1];
5994 env->psw.addr = data[0];
5995 if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
5996 env->cc_op = cc_op;