/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free
#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32   tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg   tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg    tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg     tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64    tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64     tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64  tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg   tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_constant_reg     tcg_constant_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32

#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32   tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg   tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg    tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg     tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64    tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64     tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64  tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg   tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_constant_reg     tcg_constant_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
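
/*
 * Illustrative note (not in the original source): with the aliases above,
 * translation code is written once against TCGv_reg and expands to the
 * matching 32- or 64-bit TCG op.  For example, on a 32-bit target:
 *
 *     TCGv_reg t = tcg_temp_new();    // really tcg_temp_new_i32()
 *     tcg_gen_add_reg(t, a, b);       // really tcg_gen_add_i32()
 *     tcg_temp_free(t);               // really tcg_temp_free_i32()
 */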
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl  templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
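
/*
 * Worked example (comment only): bit 1 is M and bit 0 is A, so
 * val == 3 (M:A = 1:1) yields -1 (pre-modify), val == 2 (M:A = 1:0)
 * yields +1 (post-modify), and val < 2 yields 0 (no base update).
 */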
/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3
/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif
static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
#ifdef HOST_WORDS_BIGENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
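
/*
 * Usage sketch (comment only): translate functions that cannot fold the
 * current nullification into a movcond bracket their body with the pair
 * above, e.g. (trans_foo is a hypothetical name):
 *
 *     static bool trans_foo(DisasContext *ctx, arg_foo *a)
 *     {
 *         nullify_over(ctx);        // branch over the body if nullified
 *         ... emit the operation ...
 *         return nullify_end(ctx);  // place the skip label, fix up PSW[N]
 *     }
 */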
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}
static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
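
/*
 * Worked example (comment only): cf == 2 encodes c = 1, f = 0, i.e. the
 * "=" condition, which tests res == 0; cf == 3 is the same test with the
 * f bit set, so the TCG condition is inverted to res != 0 ("<>").
 */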
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */
static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */
static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}
/* Similar, but for shift/extract/deposit conditions.  */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}
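
/*
 * Worked example (comment only): orig == 3 converts to c = 7, f = 0,
 * so do_log_cond(14, res) tests the OD (odd) condition; orig == 7 sets
 * the f bit and yields the reverse condition, EV.
 */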
/* Similar, but for unit conditions.  */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}

/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
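
/*
 * Worked example (comment only): for 0x7fffffff + 1, res = 0x80000000,
 * res ^ in1 = 0xffffffff and ~(in1 ^ in2) = 0x80000001, so sv has its
 * sign bit set and do_cond() case 6 (SV) reports the overflow.
 */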
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
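
/*
 * Worked example (comment only): cb above is in1 ^ in2 ^ dest, i.e. the
 * per-bit carry-in vector.  For 0x0000000f + 0x00000001, dest is 0x10 and
 * cb = 0xf ^ 0x1 ^ 0x10 = 0x1e: carries flowed into bits 1 through 4.
 */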
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
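
/*
 * Design note (comment only): seeding the high word with 1 makes the
 * double-word subtract compute {1,in1} - {0,in2}, so the high result is
 * 1 - borrow, which is exactly the PA-RISC carry bit C == 1 when
 * in1 >= in2 unsigned; e.g. 5 - 7 leaves cb_msb == 0.
 */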
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}
static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
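
/*
 * Worked example (comment only): for sp == 0 the space comes from the
 * top two bits of the base register.  With 32-bit registers, the shift
 * by 27 and mask with 030 (octal, i.e. 24) turn those two bits into a
 * byte offset of 0, 8, 16 or 24 into the i64 array sr[4..7].
 */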
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
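
/*
 * Illustrative note (not in the original source): in system mode the
 * resulting global virtual address is {space, offset}; e.g. for sp == 0
 * and base == 0x40001234 the top two bits select SR5, whose 64-bit space
 * id is OR'd above offset bits 0..31 (optionally masked under PSW_W).
 */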
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}
static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd0(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */

        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *        IAOQ_Next{30..31} <- GR[b]{30..31};
 *    else
 *        IAOQ_Next{30..31} <- IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;

    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        dest = offset;
        break;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
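
/*
 * Worked example (comment only): at privilege 2, a target of page|1
 * (requesting more privilege) gives dest = page|2 > page|1, so the
 * movcond keeps dest and the level stays 2; a target of page|3 keeps
 * offset and the level drops to 3, so privilege can only decrease.
 */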
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free(t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx);
}
static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_reg reg;
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    reg = load_gpr(ctx, a->r);

    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(cpu_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(cpu_env);
#endif
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);
    tcg_temp_free(tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_reg dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* Exit the TB to recognize new interrupts. */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}
static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_halt(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_reset(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_getshadowregs(cpu_env);
    return nullify_end(ctx);
#endif
}
static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
    if (a->m) {
        TCGv_reg dest = dest_gpr(ctx, a->b);
        TCGv_reg src1 = load_gpr(ctx, a->b);
        TCGv_reg src2 = load_gpr(ctx, a->x);

        /* The only thing we need to do is the base register modification. */
        tcg_gen_add_reg(dest, src1, src2);
        save_gpr(ctx, a->b, dest);
    }
    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    if (a->imm) {
        level = tcg_constant_i32(a->ri);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, cpu_env, addr, level, want);

    tcg_temp_free_i32(level);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}
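/*
 * Note, an assumption about the helper rather than something stated here:
 * gen_helper_probe is expected to write a nonzero value into DEST when an
 * access of type WANT would be permitted at privilege LEVEL, so the PROBE
 * instruction delivers a boolean result into rt.
 */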
static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled. */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr;
    TCGv_reg ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    if (a->local) {
        gen_helper_ptlbe(cpu_env);
    } else {
        gen_helper_ptlb(cpu_env, addr);
    }

    /* Exit TB for TLB change if mmu is enabled. */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
/*
 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
 * See
 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
 *     page 13-9 (195/206)
 */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl addr, atl, stl;
    TCGv_reg reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *      return gen_illegal(ctx);
     *
     * Note for future: these are 32-bit systems; no hppa64.
     */

    atl = tcg_temp_new_tl();
    stl = tcg_temp_new_tl();
    addr = tcg_temp_new_tl();

    tcg_gen_ld32u_i64(stl, cpu_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, cpu_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_tl(addr, atl, stl);
    tcg_temp_free_tl(atl);
    tcg_temp_free_tl(stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba(cpu_env, addr, reg);
    } else {
        gen_helper_itlbp(cpu_env, addr, reg);
    }
    tcg_temp_free_tl(addr);

    /* Exit TB for TLB change if mmu is enabled. */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_tl vaddr;
    TCGv_reg ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new();
    gen_helper_lpa(paddr, cpu_env, vaddr);

    /* Note that physical address result overrides base modification. */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);
    tcg_temp_free(paddr);

    return nullify_end(ctx);
#endif
}
static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent. */
    save_gpr(ctx, a->t, tcg_constant_reg(0));

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}

static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}
static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_reg);
}

static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_reg);
}
static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            cond_free(&ctx->null_cond);
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_reg dest = dest_gpr(ctx, rt);
                tcg_gen_movi_reg(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            cond_free(&ctx->null_cond);
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         * currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt. */
            nullify_over(ctx);

            /* Advance the instruction queue. */
            copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work. */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_reg);
}
static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_reg);
}
static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
    return nullify_end(ctx);
}
static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
    return nullify_end(ctx);
}
static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2, tmp;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = get_temp(ctx);
    tcg_gen_not_reg(tmp, tcg_r2);
    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx);
}

static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, true);
}
static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
{
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
            is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
    return nullify_end(ctx);
}

static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, true);
}
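/*
 * Worked example, a sketch not from the original source: to add two BCD
 * numbers, software computes r1 + r2 + 0x66666666 and then applies DCOR,
 * which subtracts 6 from every digit whose nibble produced no carry.
 * E.g. for the digits 3+4: 3 + 4 + 6 = 0xd with no nibble carry, so DCOR
 * corrects it to 7; for 8+9: 8 + 9 + 6 = 0x17, the nibble carry is set,
 * and the low digit 7 is already correct.
 */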
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}. */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond. */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit. */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);

    /* Write back the result register. */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB]. */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step. */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification. */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow. */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx);
}
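/*
 * Background, architecture usage rather than anything in this file:
 * PA-RISC has no integer divide instruction, so division is performed in
 * software as a sequence of DS division steps, one per quotient bit.
 * Each step shifts the partial remainder left by one and adds or
 * subtracts the divisor depending on PSW[V], which the code above
 * recomputes so that the next DS in the sequence does the right thing.
 */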
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}
static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);

    return nullify_end(ctx);
}
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
}

static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load. */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    zero = tcg_constant_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
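/*
 * Background note, not from this file: LDCW (load and clear word) is the
 * architecture's only atomic read-modify-write operation; it returns the
 * old word and zeroes the memory location, which is why it is modeled
 * above as an atomic exchange against a constant zero.  Guests typically
 * use it to take spinlocks, with zero meaning "locked", hence the
 * alignment diagnostics generated by the helper.
 */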
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_reg ofs, val;
    TCGv_tl addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             ctx->mmu_idx == MMU_PHYS_IDX);
    val = load_gpr(ctx, a->r);
    if (a->e) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_reg(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_reg(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_reg tcg_rt = load_gpr(ctx, a->r);
    TCGv_reg tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
    if (a->b == 0) {
        tcg_gen_movi_reg(tcg_rt, a->i);
    } else {
        tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv, cb_msb;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new();
    sv = NULL;
    cb_msb = NULL;

    if (cond_need_cb(c)) {
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
    } else {
        tcg_gen_add_reg(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
    }

    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
    save_gpr(ctx, r, dest);
    tcg_temp_free(dest);
    return do_cbranch(ctx, disp, n, &cond);
}

static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, load_const(ctx, a->i), a->c, a->f, a->n, a->disp);
}
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shli_reg(tmp, tcg_r, a->p);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    tcg_temp_free(tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
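/*
 * In both branch-on-bit forms above, the selected bit is shifted into the
 * sign position, so the TCG_COND_LT test fires when the bit was set and
 * TCG_COND_GE when it was clear; a->c selects which sense takes the branch.
 */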
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
    }

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_reg(dest, a->i);

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
{
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    if (a->r1 == 0) {
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);

        tcg_temp_free_i64(t);
        tcg_temp_free_i64(s);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
{
    unsigned sa = 31 - a->cpos;
    TCGv_reg dest, t2;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else if (TARGET_REGISTER_BITS == 32) {
        tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
        tcg_temp_free_i32(t32);
    } else {
        TCGv_i64 t64 = tcg_temp_new_i64();
        tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
        tcg_gen_shri_i64(t64, t64, sa);
        tcg_gen_trunc_i64_reg(dest, t64);
        tcg_temp_free_i64(t64);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
{
    unsigned len = 32 - a->clen;
    TCGv_reg dest, src, tmp;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering. */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (a->se) {
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    tcg_temp_free(tmp);
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
{
    unsigned len = 32 - a->clen;
    unsigned cpos = 31 - a->pos;
    TCGv_reg dest, src;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
{
    unsigned len = 32 - a->clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_reg src = load_gpr(ctx, a->t);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        tcg_gen_ori_reg(dest, src, mask0);
    } else {
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len = 32 - a->clen;
    TCGv_reg dest, val;

    if (a->c) {
        nullify_over(ctx);
    }
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
                        unsigned nz, unsigned clen, TCGv_reg val)
{
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift. */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    mask = tcg_const_reg(msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx);
}

static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
}

static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_const(ctx, a->i));
}
static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address. */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall. */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_temp_free_i64(new_spc);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page. */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease. */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level. */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX. */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i32(ta);
    tcg_temp_free_i32(tb);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    tcg_temp_free_i64(ta);
    tcg_temp_free_i64(tb);

    return nullify_end(ctx);
}
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            return gen_illegal(ctx);
        }
        if (inv) {
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}
/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    tcg_temp_free_i64(y);

    return nullify_end(ctx);
}
/* Convert the fmpyadd single-precision register encodings to standard. */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
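/*
 * A quick check of the mapping, arithmetic only: encodings 0..15 select
 * registers 16..31, and encodings 16..31 select registers 48..63.
 */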
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i32(y);
    tcg_temp_free_i32(z);
    save_frw_i32(a->t, x);
    tcg_temp_free_i32(x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
    }

    tcg_temp_free_i64(y);
    tcg_temp_free_i64(z);
    save_frd(a->t, x);
    tcg_temp_free_i64(x);
    return nullify_end(ctx);
}
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
    cond_free(&ctx->null_cond);
    return true;
}
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_USER_IDX;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
    ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV. */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}
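/*
 * Note, an interpretation of the recovery above: in system mode the TB
 * key packs the front space into the high 32 bits of cs_base and the
 * signed back-minus-front offset into the low 32 bits; a zero offset
 * stands for "unknown", which becomes iaoq_b = -1 here.
 */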
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute. */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch. */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Free any temporaries allocated. */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        tcg_temp_free(ctx->tempr[i]);
        ctx->tempr[i] = NULL;
    }
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        tcg_temp_free_tl(ctx->templ[i]);
        ctx->templ[i] = NULL;
    }
    ctx->ntempr = 0;
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue. */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}
= {
4335 .init_disas_context
= hppa_tr_init_disas_context
,
4336 .tb_start
= hppa_tr_tb_start
,
4337 .insn_start
= hppa_tr_insn_start
,
4338 .translate_insn
= hppa_tr_translate_insn
,
4339 .tb_stop
= hppa_tr_tb_stop
,
4340 .disas_log
= hppa_tr_disas_log
,
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}
void restore_state_to_opc(CPUHPPAState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->iaoq_f = data[0];
    if (data[1] != (target_ureg)-1) {
        env->iaoq_b = data[1];
    }
    /* Since we were executing the instruction at IAOQ_F, and took some
       sort of action that provoked the cpu_restore_state, we can infer
       that the instruction was not nullified. */
    env->psw_n = 0;
}