2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
30 #include "trace-tcg.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

/* NOTE(review): the #undef lines for TCGv/tcg_temp_new/tcg_temp_free and
   the #else/#endif directives in this region were lost in extraction and
   have been restored -- confirm against upstream.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_reg_new
#undef tcg_global_mem_new
#undef tcg_temp_local_new
#undef tcg_temp_free

/* TCGv_tl: the address-sized value type.  */
#if TARGET_LONG_BITS == 64
#define TCGv_tl              TCGv_i64
#define tcg_temp_new_tl      tcg_temp_new_i64
#define tcg_temp_free_tl     tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl  tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl              TCGv_i32
#define tcg_temp_new_tl      tcg_temp_new_i32
#define tcg_temp_free_tl     tcg_temp_free_i32
#define tcg_gen_extu_reg_tl  tcg_gen_mov_i32
#endif

/* TCGv_reg: the register-sized value type, and all generator ops on it.  */
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg             TCGv_i64

#define tcg_temp_new         tcg_temp_new_i64
#define tcg_global_reg_new   tcg_global_reg_new_i64
#define tcg_global_mem_new   tcg_global_mem_new_i64
#define tcg_temp_local_new   tcg_temp_local_new_i64
#define tcg_temp_free        tcg_temp_free_i64

#define tcg_gen_movi_reg     tcg_gen_movi_i64
#define tcg_gen_mov_reg      tcg_gen_mov_i64
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg    tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg    tcg_gen_ld32s_i64
#define tcg_gen_ld_reg       tcg_gen_ld_i64
#define tcg_gen_st8_reg      tcg_gen_st8_i64
#define tcg_gen_st16_reg     tcg_gen_st16_i64
#define tcg_gen_st32_reg     tcg_gen_st32_i64
#define tcg_gen_st_reg       tcg_gen_st_i64
#define tcg_gen_add_reg      tcg_gen_add_i64
#define tcg_gen_addi_reg     tcg_gen_addi_i64
#define tcg_gen_sub_reg      tcg_gen_sub_i64
#define tcg_gen_neg_reg      tcg_gen_neg_i64
#define tcg_gen_subfi_reg    tcg_gen_subfi_i64
#define tcg_gen_subi_reg     tcg_gen_subi_i64
#define tcg_gen_and_reg      tcg_gen_and_i64
#define tcg_gen_andi_reg     tcg_gen_andi_i64
#define tcg_gen_or_reg       tcg_gen_or_i64
#define tcg_gen_ori_reg      tcg_gen_ori_i64
#define tcg_gen_xor_reg      tcg_gen_xor_i64
#define tcg_gen_xori_reg     tcg_gen_xori_i64
#define tcg_gen_not_reg      tcg_gen_not_i64
#define tcg_gen_shl_reg      tcg_gen_shl_i64
#define tcg_gen_shli_reg     tcg_gen_shli_i64
#define tcg_gen_shr_reg      tcg_gen_shr_i64
#define tcg_gen_shri_reg     tcg_gen_shri_i64
#define tcg_gen_sar_reg      tcg_gen_sar_i64
#define tcg_gen_sari_reg     tcg_gen_sari_i64
#define tcg_gen_brcond_reg   tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg  tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg      tcg_gen_mul_i64
#define tcg_gen_muli_reg     tcg_gen_muli_i64
#define tcg_gen_div_reg      tcg_gen_div_i64
#define tcg_gen_rem_reg      tcg_gen_rem_i64
#define tcg_gen_divu_reg     tcg_gen_divu_i64
#define tcg_gen_remu_reg     tcg_gen_remu_i64
#define tcg_gen_discard_reg  tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32   tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg   tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg    tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg     tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64    tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64     tcg_gen_mov_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg   tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg   tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg  tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64  tcg_gen_concat32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i64
#define tcg_gen_eqv_reg      tcg_gen_eqv_i64
#define tcg_gen_nand_reg     tcg_gen_nand_i64
#define tcg_gen_nor_reg      tcg_gen_nor_i64
#define tcg_gen_orc_reg      tcg_gen_orc_i64
#define tcg_gen_clz_reg      tcg_gen_clz_i64
#define tcg_gen_ctz_reg      tcg_gen_ctz_i64
#define tcg_gen_clzi_reg     tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg     tcg_gen_rotl_i64
#define tcg_gen_rotli_reg    tcg_gen_rotli_i64
#define tcg_gen_rotr_reg     tcg_gen_rotr_i64
#define tcg_gen_rotri_reg    tcg_gen_rotri_i64
#define tcg_gen_deposit_reg  tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg  tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_const_reg        tcg_const_i64
#define tcg_const_local_reg  tcg_const_local_i64
#define tcg_gen_movcond_reg  tcg_gen_movcond_i64
#define tcg_gen_add2_reg     tcg_gen_add2_i64
#define tcg_gen_sub2_reg     tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr   tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg             TCGv_i32
#define tcg_temp_new         tcg_temp_new_i32
#define tcg_global_reg_new   tcg_global_reg_new_i32
#define tcg_global_mem_new   tcg_global_mem_new_i32
#define tcg_temp_local_new   tcg_temp_local_new_i32
#define tcg_temp_free        tcg_temp_free_i32

#define tcg_gen_movi_reg     tcg_gen_movi_i32
#define tcg_gen_mov_reg      tcg_gen_mov_i32
#define tcg_gen_ld8u_reg     tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg     tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg    tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg    tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg    tcg_gen_ld_i32
#define tcg_gen_ld32s_reg    tcg_gen_ld_i32
#define tcg_gen_ld_reg       tcg_gen_ld_i32
#define tcg_gen_st8_reg      tcg_gen_st8_i32
#define tcg_gen_st16_reg     tcg_gen_st16_i32
#define tcg_gen_st32_reg     tcg_gen_st32_i32
#define tcg_gen_st_reg       tcg_gen_st_i32
#define tcg_gen_add_reg      tcg_gen_add_i32
#define tcg_gen_addi_reg     tcg_gen_addi_i32
#define tcg_gen_sub_reg      tcg_gen_sub_i32
#define tcg_gen_neg_reg      tcg_gen_neg_i32
#define tcg_gen_subfi_reg    tcg_gen_subfi_i32
#define tcg_gen_subi_reg     tcg_gen_subi_i32
#define tcg_gen_and_reg      tcg_gen_and_i32
#define tcg_gen_andi_reg     tcg_gen_andi_i32
#define tcg_gen_or_reg       tcg_gen_or_i32
#define tcg_gen_ori_reg      tcg_gen_ori_i32
#define tcg_gen_xor_reg      tcg_gen_xor_i32
#define tcg_gen_xori_reg     tcg_gen_xori_i32
#define tcg_gen_not_reg      tcg_gen_not_i32
#define tcg_gen_shl_reg      tcg_gen_shl_i32
#define tcg_gen_shli_reg     tcg_gen_shli_i32
#define tcg_gen_shr_reg      tcg_gen_shr_i32
#define tcg_gen_shri_reg     tcg_gen_shri_i32
#define tcg_gen_sar_reg      tcg_gen_sar_i32
#define tcg_gen_sari_reg     tcg_gen_sari_i32
#define tcg_gen_brcond_reg   tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg  tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg  tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg      tcg_gen_mul_i32
#define tcg_gen_muli_reg     tcg_gen_muli_i32
#define tcg_gen_div_reg      tcg_gen_div_i32
#define tcg_gen_rem_reg      tcg_gen_rem_i32
#define tcg_gen_divu_reg     tcg_gen_divu_i32
#define tcg_gen_remu_reg     tcg_gen_remu_i32
#define tcg_gen_discard_reg  tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32   tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg   tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg    tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg     tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64    tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64     tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg    tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg    tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg   tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg   tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg   tcg_gen_mov_i32
#define tcg_gen_ext32s_reg   tcg_gen_mov_i32
#define tcg_gen_bswap16_reg  tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg  tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64  tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg     tcg_gen_andc_i32
#define tcg_gen_eqv_reg      tcg_gen_eqv_i32
#define tcg_gen_nand_reg     tcg_gen_nand_i32
#define tcg_gen_nor_reg      tcg_gen_nor_i32
#define tcg_gen_orc_reg      tcg_gen_orc_i32
#define tcg_gen_clz_reg      tcg_gen_clz_i32
#define tcg_gen_ctz_reg      tcg_gen_ctz_i32
#define tcg_gen_clzi_reg     tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg     tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg    tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg    tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg     tcg_gen_rotl_i32
#define tcg_gen_rotli_reg    tcg_gen_rotli_i32
#define tcg_gen_rotr_reg     tcg_gen_rotr_i32
#define tcg_gen_rotri_reg    tcg_gen_rotri_i32
#define tcg_gen_deposit_reg  tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg  tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_const_reg        tcg_const_i32
#define tcg_const_local_reg  tcg_const_local_i32
#define tcg_gen_movcond_reg  tcg_gen_movcond_i32
#define tcg_gen_add2_reg     tcg_gen_add2_i32
#define tcg_gen_sub2_reg     tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg  tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg  tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr   tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
251 typedef struct DisasCond
{
258 typedef struct DisasContext
{
259 DisasContextBase base
;
281 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
282 static int expand_sm_imm(int val
)
284 if (val
& PSW_SM_E
) {
285 val
= (val
& ~PSW_SM_E
) | PSW_E
;
287 if (val
& PSW_SM_W
) {
288 val
= (val
& ~PSW_SM_W
) | PSW_W
;
/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(int val)
{
    /* NOTE(review): body reconstructed from the inverted-SR convention
       described above -- confirm against upstream.  */
    return ~val & 7;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  Returns 0 (no modify), 1 (modify after),
   or -1 (modify before).  */
static int ma_to_m(int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Used for branch targets.  */
static int expand_shl2(int val)
{
    /* NOTE(review): body reconstructed (word-aligned displacement)
       -- confirm against upstream.  */
    return val << 2;
}
313 /* Include the auto-generated decoder. */
314 #include "decode.inc.c"
/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
328 typedef struct DisasInsn
{
330 bool (*trans
)(DisasContext
*ctx
, uint32_t insn
,
331 const struct DisasInsn
*f
);
333 void (*ttt
)(TCGv_reg
, TCGv_reg
, TCGv_reg
);
334 void (*weww
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
);
335 void (*dedd
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
);
336 void (*wew
)(TCGv_i32
, TCGv_env
, TCGv_i32
);
337 void (*ded
)(TCGv_i64
, TCGv_env
, TCGv_i64
);
338 void (*wed
)(TCGv_i32
, TCGv_env
, TCGv_i64
);
339 void (*dew
)(TCGv_i64
, TCGv_env
, TCGv_i32
);
/* global register indexes */
static TCGv_reg cpu_gr[32];       /* general registers; [0] is the zero reg */
static TCGv_i64 cpu_sr[4];        /* space registers sr0-sr3 */
static TCGv_i64 cpu_srH;          /* image of sr[4]; used when TB_FLAG_SR_SAME */
static TCGv_reg cpu_iaoq_f;       /* instruction address offset queue, front */
static TCGv_reg cpu_iaoq_b;       /* instruction address offset queue, back */
static TCGv_i64 cpu_iasq_f;       /* instruction address space queue, front */
static TCGv_i64 cpu_iasq_b;       /* instruction address space queue, back */
static TCGv_reg cpu_sar;          /* maps to cr[CR_SAR] */
static TCGv_reg cpu_psw_n;        /* PSW nullify bit */
static TCGv_reg cpu_psw_v;        /* PSW overflow state -- see do_add_sv */
static TCGv_reg cpu_psw_cb;       /* PSW carry/borrow bits */
static TCGv_reg cpu_psw_cb_msb;   /* carry-out of the most significant bit */
357 #include "exec/gen-icount.h"
359 void hppa_translate_init(void)
361 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
363 typedef struct { TCGv_reg
*var
; const char *name
; int ofs
; } GlobalVar
;
364 static const GlobalVar vars
[] = {
365 { &cpu_sar
, "sar", offsetof(CPUHPPAState
, cr
[CR_SAR
]) },
376 /* Use the symbolic register names that match the disassembler. */
377 static const char gr_names
[32][4] = {
378 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
379 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
380 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
381 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
383 /* SR[4-7] are not global registers so that we can index them. */
384 static const char sr_names
[5][4] = {
385 "sr0", "sr1", "sr2", "sr3", "srH"
391 for (i
= 1; i
< 32; i
++) {
392 cpu_gr
[i
] = tcg_global_mem_new(cpu_env
,
393 offsetof(CPUHPPAState
, gr
[i
]),
396 for (i
= 0; i
< 4; i
++) {
397 cpu_sr
[i
] = tcg_global_mem_new_i64(cpu_env
,
398 offsetof(CPUHPPAState
, sr
[i
]),
401 cpu_srH
= tcg_global_mem_new_i64(cpu_env
,
402 offsetof(CPUHPPAState
, sr
[4]),
405 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
406 const GlobalVar
*v
= &vars
[i
];
407 *v
->var
= tcg_global_mem_new(cpu_env
, v
->ofs
, v
->name
);
410 cpu_iasq_f
= tcg_global_mem_new_i64(cpu_env
,
411 offsetof(CPUHPPAState
, iasq_f
),
413 cpu_iasq_b
= tcg_global_mem_new_i64(cpu_env
,
414 offsetof(CPUHPPAState
, iasq_b
),
418 static DisasCond
cond_make_f(void)
427 static DisasCond
cond_make_n(void)
438 static DisasCond
cond_make_0(TCGCond c
, TCGv_reg a0
)
440 DisasCond r
= { .c
= c
, .a1
= NULL
, .a1_is_0
= true };
442 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
443 r
.a0
= tcg_temp_new();
444 tcg_gen_mov_reg(r
.a0
, a0
);
449 static DisasCond
cond_make(TCGCond c
, TCGv_reg a0
, TCGv_reg a1
)
451 DisasCond r
= { .c
= c
};
453 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
454 r
.a0
= tcg_temp_new();
455 tcg_gen_mov_reg(r
.a0
, a0
);
456 r
.a1
= tcg_temp_new();
457 tcg_gen_mov_reg(r
.a1
, a1
);
462 static void cond_prep(DisasCond
*cond
)
465 cond
->a1_is_0
= false;
466 cond
->a1
= tcg_const_reg(0);
470 static void cond_free(DisasCond
*cond
)
474 if (!cond
->a0_is_n
) {
475 tcg_temp_free(cond
->a0
);
477 if (!cond
->a1_is_0
) {
478 tcg_temp_free(cond
->a1
);
480 cond
->a0_is_n
= false;
481 cond
->a1_is_0
= false;
485 case TCG_COND_ALWAYS
:
486 cond
->c
= TCG_COND_NEVER
;
493 static TCGv_reg
get_temp(DisasContext
*ctx
)
495 unsigned i
= ctx
->ntempr
++;
496 g_assert(i
< ARRAY_SIZE(ctx
->tempr
));
497 return ctx
->tempr
[i
] = tcg_temp_new();
500 #ifndef CONFIG_USER_ONLY
501 static TCGv_tl
get_temp_tl(DisasContext
*ctx
)
503 unsigned i
= ctx
->ntempl
++;
504 g_assert(i
< ARRAY_SIZE(ctx
->templ
));
505 return ctx
->templ
[i
] = tcg_temp_new_tl();
509 static TCGv_reg
load_const(DisasContext
*ctx
, target_sreg v
)
511 TCGv_reg t
= get_temp(ctx
);
512 tcg_gen_movi_reg(t
, v
);
516 static TCGv_reg
load_gpr(DisasContext
*ctx
, unsigned reg
)
519 TCGv_reg t
= get_temp(ctx
);
520 tcg_gen_movi_reg(t
, 0);
527 static TCGv_reg
dest_gpr(DisasContext
*ctx
, unsigned reg
)
529 if (reg
== 0 || ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
530 return get_temp(ctx
);
536 static void save_or_nullify(DisasContext
*ctx
, TCGv_reg dest
, TCGv_reg t
)
538 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
539 cond_prep(&ctx
->null_cond
);
540 tcg_gen_movcond_reg(ctx
->null_cond
.c
, dest
, ctx
->null_cond
.a0
,
541 ctx
->null_cond
.a1
, dest
, t
);
543 tcg_gen_mov_reg(dest
, t
);
547 static void save_gpr(DisasContext
*ctx
, unsigned reg
, TCGv_reg t
)
550 save_or_nullify(ctx
, cpu_gr
[reg
], t
);
554 #ifdef HOST_WORDS_BIGENDIAN
562 static TCGv_i32
load_frw_i32(unsigned rt
)
564 TCGv_i32 ret
= tcg_temp_new_i32();
565 tcg_gen_ld_i32(ret
, cpu_env
,
566 offsetof(CPUHPPAState
, fr
[rt
& 31])
567 + (rt
& 32 ? LO_OFS
: HI_OFS
));
571 static TCGv_i32
load_frw0_i32(unsigned rt
)
574 return tcg_const_i32(0);
576 return load_frw_i32(rt
);
580 static TCGv_i64
load_frw0_i64(unsigned rt
)
583 return tcg_const_i64(0);
585 TCGv_i64 ret
= tcg_temp_new_i64();
586 tcg_gen_ld32u_i64(ret
, cpu_env
,
587 offsetof(CPUHPPAState
, fr
[rt
& 31])
588 + (rt
& 32 ? LO_OFS
: HI_OFS
));
593 static void save_frw_i32(unsigned rt
, TCGv_i32 val
)
595 tcg_gen_st_i32(val
, cpu_env
,
596 offsetof(CPUHPPAState
, fr
[rt
& 31])
597 + (rt
& 32 ? LO_OFS
: HI_OFS
));
603 static TCGv_i64
load_frd(unsigned rt
)
605 TCGv_i64 ret
= tcg_temp_new_i64();
606 tcg_gen_ld_i64(ret
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
610 static TCGv_i64
load_frd0(unsigned rt
)
613 return tcg_const_i64(0);
619 static void save_frd(unsigned rt
, TCGv_i64 val
)
621 tcg_gen_st_i64(val
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
624 static void load_spr(DisasContext
*ctx
, TCGv_i64 dest
, unsigned reg
)
626 #ifdef CONFIG_USER_ONLY
627 tcg_gen_movi_i64(dest
, 0);
630 tcg_gen_mov_i64(dest
, cpu_sr
[reg
]);
631 } else if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
632 tcg_gen_mov_i64(dest
, cpu_srH
);
634 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUHPPAState
, sr
[reg
]));
639 /* Skip over the implementation of an insn that has been nullified.
640 Use this when the insn is too complex for a conditional move. */
641 static void nullify_over(DisasContext
*ctx
)
643 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
644 /* The always condition should have been handled in the main loop. */
645 assert(ctx
->null_cond
.c
!= TCG_COND_ALWAYS
);
647 ctx
->null_lab
= gen_new_label();
648 cond_prep(&ctx
->null_cond
);
650 /* If we're using PSW[N], copy it to a temp because... */
651 if (ctx
->null_cond
.a0_is_n
) {
652 ctx
->null_cond
.a0_is_n
= false;
653 ctx
->null_cond
.a0
= tcg_temp_new();
654 tcg_gen_mov_reg(ctx
->null_cond
.a0
, cpu_psw_n
);
656 /* ... we clear it before branching over the implementation,
657 so that (1) it's clear after nullifying this insn and
658 (2) if this insn nullifies the next, PSW[N] is valid. */
659 if (ctx
->psw_n_nonzero
) {
660 ctx
->psw_n_nonzero
= false;
661 tcg_gen_movi_reg(cpu_psw_n
, 0);
664 tcg_gen_brcond_reg(ctx
->null_cond
.c
, ctx
->null_cond
.a0
,
665 ctx
->null_cond
.a1
, ctx
->null_lab
);
666 cond_free(&ctx
->null_cond
);
670 /* Save the current nullification state to PSW[N]. */
671 static void nullify_save(DisasContext
*ctx
)
673 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
674 if (ctx
->psw_n_nonzero
) {
675 tcg_gen_movi_reg(cpu_psw_n
, 0);
679 if (!ctx
->null_cond
.a0_is_n
) {
680 cond_prep(&ctx
->null_cond
);
681 tcg_gen_setcond_reg(ctx
->null_cond
.c
, cpu_psw_n
,
682 ctx
->null_cond
.a0
, ctx
->null_cond
.a1
);
683 ctx
->psw_n_nonzero
= true;
685 cond_free(&ctx
->null_cond
);
688 /* Set a PSW[N] to X. The intention is that this is used immediately
689 before a goto_tb/exit_tb, so that there is no fallthru path to other
690 code within the TB. Therefore we do not update psw_n_nonzero. */
691 static void nullify_set(DisasContext
*ctx
, bool x
)
693 if (ctx
->psw_n_nonzero
|| x
) {
694 tcg_gen_movi_reg(cpu_psw_n
, x
);
698 /* Mark the end of an instruction that may have been nullified.
699 This is the pair to nullify_over. Always returns true so that
700 it may be tail-called from a translate function. */
701 static bool nullify_end(DisasContext
*ctx
)
703 TCGLabel
*null_lab
= ctx
->null_lab
;
704 DisasJumpType status
= ctx
->base
.is_jmp
;
706 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
707 For UPDATED, we cannot update on the nullified path. */
708 assert(status
!= DISAS_IAQ_N_UPDATED
);
710 if (likely(null_lab
== NULL
)) {
711 /* The current insn wasn't conditional or handled the condition
712 applied to it without a branch, so the (new) setting of
713 NULL_COND can be applied directly to the next insn. */
716 ctx
->null_lab
= NULL
;
718 if (likely(ctx
->null_cond
.c
== TCG_COND_NEVER
)) {
719 /* The next instruction will be unconditional,
720 and NULL_COND already reflects that. */
721 gen_set_label(null_lab
);
723 /* The insn that we just executed is itself nullifying the next
724 instruction. Store the condition in the PSW[N] global.
725 We asserted PSW[N] = 0 in nullify_over, so that after the
726 label we have the proper value in place. */
728 gen_set_label(null_lab
);
729 ctx
->null_cond
= cond_make_n();
731 if (status
== DISAS_NORETURN
) {
732 ctx
->base
.is_jmp
= DISAS_NEXT
;
737 static void copy_iaoq_entry(TCGv_reg dest
, target_ureg ival
, TCGv_reg vval
)
739 if (unlikely(ival
== -1)) {
740 tcg_gen_mov_reg(dest
, vval
);
742 tcg_gen_movi_reg(dest
, ival
);
746 static inline target_ureg
iaoq_dest(DisasContext
*ctx
, target_sreg disp
)
748 return ctx
->iaoq_f
+ disp
+ 8;
751 static void gen_excp_1(int exception
)
753 TCGv_i32 t
= tcg_const_i32(exception
);
754 gen_helper_excp(cpu_env
, t
);
755 tcg_temp_free_i32(t
);
758 static void gen_excp(DisasContext
*ctx
, int exception
)
760 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
761 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
763 gen_excp_1(exception
);
764 ctx
->base
.is_jmp
= DISAS_NORETURN
;
767 static bool gen_excp_iir(DisasContext
*ctx
, int exc
)
772 tmp
= tcg_const_reg(ctx
->insn
);
773 tcg_gen_st_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[CR_IIR
]));
776 return nullify_end(ctx
);
779 static bool gen_illegal(DisasContext
*ctx
)
781 return gen_excp_iir(ctx
, EXCP_ILL
);
784 #ifdef CONFIG_USER_ONLY
785 #define CHECK_MOST_PRIVILEGED(EXCP) \
786 return gen_excp_iir(ctx, EXCP)
788 #define CHECK_MOST_PRIVILEGED(EXCP) \
790 if (ctx->privilege != 0) { \
791 return gen_excp_iir(ctx, EXCP); \
796 static bool use_goto_tb(DisasContext
*ctx
, target_ureg dest
)
798 /* Suppress goto_tb in the case of single-steping and IO. */
799 if ((tb_cflags(ctx
->base
.tb
) & CF_LAST_IO
)
800 || ctx
->base
.singlestep_enabled
) {
806 /* If the next insn is to be nullified, and it's on the same page,
807 and we're not attempting to set a breakpoint on it, then we can
808 totally skip the nullified insn. This avoids creating and
809 executing a TB that merely branches to the next TB. */
810 static bool use_nullify_skip(DisasContext
*ctx
)
812 return (((ctx
->iaoq_b
^ ctx
->iaoq_f
) & TARGET_PAGE_MASK
) == 0
813 && !cpu_breakpoint_test(ctx
->cs
, ctx
->iaoq_b
, BP_ANY
));
816 static void gen_goto_tb(DisasContext
*ctx
, int which
,
817 target_ureg f
, target_ureg b
)
819 if (f
!= -1 && b
!= -1 && use_goto_tb(ctx
, f
)) {
820 tcg_gen_goto_tb(which
);
821 tcg_gen_movi_reg(cpu_iaoq_f
, f
);
822 tcg_gen_movi_reg(cpu_iaoq_b
, b
);
823 tcg_gen_exit_tb(ctx
->base
.tb
, which
);
825 copy_iaoq_entry(cpu_iaoq_f
, f
, cpu_iaoq_b
);
826 copy_iaoq_entry(cpu_iaoq_b
, b
, ctx
->iaoq_n_var
);
827 if (ctx
->base
.singlestep_enabled
) {
828 gen_excp_1(EXCP_DEBUG
);
830 tcg_gen_lookup_and_goto_ptr();
835 /* PA has a habit of taking the LSB of a field and using that as the sign,
836 with the rest of the field becoming the least significant bits. */
837 static target_sreg
low_sextract(uint32_t val
, int pos
, int len
)
839 target_ureg x
= -(target_ureg
)extract32(val
, pos
, 1);
840 x
= (x
<< (len
- 1)) | extract32(val
, pos
+ 1, len
- 1);
844 static unsigned assemble_rt64(uint32_t insn
)
846 unsigned r1
= extract32(insn
, 6, 1);
847 unsigned r0
= extract32(insn
, 0, 5);
851 static unsigned assemble_ra64(uint32_t insn
)
853 unsigned r1
= extract32(insn
, 7, 1);
854 unsigned r0
= extract32(insn
, 21, 5);
858 static unsigned assemble_rb64(uint32_t insn
)
860 unsigned r1
= extract32(insn
, 12, 1);
861 unsigned r0
= extract32(insn
, 16, 5);
865 static unsigned assemble_rc64(uint32_t insn
)
867 unsigned r2
= extract32(insn
, 8, 1);
868 unsigned r1
= extract32(insn
, 13, 3);
869 unsigned r0
= extract32(insn
, 9, 2);
870 return r2
* 32 + r1
* 4 + r0
;
873 static inline unsigned assemble_sr3(uint32_t insn
)
875 unsigned s2
= extract32(insn
, 13, 1);
876 unsigned s0
= extract32(insn
, 14, 2);
880 static target_sreg
assemble_16(uint32_t insn
)
882 /* Take the name from PA2.0, which produces a 16-bit number
883 only with wide mode; otherwise a 14-bit number. Since we don't
884 implement wide mode, this is always the 14-bit number. */
885 return low_sextract(insn
, 0, 14);
888 static target_sreg
assemble_16a(uint32_t insn
)
890 /* Take the name from PA2.0, which produces a 14-bit shifted number
891 only with wide mode; otherwise a 12-bit shifted number. Since we
892 don't implement wide mode, this is always the 12-bit number. */
893 target_ureg x
= -(target_ureg
)(insn
& 1);
894 x
= (x
<< 11) | extract32(insn
, 2, 11);
898 static target_sreg
assemble_17(uint32_t insn
)
900 target_ureg x
= -(target_ureg
)(insn
& 1);
901 x
= (x
<< 5) | extract32(insn
, 16, 5);
902 x
= (x
<< 1) | extract32(insn
, 2, 1);
903 x
= (x
<< 10) | extract32(insn
, 3, 10);
907 static target_sreg
assemble_21(uint32_t insn
)
909 target_ureg x
= -(target_ureg
)(insn
& 1);
910 x
= (x
<< 11) | extract32(insn
, 1, 11);
911 x
= (x
<< 2) | extract32(insn
, 14, 2);
912 x
= (x
<< 5) | extract32(insn
, 16, 5);
913 x
= (x
<< 2) | extract32(insn
, 12, 2);
917 static target_sreg
assemble_22(uint32_t insn
)
919 target_ureg x
= -(target_ureg
)(insn
& 1);
920 x
= (x
<< 10) | extract32(insn
, 16, 10);
921 x
= (x
<< 1) | extract32(insn
, 2, 1);
922 x
= (x
<< 10) | extract32(insn
, 3, 10);
926 /* The parisc documentation describes only the general interpretation of
927 the conditions, without describing their exact implementation. The
928 interpretations do not stand up well when considering ADD,C and SUB,B.
929 However, considering the Addition, Subtraction and Logical conditions
930 as a whole it would appear that these relations are similar to what
931 a traditional NZCV set of flags would produce. */
933 static DisasCond
do_cond(unsigned cf
, TCGv_reg res
,
934 TCGv_reg cb_msb
, TCGv_reg sv
)
940 case 0: /* Never / TR */
941 cond
= cond_make_f();
943 case 1: /* = / <> (Z / !Z) */
944 cond
= cond_make_0(TCG_COND_EQ
, res
);
946 case 2: /* < / >= (N / !N) */
947 cond
= cond_make_0(TCG_COND_LT
, res
);
949 case 3: /* <= / > (N | Z / !N & !Z) */
950 cond
= cond_make_0(TCG_COND_LE
, res
);
952 case 4: /* NUV / UV (!C / C) */
953 cond
= cond_make_0(TCG_COND_EQ
, cb_msb
);
955 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
956 tmp
= tcg_temp_new();
957 tcg_gen_neg_reg(tmp
, cb_msb
);
958 tcg_gen_and_reg(tmp
, tmp
, res
);
959 cond
= cond_make_0(TCG_COND_EQ
, tmp
);
962 case 6: /* SV / NSV (V / !V) */
963 cond
= cond_make_0(TCG_COND_LT
, sv
);
965 case 7: /* OD / EV */
966 tmp
= tcg_temp_new();
967 tcg_gen_andi_reg(tmp
, res
, 1);
968 cond
= cond_make_0(TCG_COND_NE
, tmp
);
972 g_assert_not_reached();
975 cond
.c
= tcg_invert_cond(cond
.c
);
981 /* Similar, but for the special case of subtraction without borrow, we
982 can use the inputs directly. This can allow other computation to be
983 deleted as unused. */
985 static DisasCond
do_sub_cond(unsigned cf
, TCGv_reg res
,
986 TCGv_reg in1
, TCGv_reg in2
, TCGv_reg sv
)
992 cond
= cond_make(TCG_COND_EQ
, in1
, in2
);
995 cond
= cond_make(TCG_COND_LT
, in1
, in2
);
998 cond
= cond_make(TCG_COND_LE
, in1
, in2
);
1000 case 4: /* << / >>= */
1001 cond
= cond_make(TCG_COND_LTU
, in1
, in2
);
1003 case 5: /* <<= / >> */
1004 cond
= cond_make(TCG_COND_LEU
, in1
, in2
);
1007 return do_cond(cf
, res
, sv
, sv
);
1010 cond
.c
= tcg_invert_cond(cond
.c
);
1016 /* Similar, but for logicals, where the carry and overflow bits are not
1017 computed, and use of them is undefined. */
1019 static DisasCond
do_log_cond(unsigned cf
, TCGv_reg res
)
1022 case 4: case 5: case 6:
1026 return do_cond(cf
, res
, res
, res
);
1029 /* Similar, but for shift/extract/deposit conditions. */
1031 static DisasCond
do_sed_cond(unsigned orig
, TCGv_reg res
)
1035 /* Convert the compressed condition codes to standard.
1036 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1037 4-7 are the reverse of 0-3. */
1044 return do_log_cond(c
* 2 + f
, res
);
1047 /* Similar, but for unit conditions. */
1049 static DisasCond
do_unit_cond(unsigned cf
, TCGv_reg res
,
1050 TCGv_reg in1
, TCGv_reg in2
)
1053 TCGv_reg tmp
, cb
= NULL
;
1056 /* Since we want to test lots of carry-out bits all at once, do not
1057 * do our normal thing and compute carry-in of bit B+1 since that
1058 * leaves us with carry bits spread across two words.
1060 cb
= tcg_temp_new();
1061 tmp
= tcg_temp_new();
1062 tcg_gen_or_reg(cb
, in1
, in2
);
1063 tcg_gen_and_reg(tmp
, in1
, in2
);
1064 tcg_gen_andc_reg(cb
, cb
, res
);
1065 tcg_gen_or_reg(cb
, cb
, tmp
);
1070 case 0: /* never / TR */
1071 case 1: /* undefined */
1072 case 5: /* undefined */
1073 cond
= cond_make_f();
1076 case 2: /* SBZ / NBZ */
1077 /* See hasless(v,1) from
1078 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1080 tmp
= tcg_temp_new();
1081 tcg_gen_subi_reg(tmp
, res
, 0x01010101u
);
1082 tcg_gen_andc_reg(tmp
, tmp
, res
);
1083 tcg_gen_andi_reg(tmp
, tmp
, 0x80808080u
);
1084 cond
= cond_make_0(TCG_COND_NE
, tmp
);
1088 case 3: /* SHZ / NHZ */
1089 tmp
= tcg_temp_new();
1090 tcg_gen_subi_reg(tmp
, res
, 0x00010001u
);
1091 tcg_gen_andc_reg(tmp
, tmp
, res
);
1092 tcg_gen_andi_reg(tmp
, tmp
, 0x80008000u
);
1093 cond
= cond_make_0(TCG_COND_NE
, tmp
);
1097 case 4: /* SDC / NDC */
1098 tcg_gen_andi_reg(cb
, cb
, 0x88888888u
);
1099 cond
= cond_make_0(TCG_COND_NE
, cb
);
1102 case 6: /* SBC / NBC */
1103 tcg_gen_andi_reg(cb
, cb
, 0x80808080u
);
1104 cond
= cond_make_0(TCG_COND_NE
, cb
);
1107 case 7: /* SHC / NHC */
1108 tcg_gen_andi_reg(cb
, cb
, 0x80008000u
);
1109 cond
= cond_make_0(TCG_COND_NE
, cb
);
1113 g_assert_not_reached();
1119 cond
.c
= tcg_invert_cond(cond
.c
);
1125 /* Compute signed overflow for addition. */
1126 static TCGv_reg
do_add_sv(DisasContext
*ctx
, TCGv_reg res
,
1127 TCGv_reg in1
, TCGv_reg in2
)
1129 TCGv_reg sv
= get_temp(ctx
);
1130 TCGv_reg tmp
= tcg_temp_new();
1132 tcg_gen_xor_reg(sv
, res
, in1
);
1133 tcg_gen_xor_reg(tmp
, in1
, in2
);
1134 tcg_gen_andc_reg(sv
, sv
, tmp
);
1140 /* Compute signed overflow for subtraction. */
1141 static TCGv_reg
do_sub_sv(DisasContext
*ctx
, TCGv_reg res
,
1142 TCGv_reg in1
, TCGv_reg in2
)
1144 TCGv_reg sv
= get_temp(ctx
);
1145 TCGv_reg tmp
= tcg_temp_new();
1147 tcg_gen_xor_reg(sv
, res
, in1
);
1148 tcg_gen_xor_reg(tmp
, in1
, in2
);
1149 tcg_gen_and_reg(sv
, sv
, tmp
);
1155 static void do_add(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1156 TCGv_reg in2
, unsigned shift
, bool is_l
,
1157 bool is_tsv
, bool is_tc
, bool is_c
, unsigned cf
)
1159 TCGv_reg dest
, cb
, cb_msb
, sv
, tmp
;
1160 unsigned c
= cf
>> 1;
1163 dest
= tcg_temp_new();
1168 tmp
= get_temp(ctx
);
1169 tcg_gen_shli_reg(tmp
, in1
, shift
);
1173 if (!is_l
|| c
== 4 || c
== 5) {
1174 TCGv_reg zero
= tcg_const_reg(0);
1175 cb_msb
= get_temp(ctx
);
1176 tcg_gen_add2_reg(dest
, cb_msb
, in1
, zero
, in2
, zero
);
1178 tcg_gen_add2_reg(dest
, cb_msb
, dest
, cb_msb
, cpu_psw_cb_msb
, zero
);
1180 tcg_temp_free(zero
);
1183 tcg_gen_xor_reg(cb
, in1
, in2
);
1184 tcg_gen_xor_reg(cb
, cb
, dest
);
1187 tcg_gen_add_reg(dest
, in1
, in2
);
1189 tcg_gen_add_reg(dest
, dest
, cpu_psw_cb_msb
);
1193 /* Compute signed overflow if required. */
1195 if (is_tsv
|| c
== 6) {
1196 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
1198 /* ??? Need to include overflow from shift. */
1199 gen_helper_tsv(cpu_env
, sv
);
1203 /* Emit any conditional trap before any writeback. */
1204 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
1207 tmp
= tcg_temp_new();
1208 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1209 gen_helper_tcond(cpu_env
, tmp
);
1213 /* Write back the result. */
1215 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1216 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1218 save_gpr(ctx
, rt
, dest
);
1219 tcg_temp_free(dest
);
1221 /* Install the new nullification. */
1222 cond_free(&ctx
->null_cond
);
1223 ctx
->null_cond
= cond
;
/* Decode helper for register-register ADD variants: load r1/r2 operands and
   forward to do_add with the shift amount and the is_l/is_tsv/is_tc/is_c flags. */
1226 static bool do_add_reg(DisasContext
*ctx
, arg_rrr_cf_sh
*a
,
1227 bool is_l
, bool is_tsv
, bool is_tc
, bool is_c
)
1229 TCGv_reg tcg_r1
, tcg_r2
;
1234 tcg_r1
= load_gpr(ctx
, a
->r1
);
1235 tcg_r2
= load_gpr(ctx
, a
->r2
);
1236 do_add(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->sh
, is_l
, is_tsv
, is_tc
, is_c
, a
->cf
);
1237 return nullify_end(ctx
);
1240 static void do_sub(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1241 TCGv_reg in2
, bool is_tsv
, bool is_b
,
1242 bool is_tc
, unsigned cf
)
1244 TCGv_reg dest
, sv
, cb
, cb_msb
, zero
, tmp
;
1245 unsigned c
= cf
>> 1;
1248 dest
= tcg_temp_new();
1249 cb
= tcg_temp_new();
1250 cb_msb
= tcg_temp_new();
1252 zero
= tcg_const_reg(0);
1254 /* DEST,C = IN1 + ~IN2 + C. */
1255 tcg_gen_not_reg(cb
, in2
);
1256 tcg_gen_add2_reg(dest
, cb_msb
, in1
, zero
, cpu_psw_cb_msb
, zero
);
1257 tcg_gen_add2_reg(dest
, cb_msb
, dest
, cb_msb
, cb
, zero
);
1258 tcg_gen_xor_reg(cb
, cb
, in1
);
1259 tcg_gen_xor_reg(cb
, cb
, dest
);
1261 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1262 operations by seeding the high word with 1 and subtracting. */
1263 tcg_gen_movi_reg(cb_msb
, 1);
1264 tcg_gen_sub2_reg(dest
, cb_msb
, in1
, cb_msb
, in2
, zero
);
1265 tcg_gen_eqv_reg(cb
, in1
, in2
);
1266 tcg_gen_xor_reg(cb
, cb
, dest
);
1268 tcg_temp_free(zero
);
1270 /* Compute signed overflow if required. */
1272 if (is_tsv
|| c
== 6) {
1273 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1275 gen_helper_tsv(cpu_env
, sv
);
1279 /* Compute the condition. We cannot use the special case for borrow. */
1281 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
1283 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
1286 /* Emit any conditional trap before any writeback. */
1289 tmp
= tcg_temp_new();
1290 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1291 gen_helper_tcond(cpu_env
, tmp
);
1295 /* Write back the result. */
1296 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1297 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1298 save_gpr(ctx
, rt
, dest
);
1299 tcg_temp_free(dest
);
1301 /* Install the new nullification. */
1302 cond_free(&ctx
->null_cond
);
1303 ctx
->null_cond
= cond
;
/* Decode helper for register-register SUB variants: load r1/r2 operands and
   forward to do_sub with the is_tsv/is_b/is_tc flags and condition field. */
1306 static bool do_sub_reg(DisasContext
*ctx
, arg_rrr_cf
*a
,
1307 bool is_tsv
, bool is_b
, bool is_tc
)
1309 TCGv_reg tcg_r1
, tcg_r2
;
1314 tcg_r1
= load_gpr(ctx
, a
->r1
);
1315 tcg_r2
= load_gpr(ctx
, a
->r2
);
1316 do_sub(ctx
, a
->t
, tcg_r1
, tcg_r2
, is_tsv
, is_b
, is_tc
, a
->cf
);
1317 return nullify_end(ctx
);
/* COMCLR: compare in1 - in2 to form the nullification condition, then write
   ZERO to the target register; the difference itself is discarded. */
1320 static void do_cmpclr(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1321 TCGv_reg in2
, unsigned cf
)
1326 dest
= tcg_temp_new();
1327 tcg_gen_sub_reg(dest
, in1
, in2
);
1329 /* Compute signed overflow if required. */
1331 if ((cf
>> 1) == 6) {
1332 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1335 /* Form the condition for the compare. */
1336 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
/* Clear the target: COMCLR always stores zero, independent of the compare. */
1339 tcg_gen_movi_reg(dest
, 0);
1340 save_gpr(ctx
, rt
, dest
);
1341 tcg_temp_free(dest
);
1343 /* Install the new nullification. */
1344 cond_free(&ctx
->null_cond
);
1345 ctx
->null_cond
= cond
;
/* Emit a logical operation via the fn callback, write the result to rt, and
   install the logical-condition nullification derived from cf and the result.
   NOTE(review): the line that invokes fn() was dropped by the extraction. */
1348 static void do_log(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1349 TCGv_reg in2
, unsigned cf
,
1350 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1352 TCGv_reg dest
= dest_gpr(ctx
, rt
);
1354 /* Perform the operation, and writeback. */
1356 save_gpr(ctx
, rt
, dest
);
1358 /* Install the new nullification. */
1359 cond_free(&ctx
->null_cond
);
1361 ctx
->null_cond
= do_log_cond(cf
, dest
);
/* Decode helper for the register-register logical ops (AND/OR/XOR/ANDCM):
   load operands and forward to do_log with the TCG op callback. */
1365 static bool do_log_reg(DisasContext
*ctx
, arg_rrr_cf
*a
,
1366 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1368 TCGv_reg tcg_r1
, tcg_r2
;
1373 tcg_r1
= load_gpr(ctx
, a
->r1
);
1374 tcg_r2
= load_gpr(ctx
, a
->r2
);
1375 do_log(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, fn
);
1376 return nullify_end(ctx
);
1379 static void do_unit(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1380 TCGv_reg in2
, unsigned cf
, bool is_tc
,
1381 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1387 dest
= dest_gpr(ctx
, rt
);
1389 save_gpr(ctx
, rt
, dest
);
1390 cond_free(&ctx
->null_cond
);
1392 dest
= tcg_temp_new();
1395 cond
= do_unit_cond(cf
, dest
, in1
, in2
);
1398 TCGv_reg tmp
= tcg_temp_new();
1400 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1401 gen_helper_tcond(cpu_env
, tmp
);
1404 save_gpr(ctx
, rt
, dest
);
1406 cond_free(&ctx
->null_cond
);
1407 ctx
->null_cond
= cond
;
1411 #ifndef CONFIG_USER_ONLY
1412 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1413 from the top 2 bits of the base register. There are a few system
1414 instructions that have a 3-bit space specifier, for which SR0 is
1415 not special. To handle this, pass ~SP. */
1416 static TCGv_i64
space_select(DisasContext
*ctx
, int sp
, TCGv_reg base
)
1426 spc
= get_temp_tl(ctx
);
1427 load_spr(ctx
, spc
, sp
);
1430 if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
1434 ptr
= tcg_temp_new_ptr();
1435 tmp
= tcg_temp_new();
1436 spc
= get_temp_tl(ctx
);
1438 tcg_gen_shri_reg(tmp
, base
, TARGET_REGISTER_BITS
- 5);
1439 tcg_gen_andi_reg(tmp
, tmp
, 030);
1440 tcg_gen_trunc_reg_ptr(ptr
, tmp
);
1443 tcg_gen_add_ptr(ptr
, ptr
, cpu_env
);
1444 tcg_gen_ld_i64(spc
, ptr
, offsetof(CPUHPPAState
, sr
[4]));
1445 tcg_temp_free_ptr(ptr
);
1451 static void form_gva(DisasContext
*ctx
, TCGv_tl
*pgva
, TCGv_reg
*pofs
,
1452 unsigned rb
, unsigned rx
, int scale
, target_sreg disp
,
1453 unsigned sp
, int modify
, bool is_phys
)
1455 TCGv_reg base
= load_gpr(ctx
, rb
);
1458 /* Note that RX is mutually exclusive with DISP. */
1460 ofs
= get_temp(ctx
);
1461 tcg_gen_shli_reg(ofs
, cpu_gr
[rx
], scale
);
1462 tcg_gen_add_reg(ofs
, ofs
, base
);
1463 } else if (disp
|| modify
) {
1464 ofs
= get_temp(ctx
);
1465 tcg_gen_addi_reg(ofs
, base
, disp
);
1471 #ifdef CONFIG_USER_ONLY
1472 *pgva
= (modify
<= 0 ? ofs
: base
);
1474 TCGv_tl addr
= get_temp_tl(ctx
);
1475 tcg_gen_extu_reg_tl(addr
, modify
<= 0 ? ofs
: base
);
1476 if (ctx
->tb_flags
& PSW_W
) {
1477 tcg_gen_andi_tl(addr
, addr
, 0x3fffffffffffffffull
);
1480 tcg_gen_or_tl(addr
, addr
, space_select(ctx
, sp
, base
));
1486 /* Emit a memory load. The modify parameter should be
1487 * < 0 for pre-modify,
1488 * > 0 for post-modify,
1489 * = 0 for no base register update.
1491 static void do_load_32(DisasContext
*ctx
, TCGv_i32 dest
, unsigned rb
,
1492 unsigned rx
, int scale
, target_sreg disp
,
1493 unsigned sp
, int modify
, TCGMemOp mop
)
1498 /* Caller uses nullify_over/nullify_end. */
1499 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1501 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1502 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1503 tcg_gen_qemu_ld_reg(dest
, addr
, ctx
->mmu_idx
, mop
);
1505 save_gpr(ctx
, rb
, ofs
);
1509 static void do_load_64(DisasContext
*ctx
, TCGv_i64 dest
, unsigned rb
,
1510 unsigned rx
, int scale
, target_sreg disp
,
1511 unsigned sp
, int modify
, TCGMemOp mop
)
1516 /* Caller uses nullify_over/nullify_end. */
1517 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1519 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1520 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1521 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mmu_idx
, mop
);
1523 save_gpr(ctx
, rb
, ofs
);
1527 static void do_store_32(DisasContext
*ctx
, TCGv_i32 src
, unsigned rb
,
1528 unsigned rx
, int scale
, target_sreg disp
,
1529 unsigned sp
, int modify
, TCGMemOp mop
)
1534 /* Caller uses nullify_over/nullify_end. */
1535 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1537 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1538 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1539 tcg_gen_qemu_st_i32(src
, addr
, ctx
->mmu_idx
, mop
);
1541 save_gpr(ctx
, rb
, ofs
);
1545 static void do_store_64(DisasContext
*ctx
, TCGv_i64 src
, unsigned rb
,
1546 unsigned rx
, int scale
, target_sreg disp
,
1547 unsigned sp
, int modify
, TCGMemOp mop
)
1552 /* Caller uses nullify_over/nullify_end. */
1553 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1555 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1556 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1557 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mmu_idx
, mop
);
1559 save_gpr(ctx
, rb
, ofs
);
1563 #if TARGET_REGISTER_BITS == 64
1564 #define do_load_reg do_load_64
1565 #define do_store_reg do_store_64
1567 #define do_load_reg do_load_32
1568 #define do_store_reg do_store_32
/* Integer load into GR rt. When no base-register update occurs, load straight
   into the destination; otherwise use a temporary so that RT == RB still
   observes the loaded value rather than the updated base. */
1571 static bool do_load(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1572 unsigned rx
, int scale
, target_sreg disp
,
1573 unsigned sp
, int modify
, TCGMemOp mop
)
1580 /* No base register update. */
1581 dest
= dest_gpr(ctx
, rt
);
1583 /* Make sure if RT == RB, we see the result of the load. */
1584 dest
= get_temp(ctx
);
1586 do_load_reg(ctx
, dest
, rb
, rx
, scale
, disp
, sp
, modify
, mop
);
1587 save_gpr(ctx
, rt
, dest
);
1589 return nullify_end(ctx
);
/* FP 32-bit load: fetch a word into an i32 temp, store into FP register rt,
   then notify the FPU helper (loaded_fr0) -- presumably to track a write to
   fr0/status; confirm against the helper's definition. */
1592 static void do_floadw(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1593 unsigned rx
, int scale
, target_sreg disp
,
1594 unsigned sp
, int modify
)
1600 tmp
= tcg_temp_new_i32();
1601 do_load_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1602 save_frw_i32(rt
, tmp
);
1603 tcg_temp_free_i32(tmp
);
1606 gen_helper_loaded_fr0(cpu_env
);
/* FP 64-bit load: fetch a doubleword (MO_TEQ) into an i64 temp; mirrors
   do_floadw. NOTE(review): the save_frd() line appears dropped by extraction. */
1612 static void do_floadd(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1613 unsigned rx
, int scale
, target_sreg disp
,
1614 unsigned sp
, int modify
)
1620 tmp
= tcg_temp_new_i64();
1621 do_load_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEQ
);
1623 tcg_temp_free_i64(tmp
);
1626 gen_helper_loaded_fr0(cpu_env
);
/* Integer store of GR rt: no index register or scaling (rx = 0, scale = 0),
   only displacement addressing with optional base modify. */
1632 static bool do_store(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1633 target_sreg disp
, unsigned sp
,
1634 int modify
, TCGMemOp mop
)
1637 do_store_reg(ctx
, load_gpr(ctx
, rt
), rb
, 0, 0, disp
, sp
, modify
, mop
);
1638 return nullify_end(ctx
);
/* FP 32-bit store: read FP register rt into an i32 temp and store it as a word. */
1641 static void do_fstorew(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1642 unsigned rx
, int scale
, target_sreg disp
,
1643 unsigned sp
, int modify
)
1649 tmp
= load_frw_i32(rt
);
1650 do_store_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1651 tcg_temp_free_i32(tmp
);
/* FP 64-bit store: mirrors do_fstorew with a doubleword access (MO_TEQ).
   NOTE(review): the load_frd() line that fills tmp was dropped by extraction. */
1656 static void do_fstored(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1657 unsigned rx
, int scale
, target_sreg disp
,
1658 unsigned sp
, int modify
)
1665 do_store_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEQ
);
1666 tcg_temp_free_i64(tmp
);
/* Unary FP op, 32-bit result from 32-bit source: tmp = func(env, frw0[ra]),
   written back to FP register rt. The helper gets cpu_env for status flags. */
1671 static void do_fop_wew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1672 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
1677 tmp
= load_frw0_i32(ra
);
1679 func(tmp
, cpu_env
, tmp
);
1681 save_frw_i32(rt
, tmp
);
1682 tcg_temp_free_i32(tmp
);
/* Unary FP op, 32-bit result from 64-bit source (e.g. double->single convert).
   NOTE(review): the load of src was dropped by extraction; text kept verbatim. */
1686 static void do_fop_wed(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1687 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
1694 dst
= tcg_temp_new_i32();
1696 func(dst
, cpu_env
, src
);
1698 tcg_temp_free_i64(src
);
1699 save_frw_i32(rt
, dst
);
1700 tcg_temp_free_i32(dst
);
/* Unary FP op, 64-bit result from 64-bit source; in-place on the temp.
   NOTE(review): the save_frd() writeback line was dropped by extraction. */
1704 static void do_fop_ded(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1705 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
1710 tmp
= load_frd0(ra
);
1712 func(tmp
, cpu_env
, tmp
);
1715 tcg_temp_free_i64(tmp
);
/* Unary FP op, 64-bit result from 32-bit source (e.g. single->double convert).
   NOTE(review): the save_frd() writeback line was dropped by extraction. */
1719 static void do_fop_dew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1720 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
1726 src
= load_frw0_i32(ra
);
1727 dst
= tcg_temp_new_i64();
1729 func(dst
, cpu_env
, src
);
1731 tcg_temp_free_i32(src
);
1733 tcg_temp_free_i64(dst
);
/* Binary FP op on 32-bit operands: a = func(env, frw0[ra], frw0[rb]),
   result written to FP register rt; 'a' is reused as the destination. */
1737 static void do_fop_weww(DisasContext
*ctx
, unsigned rt
,
1738 unsigned ra
, unsigned rb
,
1739 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
1744 a
= load_frw0_i32(ra
);
1745 b
= load_frw0_i32(rb
);
1747 func(a
, cpu_env
, a
, b
);
1749 tcg_temp_free_i32(b
);
1750 save_frw_i32(rt
, a
);
1751 tcg_temp_free_i32(a
);
/* Binary FP op on 64-bit operands; mirrors do_fop_weww. NOTE(review): the
   operand loads and the save_frd() writeback were dropped by extraction. */
1755 static void do_fop_dedd(DisasContext
*ctx
, unsigned rt
,
1756 unsigned ra
, unsigned rb
,
1757 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
1765 func(a
, cpu_env
, a
, b
);
1767 tcg_temp_free_i64(b
);
1769 tcg_temp_free_i64(a
);
1773 /* Emit an unconditional branch to a direct target, which may or may not
1774 have already had nullification handled. */
1775 static bool do_dbranch(DisasContext
*ctx
, target_ureg dest
,
1776 unsigned link
, bool is_n
)
1778 if (ctx
->null_cond
.c
== TCG_COND_NEVER
&& ctx
->null_lab
== NULL
) {
1780 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1784 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1790 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1793 if (is_n
&& use_nullify_skip(ctx
)) {
1794 nullify_set(ctx
, 0);
1795 gen_goto_tb(ctx
, 0, dest
, dest
+ 4);
1797 nullify_set(ctx
, is_n
);
1798 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, dest
);
1803 nullify_set(ctx
, 0);
1804 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, ctx
->iaoq_n
);
1805 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1810 /* Emit a conditional branch to a direct target. If the branch itself
1811 is nullified, we should have already used nullify_over. */
1812 static bool do_cbranch(DisasContext
*ctx
, target_sreg disp
, bool is_n
,
1815 target_ureg dest
= iaoq_dest(ctx
, disp
);
1816 TCGLabel
*taken
= NULL
;
1817 TCGCond c
= cond
->c
;
1820 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1822 /* Handle TRUE and NEVER as direct branches. */
1823 if (c
== TCG_COND_ALWAYS
) {
1824 return do_dbranch(ctx
, dest
, 0, is_n
&& disp
>= 0);
1826 if (c
== TCG_COND_NEVER
) {
1827 return do_dbranch(ctx
, ctx
->iaoq_n
, 0, is_n
&& disp
< 0);
1830 taken
= gen_new_label();
1832 tcg_gen_brcond_reg(c
, cond
->a0
, cond
->a1
, taken
);
1835 /* Not taken: Condition not satisfied; nullify on backward branches. */
1836 n
= is_n
&& disp
< 0;
1837 if (n
&& use_nullify_skip(ctx
)) {
1838 nullify_set(ctx
, 0);
1839 gen_goto_tb(ctx
, 0, ctx
->iaoq_n
, ctx
->iaoq_n
+ 4);
1841 if (!n
&& ctx
->null_lab
) {
1842 gen_set_label(ctx
->null_lab
);
1843 ctx
->null_lab
= NULL
;
1845 nullify_set(ctx
, n
);
1846 if (ctx
->iaoq_n
== -1) {
1847 /* The temporary iaoq_n_var died at the branch above.
1848 Regenerate it here instead of saving it. */
1849 tcg_gen_addi_reg(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
1851 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
1854 gen_set_label(taken
);
1856 /* Taken: Condition satisfied; nullify on forward branches. */
1857 n
= is_n
&& disp
>= 0;
1858 if (n
&& use_nullify_skip(ctx
)) {
1859 nullify_set(ctx
, 0);
1860 gen_goto_tb(ctx
, 1, dest
, dest
+ 4);
1862 nullify_set(ctx
, n
);
1863 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, dest
);
1866 /* Not taken: the branch itself was nullified. */
1867 if (ctx
->null_lab
) {
1868 gen_set_label(ctx
->null_lab
);
1869 ctx
->null_lab
= NULL
;
1870 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
1872 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1877 /* Emit an unconditional branch to an indirect target. This handles
1878 nullification of the branch itself. */
1879 static bool do_ibranch(DisasContext
*ctx
, TCGv_reg dest
,
1880 unsigned link
, bool is_n
)
1882 TCGv_reg a0
, a1
, next
, tmp
;
1885 assert(ctx
->null_lab
== NULL
);
1887 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
1889 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1891 next
= get_temp(ctx
);
1892 tcg_gen_mov_reg(next
, dest
);
1894 if (use_nullify_skip(ctx
)) {
1895 tcg_gen_mov_reg(cpu_iaoq_f
, next
);
1896 tcg_gen_addi_reg(cpu_iaoq_b
, next
, 4);
1897 nullify_set(ctx
, 0);
1898 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
1901 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1904 ctx
->iaoq_n_var
= next
;
1905 } else if (is_n
&& use_nullify_skip(ctx
)) {
1906 /* The (conditional) branch, B, nullifies the next insn, N,
1907 and we're allowed to skip execution N (no single-step or
1908 tracepoint in effect). Since the goto_ptr that we must use
1909 for the indirect branch consumes no special resources, we
1910 can (conditionally) skip B and continue execution. */
1911 /* The use_nullify_skip test implies we have a known control path. */
1912 tcg_debug_assert(ctx
->iaoq_b
!= -1);
1913 tcg_debug_assert(ctx
->iaoq_n
!= -1);
1915 /* We do have to handle the non-local temporary, DEST, before
1916 branching. Since IOAQ_F is not really live at this point, we
1917 can simply store DEST optimistically. Similarly with IAOQ_B. */
1918 tcg_gen_mov_reg(cpu_iaoq_f
, dest
);
1919 tcg_gen_addi_reg(cpu_iaoq_b
, dest
, 4);
1923 tcg_gen_movi_reg(cpu_gr
[link
], ctx
->iaoq_n
);
1925 tcg_gen_lookup_and_goto_ptr();
1926 return nullify_end(ctx
);
1928 cond_prep(&ctx
->null_cond
);
1929 c
= ctx
->null_cond
.c
;
1930 a0
= ctx
->null_cond
.a0
;
1931 a1
= ctx
->null_cond
.a1
;
1933 tmp
= tcg_temp_new();
1934 next
= get_temp(ctx
);
1936 copy_iaoq_entry(tmp
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1937 tcg_gen_movcond_reg(c
, next
, a0
, a1
, tmp
, dest
);
1939 ctx
->iaoq_n_var
= next
;
1942 tcg_gen_movcond_reg(c
, cpu_gr
[link
], a0
, a1
, cpu_gr
[link
], tmp
);
1946 /* The branch nullifies the next insn, which means the state of N
1947 after the branch is the inverse of the state of N that applied
1949 tcg_gen_setcond_reg(tcg_invert_cond(c
), cpu_psw_n
, a0
, a1
);
1950 cond_free(&ctx
->null_cond
);
1951 ctx
->null_cond
= cond_make_n();
1952 ctx
->psw_n_nonzero
= true;
1954 cond_free(&ctx
->null_cond
);
1961 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1962 * IAOQ_Next{30..31} ← GR[b]{30..31};
1964 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1965 * which keeps the privilege level from being increased.
/* Clamp the privilege level carried in the low 2 bits of a branch target so a
   branch can lower but never raise privilege (see the comment block above).
   Privilege 3 forces both bits on; otherwise take the unsigned max of the
   current level and the offset's own low bits via movcond. */
1967 static TCGv_reg
do_ibranch_priv(DisasContext
*ctx
, TCGv_reg offset
)
1970 switch (ctx
->privilege
) {
1972 /* Privilege 0 is maximum and is allowed to decrease. */
1975 /* Privilege 3 is minimum and is never allowed increase. */
1976 dest
= get_temp(ctx
);
1977 tcg_gen_ori_reg(dest
, offset
, 3);
1980 dest
= tcg_temp_new();
1981 tcg_gen_andi_reg(dest
, offset
, -4);
1982 tcg_gen_ori_reg(dest
, dest
, ctx
->privilege
);
1983 tcg_gen_movcond_reg(TCG_COND_GTU
, dest
, dest
, offset
, dest
, offset
);
1984 tcg_temp_free(dest
);
1990 #ifdef CONFIG_USER_ONLY
1991 /* On Linux, page zero is normally marked execute only + gateway.
1992 Therefore normal read or write is supposed to fail, but specific
1993 offsets have kernel code mapped to raise permissions to implement
1994 system calls. Handling this via an explicit check here, rather
1995 in than the "be disp(sr2,r0)" instruction that probably sent us
1996 here, is the easiest way to handle the branch delay slot on the
1997 aforementioned BE. */
1998 static void do_page_zero(DisasContext
*ctx
)
2000 /* If by some means we get here with PSW[N]=1, that implies that
2001 the B,GATE instruction would be skipped, and we'd fault on the
2002 next insn within the privilaged page. */
2003 switch (ctx
->null_cond
.c
) {
2004 case TCG_COND_NEVER
:
2006 case TCG_COND_ALWAYS
:
2007 tcg_gen_movi_reg(cpu_psw_n
, 0);
2010 /* Since this is always the first (and only) insn within the
2011 TB, we should know the state of PSW[N] from TB->FLAGS. */
2012 g_assert_not_reached();
2015 /* Check that we didn't arrive here via some means that allowed
2016 non-sequential instruction execution. Normally the PSW[B] bit
2017 detects this by disallowing the B,GATE instruction to execute
2018 under such conditions. */
2019 if (ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
2023 switch (ctx
->iaoq_f
& -4) {
2024 case 0x00: /* Null pointer call */
2025 gen_excp_1(EXCP_IMP
);
2026 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2029 case 0xb0: /* LWS */
2030 gen_excp_1(EXCP_SYSCALL_LWS
);
2031 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2034 case 0xe0: /* SET_THREAD_POINTER */
2035 tcg_gen_st_reg(cpu_gr
[26], cpu_env
, offsetof(CPUHPPAState
, cr
[27]));
2036 tcg_gen_ori_reg(cpu_iaoq_f
, cpu_gr
[31], 3);
2037 tcg_gen_addi_reg(cpu_iaoq_b
, cpu_iaoq_f
, 4);
2038 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
2041 case 0x100: /* SYSCALL */
2042 gen_excp_1(EXCP_SYSCALL
);
2043 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2048 gen_excp_1(EXCP_ILL
);
2049 ctx
->base
.is_jmp
= DISAS_NORETURN
;
/* NOP: no operation; just clear any pending nullification condition. */
2055 static bool trans_nop(DisasContext
*ctx
, arg_nop
*a
)
2057 cond_free(&ctx
->null_cond
);
/* BREAK: raise the break exception with the instruction in IIR. */
2061 static bool trans_break(DisasContext
*ctx
, arg_break
*a
)
2063 return gen_excp_iir(ctx
, EXCP_BREAK
);
/* SYNC: emit a full sequentially-consistent memory barrier, unconditionally. */
2066 static bool trans_sync(DisasContext
*ctx
, arg_sync
*a
)
2068 /* No point in nullifying the memory barrier. */
2069 tcg_gen_mb(TCG_BAR_SC
| TCG_MO_ALL
);
2071 cond_free(&ctx
->null_cond
);
/* MFIA: move the current (front) instruction address into GR rt as a
   compile-time constant, since iaoq_f is known at translation time here. */
2075 static bool trans_mfia(DisasContext
*ctx
, arg_mfia
*a
)
2078 TCGv_reg tmp
= dest_gpr(ctx
, rt
);
2079 tcg_gen_movi_reg(tmp
, ctx
->iaoq_f
);
2080 save_gpr(ctx
, rt
, tmp
);
2082 cond_free(&ctx
->null_cond
);
/* MFSP: read space register 'sp' (64-bit), keep its high 32 bits, and move
   them into GR rt. */
2086 static bool trans_mfsp(DisasContext
*ctx
, arg_mfsp
*a
)
2089 unsigned rs
= a
->sp
;
2090 TCGv_i64 t0
= tcg_temp_new_i64();
2091 TCGv_reg t1
= tcg_temp_new();
2093 load_spr(ctx
, t0
, rs
);
2094 tcg_gen_shri_i64(t0
, t0
, 32);
2095 tcg_gen_trunc_i64_reg(t1
, t0
);
2097 save_gpr(ctx
, rt
, t1
);
2099 tcg_temp_free_i64(t0
);
2101 cond_free(&ctx
->null_cond
);
2105 static bool trans_mfctl(DisasContext
*ctx
, arg_mfctl
*a
)
2108 unsigned ctl
= a
->r
;
2113 #ifdef TARGET_HPPA64
2115 /* MFSAR without ,W masks low 5 bits. */
2116 tmp
= dest_gpr(ctx
, rt
);
2117 tcg_gen_andi_reg(tmp
, cpu_sar
, 31);
2118 save_gpr(ctx
, rt
, tmp
);
2122 save_gpr(ctx
, rt
, cpu_sar
);
2124 case CR_IT
: /* Interval Timer */
2125 /* FIXME: Respect PSW_S bit. */
2127 tmp
= dest_gpr(ctx
, rt
);
2128 if (tb_cflags(ctx
->base
.tb
) & CF_USE_ICOUNT
) {
2130 gen_helper_read_interval_timer(tmp
);
2132 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2134 gen_helper_read_interval_timer(tmp
);
2136 save_gpr(ctx
, rt
, tmp
);
2137 return nullify_end(ctx
);
2142 /* All other control registers are privileged. */
2143 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2147 tmp
= get_temp(ctx
);
2148 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2149 save_gpr(ctx
, rt
, tmp
);
2152 cond_free(&ctx
->null_cond
);
2156 static bool trans_mtsp(DisasContext
*ctx
, arg_mtsp
*a
)
2159 unsigned rs
= a
->sp
;
2163 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2167 t64
= tcg_temp_new_i64();
2168 tcg_gen_extu_reg_i64(t64
, load_gpr(ctx
, rr
));
2169 tcg_gen_shli_i64(t64
, t64
, 32);
2172 tcg_gen_st_i64(t64
, cpu_env
, offsetof(CPUHPPAState
, sr
[rs
]));
2173 ctx
->tb_flags
&= ~TB_FLAG_SR_SAME
;
2175 tcg_gen_mov_i64(cpu_sr
[rs
], t64
);
2177 tcg_temp_free_i64(t64
);
2179 return nullify_end(ctx
);
2182 static bool trans_mtctl(DisasContext
*ctx
, arg_mtctl
*a
)
2184 unsigned ctl
= a
->t
;
2185 TCGv_reg reg
= load_gpr(ctx
, a
->r
);
2188 if (ctl
== CR_SAR
) {
2189 tmp
= tcg_temp_new();
2190 tcg_gen_andi_reg(tmp
, reg
, TARGET_REGISTER_BITS
- 1);
2191 save_or_nullify(ctx
, cpu_sar
, tmp
);
2194 cond_free(&ctx
->null_cond
);
2198 /* All other control registers are privileged or read-only. */
2199 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2201 #ifndef CONFIG_USER_ONLY
2205 gen_helper_write_interval_timer(cpu_env
, reg
);
2208 gen_helper_write_eirr(cpu_env
, reg
);
2211 gen_helper_write_eiem(cpu_env
, reg
);
2212 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2217 /* FIXME: Respect PSW_Q bit */
2218 /* The write advances the queue and stores to the back element. */
2219 tmp
= get_temp(ctx
);
2220 tcg_gen_ld_reg(tmp
, cpu_env
,
2221 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2222 tcg_gen_st_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2223 tcg_gen_st_reg(reg
, cpu_env
,
2224 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2228 tcg_gen_st_reg(reg
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2231 return nullify_end(ctx
);
/* MTSARCM: write the one's complement of GR r, masked to the shift-amount
   width (TARGET_REGISTER_BITS - 1), into SAR, honoring nullification. */
2235 static bool trans_mtsarcm(DisasContext
*ctx
, arg_mtsarcm
*a
)
2237 TCGv_reg tmp
= tcg_temp_new();
2239 tcg_gen_not_reg(tmp
, load_gpr(ctx
, a
->r
));
2240 tcg_gen_andi_reg(tmp
, tmp
, TARGET_REGISTER_BITS
- 1);
2241 save_or_nullify(ctx
, cpu_sar
, tmp
);
2244 cond_free(&ctx
->null_cond
);
/* LDSID: load the space identifier for (sp, GR b) into GR t. In user mode
   space registers are not implemented, so the result is 0; otherwise take the
   high 32 bits of the selected space register. */
2248 static bool trans_ldsid(DisasContext
*ctx
, arg_ldsid
*a
)
2250 TCGv_reg dest
= dest_gpr(ctx
, a
->t
);
2252 #ifdef CONFIG_USER_ONLY
2253 /* We don't implement space registers in user mode. */
2254 tcg_gen_movi_reg(dest
, 0);
2256 TCGv_i64 t0
= tcg_temp_new_i64();
2258 tcg_gen_mov_i64(t0
, space_select(ctx
, a
->sp
, load_gpr(ctx
, a
->b
)));
2259 tcg_gen_shri_i64(t0
, t0
, 32);
2260 tcg_gen_trunc_i64_reg(dest
, t0
);
2262 tcg_temp_free_i64(t0
);
2264 save_gpr(ctx
, a
->t
, dest
);
2266 cond_free(&ctx
->null_cond
);
2270 static bool trans_rsm(DisasContext
*ctx
, arg_rsm
*a
)
2272 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2273 #ifndef CONFIG_USER_ONLY
2278 tmp
= get_temp(ctx
);
2279 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, psw
));
2280 tcg_gen_andi_reg(tmp
, tmp
, ~a
->i
);
2281 gen_helper_swap_system_mask(tmp
, cpu_env
, tmp
);
2282 save_gpr(ctx
, a
->t
, tmp
);
2284 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2285 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2286 return nullify_end(ctx
);
2290 static bool trans_ssm(DisasContext
*ctx
, arg_ssm
*a
)
2292 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2293 #ifndef CONFIG_USER_ONLY
2298 tmp
= get_temp(ctx
);
2299 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, psw
));
2300 tcg_gen_ori_reg(tmp
, tmp
, a
->i
);
2301 gen_helper_swap_system_mask(tmp
, cpu_env
, tmp
);
2302 save_gpr(ctx
, a
->t
, tmp
);
2304 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2305 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2306 return nullify_end(ctx
);
2310 static bool trans_mtsm(DisasContext
*ctx
, arg_mtsm
*a
)
2312 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2313 #ifndef CONFIG_USER_ONLY
2317 reg
= load_gpr(ctx
, a
->r
);
2318 tmp
= get_temp(ctx
);
2319 gen_helper_swap_system_mask(tmp
, cpu_env
, reg
);
2321 /* Exit the TB to recognize new interrupts. */
2322 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2323 return nullify_end(ctx
);
/* RFI / RFI,R (privileged): call the rfi helper (restoring registers when
   rfi_r), then leave the TB so new interrupt state is recognized -- via a
   debug exception under single-step, otherwise a plain exit_tb. */
2327 static bool do_rfi(DisasContext
*ctx
, bool rfi_r
)
2329 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2330 #ifndef CONFIG_USER_ONLY
2334 gen_helper_rfi_r(cpu_env
);
2336 gen_helper_rfi(cpu_env
);
2338 /* Exit the TB to recognize new interrupts. */
2339 if (ctx
->base
.singlestep_enabled
) {
2340 gen_excp_1(EXCP_DEBUG
);
2342 tcg_gen_exit_tb(NULL
, 0);
2344 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2346 return nullify_end(ctx
);
/* RFI without register restore. */
2350 static bool trans_rfi(DisasContext
*ctx
, arg_rfi
*a
)
2352 return do_rfi(ctx
, false);
/* RFI,R: return from interruption with register restore. */
2355 static bool trans_rfi_r(DisasContext
*ctx
, arg_rfi_r
*a
)
2357 return do_rfi(ctx
, true);
2360 #ifndef CONFIG_USER_ONLY
/* Privileged halt/reset: invoke the reset helper when 'reset' is set,
   otherwise the halt helper; either way the TB does not continue. */
2361 static bool gen_hlt(DisasContext
*ctx
, int reset
)
2363 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2366 gen_helper_reset(cpu_env
);
2368 gen_helper_halt(cpu_env
);
2370 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2371 return nullify_end(ctx
);
2373 #endif /* !CONFIG_USER_ONLY */
/* Address-only variant of a load/store (e.g. cache hints): the memory access
   is a no-op; only the base-register modification (b += x) is performed. */
2375 static bool trans_nop_addrx(DisasContext
*ctx
, arg_ldst
*a
)
2378 TCGv_reg dest
= dest_gpr(ctx
, a
->b
);
2379 TCGv_reg src1
= load_gpr(ctx
, a
->b
);
2380 TCGv_reg src2
= load_gpr(ctx
, a
->x
);
2382 /* The only thing we need to do is the base register modification. */
2383 tcg_gen_add_reg(dest
, src1
, src2
);
2384 save_gpr(ctx
, a
->b
, dest
);
2386 cond_free(&ctx
->null_cond
);
2390 static bool trans_probe(DisasContext
*ctx
, arg_probe
*a
)
2393 TCGv_i32 level
, want
;
2398 dest
= dest_gpr(ctx
, a
->t
);
2399 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2402 level
= tcg_const_i32(a
->ri
);
2404 level
= tcg_temp_new_i32();
2405 tcg_gen_trunc_reg_i32(level
, load_gpr(ctx
, a
->ri
));
2406 tcg_gen_andi_i32(level
, level
, 3);
2408 want
= tcg_const_i32(a
->write
? PAGE_WRITE
: PAGE_READ
);
2410 gen_helper_probe(dest
, cpu_env
, addr
, level
, want
);
2412 tcg_temp_free_i32(want
);
2413 tcg_temp_free_i32(level
);
2415 save_gpr(ctx
, a
->t
, dest
);
2416 return nullify_end(ctx
);
2419 static bool trans_ixtlbx(DisasContext
*ctx
, arg_ixtlbx
*a
)
2421 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2422 #ifndef CONFIG_USER_ONLY
2428 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2429 reg
= load_gpr(ctx
, a
->r
);
2431 gen_helper_itlba(cpu_env
, addr
, reg
);
2433 gen_helper_itlbp(cpu_env
, addr
, reg
);
2436 /* Exit TB for ITLB change if mmu is enabled. This *should* not be
2437 the case, since the OS TLB fill handler runs with mmu disabled. */
2438 if (!a
->data
&& (ctx
->tb_flags
& PSW_C
)) {
2439 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2441 return nullify_end(ctx
);
2445 static bool trans_pxtlbx(DisasContext
*ctx
, arg_pxtlbx
*a
)
2447 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2448 #ifndef CONFIG_USER_ONLY
2454 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2456 save_gpr(ctx
, a
->b
, ofs
);
2459 gen_helper_ptlbe(cpu_env
);
2461 gen_helper_ptlb(cpu_env
, addr
);
2464 /* Exit TB for TLB change if mmu is enabled. */
2465 if (!a
->data
&& (ctx
->tb_flags
& PSW_C
)) {
2466 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2468 return nullify_end(ctx
);
2472 static bool trans_lpa(DisasContext
*ctx
, arg_ldst
*a
)
2474 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2475 #ifndef CONFIG_USER_ONLY
2477 TCGv_reg ofs
, paddr
;
2481 form_gva(ctx
, &vaddr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2483 paddr
= tcg_temp_new();
2484 gen_helper_lpa(paddr
, cpu_env
, vaddr
);
2486 /* Note that physical address result overrides base modification. */
2488 save_gpr(ctx
, a
->b
, ofs
);
2490 save_gpr(ctx
, a
->t
, paddr
);
2491 tcg_temp_free(paddr
);
2493 return nullify_end(ctx
);
/* LCI (privileged): load coherence index -- always 0 in this implementation,
   since the whole address space is modeled as coherent (see comment below). */
2497 static bool trans_lci(DisasContext
*ctx
, arg_lci
*a
)
2501 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2503 /* The Coherence Index is an implementation-defined function of the
2504 physical address. Two addresses with the same CI have a coherent
2505 view of the cache. Our implementation is to return 0 for all,
2506 since the entire address space is coherent. */
2507 ci
= tcg_const_reg(0);
2508 save_gpr(ctx
, a
->t
, ci
);
2511 cond_free(&ctx
->null_cond
);
/* ADD: plain add -- no L-variant, no trap-on-overflow, no trap-on-cond, no carry-in. */
2515 static bool trans_add(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2517 return do_add_reg(ctx
, a
, false, false, false, false);
/* ADD,L: logical add (is_l set; no carry-bit writeback per do_add). */
2520 static bool trans_add_l(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2522 return do_add_reg(ctx
, a
, true, false, false, false);
/* ADD,TSV: add, trap on signed overflow (is_tsv set). */
2525 static bool trans_add_tsv(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2527 return do_add_reg(ctx
, a
, false, true, false, false);
/* ADD,C: add with carry-in from PSW (is_c set). */
2530 static bool trans_add_c(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2532 return do_add_reg(ctx
, a
, false, false, false, true);
/* ADD,C,TSV: add with carry-in, trap on signed overflow. */
2535 static bool trans_add_c_tsv(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2537 return do_add_reg(ctx
, a
, false, true, false, true);
/* SUB: plain subtract -- no trap-on-overflow, no borrow-in, no trap-on-cond. */
2540 static bool trans_sub(DisasContext
*ctx
, arg_rrr_cf
*a
)
2542 return do_sub_reg(ctx
, a
, false, false, false);
/* SUB,TSV: subtract, trap on signed overflow. */
2545 static bool trans_sub_tsv(DisasContext
*ctx
, arg_rrr_cf
*a
)
2547 return do_sub_reg(ctx
, a
, true, false, false);
/* SUB,TC: subtract, trap on condition (is_tc set). */
2550 static bool trans_sub_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2552 return do_sub_reg(ctx
, a
, false, false, true);
/* SUB,TSV,TC: subtract, trap on signed overflow and on condition. */
2555 static bool trans_sub_tsv_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2557 return do_sub_reg(ctx
, a
, true, false, true);
/* SUB,B: subtract with borrow-in from PSW carry (is_b set). */
2560 static bool trans_sub_b(DisasContext
*ctx
, arg_rrr_cf
*a
)
2562 return do_sub_reg(ctx
, a
, false, true, false);
/* SUB,B,TSV: subtract with borrow-in, trap on signed overflow. */
2565 static bool trans_sub_b_tsv(DisasContext
*ctx
, arg_rrr_cf
*a
)
2567 return do_sub_reg(ctx
, a
, true, true, false);
/* ANDCM: r1 & ~r2 via the and-complement TCG op. */
2570 static bool trans_andcm(DisasContext
*ctx
, arg_rrr_cf
*a
)
2572 return do_log_reg(ctx
, a
, tcg_gen_andc_reg
);
/* AND: bitwise and of r1 and r2. */
2575 static bool trans_and(DisasContext
*ctx
, arg_rrr_cf
*a
)
2577 return do_log_reg(ctx
, a
, tcg_gen_and_reg
);
2580 static bool trans_or(DisasContext
*ctx
, arg_rrr_cf
*a
)
2583 unsigned r2
= a
->r2
;
2584 unsigned r1
= a
->r1
;
2587 if (rt
== 0) { /* NOP */
2588 cond_free(&ctx
->null_cond
);
2591 if (r2
== 0) { /* COPY */
2593 TCGv_reg dest
= dest_gpr(ctx
, rt
);
2594 tcg_gen_movi_reg(dest
, 0);
2595 save_gpr(ctx
, rt
, dest
);
2597 save_gpr(ctx
, rt
, cpu_gr
[r1
]);
2599 cond_free(&ctx
->null_cond
);
2602 #ifndef CONFIG_USER_ONLY
2603 /* These are QEMU extensions and are nops in the real architecture:
2605 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2606 * or %r31,%r31,%r31 -- death loop; offline cpu
2607 * currently implemented as idle.
2609 if ((rt
== 10 || rt
== 31) && r1
== rt
&& r2
== rt
) { /* PAUSE */
2612 /* No need to check for supervisor, as userland can only pause
2613 until the next timer interrupt. */
2616 /* Advance the instruction queue. */
2617 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
2618 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
2619 nullify_set(ctx
, 0);
2621 /* Tell the qemu main loop to halt until this cpu has work. */
2622 tmp
= tcg_const_i32(1);
2623 tcg_gen_st_i32(tmp
, cpu_env
, -offsetof(HPPACPU
, env
) +
2624 offsetof(CPUState
, halted
));
2625 tcg_temp_free_i32(tmp
);
2626 gen_excp_1(EXCP_HALTED
);
2627 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2629 return nullify_end(ctx
);
2633 return do_log_reg(ctx
, a
, tcg_gen_or_reg
);
2636 static bool trans_xor(DisasContext
*ctx
, arg_rrr_cf
*a
)
2638 return do_log_reg(ctx
, a
, tcg_gen_xor_reg
);
2641 static bool trans_cmpclr(DisasContext
*ctx
, arg_rrr_cf
*a
)
2643 TCGv_reg tcg_r1
, tcg_r2
;
2648 tcg_r1
= load_gpr(ctx
, a
->r1
);
2649 tcg_r2
= load_gpr(ctx
, a
->r2
);
2650 do_cmpclr(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
);
2651 return nullify_end(ctx
);
2654 static bool trans_uxor(DisasContext
*ctx
, arg_rrr_cf
*a
)
2656 TCGv_reg tcg_r1
, tcg_r2
;
2661 tcg_r1
= load_gpr(ctx
, a
->r1
);
2662 tcg_r2
= load_gpr(ctx
, a
->r2
);
2663 do_unit(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, false, tcg_gen_xor_reg
);
2664 return nullify_end(ctx
);
2667 static bool do_uaddcm(DisasContext
*ctx
, arg_rrr_cf
*a
, bool is_tc
)
2669 TCGv_reg tcg_r1
, tcg_r2
, tmp
;
2674 tcg_r1
= load_gpr(ctx
, a
->r1
);
2675 tcg_r2
= load_gpr(ctx
, a
->r2
);
2676 tmp
= get_temp(ctx
);
2677 tcg_gen_not_reg(tmp
, tcg_r2
);
2678 do_unit(ctx
, a
->t
, tcg_r1
, tmp
, a
->cf
, is_tc
, tcg_gen_add_reg
);
2679 return nullify_end(ctx
);
2682 static bool trans_uaddcm(DisasContext
*ctx
, arg_rrr_cf
*a
)
2684 return do_uaddcm(ctx
, a
, false);
2687 static bool trans_uaddcm_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2689 return do_uaddcm(ctx
, a
, true);
2692 static bool do_dcor(DisasContext
*ctx
, arg_rr_cf
*a
, bool is_i
)
2698 tmp
= get_temp(ctx
);
2699 tcg_gen_shri_reg(tmp
, cpu_psw_cb
, 3);
2701 tcg_gen_not_reg(tmp
, tmp
);
2703 tcg_gen_andi_reg(tmp
, tmp
, 0x11111111);
2704 tcg_gen_muli_reg(tmp
, tmp
, 6);
2705 do_unit(ctx
, a
->t
, tmp
, load_gpr(ctx
, a
->r
), a
->cf
, false,
2706 is_i
? tcg_gen_add_reg
: tcg_gen_sub_reg
);
2707 return nullify_end(ctx
);
2710 static bool trans_dcor(DisasContext
*ctx
, arg_rr_cf
*a
)
2712 return do_dcor(ctx
, a
, false);
2715 static bool trans_dcor_i(DisasContext
*ctx
, arg_rr_cf
*a
)
2717 return do_dcor(ctx
, a
, true);
2720 static bool trans_ds(DisasContext
*ctx
, arg_rrr_cf
*a
)
2722 TCGv_reg dest
, add1
, add2
, addc
, zero
, in1
, in2
;
2726 in1
= load_gpr(ctx
, a
->r1
);
2727 in2
= load_gpr(ctx
, a
->r2
);
2729 add1
= tcg_temp_new();
2730 add2
= tcg_temp_new();
2731 addc
= tcg_temp_new();
2732 dest
= tcg_temp_new();
2733 zero
= tcg_const_reg(0);
2735 /* Form R1 << 1 | PSW[CB]{8}. */
2736 tcg_gen_add_reg(add1
, in1
, in1
);
2737 tcg_gen_add_reg(add1
, add1
, cpu_psw_cb_msb
);
2739 /* Add or subtract R2, depending on PSW[V]. Proper computation of
2740 carry{8} requires that we subtract via + ~R2 + 1, as described in
2741 the manual. By extracting and masking V, we can produce the
2742 proper inputs to the addition without movcond. */
2743 tcg_gen_sari_reg(addc
, cpu_psw_v
, TARGET_REGISTER_BITS
- 1);
2744 tcg_gen_xor_reg(add2
, in2
, addc
);
2745 tcg_gen_andi_reg(addc
, addc
, 1);
2746 /* ??? This is only correct for 32-bit. */
2747 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, add1
, zero
, add2
, zero
);
2748 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, dest
, cpu_psw_cb_msb
, addc
, zero
);
2750 tcg_temp_free(addc
);
2751 tcg_temp_free(zero
);
2753 /* Write back the result register. */
2754 save_gpr(ctx
, a
->t
, dest
);
2756 /* Write back PSW[CB]. */
2757 tcg_gen_xor_reg(cpu_psw_cb
, add1
, add2
);
2758 tcg_gen_xor_reg(cpu_psw_cb
, cpu_psw_cb
, dest
);
2760 /* Write back PSW[V] for the division step. */
2761 tcg_gen_neg_reg(cpu_psw_v
, cpu_psw_cb_msb
);
2762 tcg_gen_xor_reg(cpu_psw_v
, cpu_psw_v
, in2
);
2764 /* Install the new nullification. */
2767 if (a
->cf
>> 1 == 6) {
2768 /* ??? The lshift is supposed to contribute to overflow. */
2769 sv
= do_add_sv(ctx
, dest
, add1
, add2
);
2771 ctx
->null_cond
= do_cond(a
->cf
, dest
, cpu_psw_cb_msb
, sv
);
2774 tcg_temp_free(add1
);
2775 tcg_temp_free(add2
);
2776 tcg_temp_free(dest
);
2778 return nullify_end(ctx
);
2781 static bool trans_addi(DisasContext
*ctx
, uint32_t insn
)
2783 target_sreg im
= low_sextract(insn
, 0, 11);
2784 unsigned e1
= extract32(insn
, 11, 1);
2785 unsigned cf
= extract32(insn
, 12, 4);
2786 unsigned rt
= extract32(insn
, 16, 5);
2787 unsigned r2
= extract32(insn
, 21, 5);
2788 unsigned o1
= extract32(insn
, 26, 1);
2789 TCGv_reg tcg_im
, tcg_r2
;
2795 tcg_im
= load_const(ctx
, im
);
2796 tcg_r2
= load_gpr(ctx
, r2
);
2797 do_add(ctx
, rt
, tcg_im
, tcg_r2
, 0, false, e1
, !o1
, false, cf
);
2799 return nullify_end(ctx
);
2802 static bool trans_subi(DisasContext
*ctx
, uint32_t insn
)
2804 target_sreg im
= low_sextract(insn
, 0, 11);
2805 unsigned e1
= extract32(insn
, 11, 1);
2806 unsigned cf
= extract32(insn
, 12, 4);
2807 unsigned rt
= extract32(insn
, 16, 5);
2808 unsigned r2
= extract32(insn
, 21, 5);
2809 TCGv_reg tcg_im
, tcg_r2
;
2815 tcg_im
= load_const(ctx
, im
);
2816 tcg_r2
= load_gpr(ctx
, r2
);
2817 do_sub(ctx
, rt
, tcg_im
, tcg_r2
, e1
, false, false, cf
);
2819 return nullify_end(ctx
);
2822 static bool trans_cmpiclr(DisasContext
*ctx
, uint32_t insn
)
2824 target_sreg im
= low_sextract(insn
, 0, 11);
2825 unsigned cf
= extract32(insn
, 12, 4);
2826 unsigned rt
= extract32(insn
, 16, 5);
2827 unsigned r2
= extract32(insn
, 21, 5);
2828 TCGv_reg tcg_im
, tcg_r2
;
2834 tcg_im
= load_const(ctx
, im
);
2835 tcg_r2
= load_gpr(ctx
, r2
);
2836 do_cmpclr(ctx
, rt
, tcg_im
, tcg_r2
, cf
);
2838 return nullify_end(ctx
);
2841 static bool trans_ld(DisasContext
*ctx
, arg_ldst
*a
)
2843 return do_load(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? a
->size
: 0,
2844 a
->disp
, a
->sp
, a
->m
, a
->size
| MO_TE
);
2847 static bool trans_st(DisasContext
*ctx
, arg_ldst
*a
)
2849 assert(a
->x
== 0 && a
->scale
== 0);
2850 return do_store(ctx
, a
->t
, a
->b
, a
->disp
, a
->sp
, a
->m
, a
->size
| MO_TE
);
2853 static bool trans_ldc(DisasContext
*ctx
, arg_ldst
*a
)
2855 TCGMemOp mop
= MO_TEUL
| MO_ALIGN_16
| a
->size
;
2856 TCGv_reg zero
, dest
, ofs
;
2862 /* Base register modification. Make sure if RT == RB,
2863 we see the result of the load. */
2864 dest
= get_temp(ctx
);
2866 dest
= dest_gpr(ctx
, a
->t
);
2869 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, a
->scale
? a
->size
: 0,
2870 a
->disp
, a
->sp
, a
->m
, ctx
->mmu_idx
== MMU_PHYS_IDX
);
2871 zero
= tcg_const_reg(0);
2872 tcg_gen_atomic_xchg_reg(dest
, addr
, zero
, ctx
->mmu_idx
, mop
);
2874 save_gpr(ctx
, a
->b
, ofs
);
2876 save_gpr(ctx
, a
->t
, dest
);
2878 return nullify_end(ctx
);
2881 static bool trans_stby(DisasContext
*ctx
, arg_stby
*a
)
2888 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, a
->disp
, a
->sp
, a
->m
,
2889 ctx
->mmu_idx
== MMU_PHYS_IDX
);
2890 val
= load_gpr(ctx
, a
->r
);
2892 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
2893 gen_helper_stby_e_parallel(cpu_env
, addr
, val
);
2895 gen_helper_stby_e(cpu_env
, addr
, val
);
2898 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
2899 gen_helper_stby_b_parallel(cpu_env
, addr
, val
);
2901 gen_helper_stby_b(cpu_env
, addr
, val
);
2905 tcg_gen_andi_reg(ofs
, ofs
, ~3);
2906 save_gpr(ctx
, a
->b
, ofs
);
2909 return nullify_end(ctx
);
2912 static bool trans_lda(DisasContext
*ctx
, arg_ldst
*a
)
2914 int hold_mmu_idx
= ctx
->mmu_idx
;
2916 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2917 ctx
->mmu_idx
= MMU_PHYS_IDX
;
2919 ctx
->mmu_idx
= hold_mmu_idx
;
2923 static bool trans_sta(DisasContext
*ctx
, arg_ldst
*a
)
2925 int hold_mmu_idx
= ctx
->mmu_idx
;
2927 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2928 ctx
->mmu_idx
= MMU_PHYS_IDX
;
2930 ctx
->mmu_idx
= hold_mmu_idx
;
2934 static bool trans_ldil(DisasContext
*ctx
, uint32_t insn
)
2936 unsigned rt
= extract32(insn
, 21, 5);
2937 target_sreg i
= assemble_21(insn
);
2938 TCGv_reg tcg_rt
= dest_gpr(ctx
, rt
);
2940 tcg_gen_movi_reg(tcg_rt
, i
);
2941 save_gpr(ctx
, rt
, tcg_rt
);
2942 cond_free(&ctx
->null_cond
);
2946 static bool trans_addil(DisasContext
*ctx
, uint32_t insn
)
2948 unsigned rt
= extract32(insn
, 21, 5);
2949 target_sreg i
= assemble_21(insn
);
2950 TCGv_reg tcg_rt
= load_gpr(ctx
, rt
);
2951 TCGv_reg tcg_r1
= dest_gpr(ctx
, 1);
2953 tcg_gen_addi_reg(tcg_r1
, tcg_rt
, i
);
2954 save_gpr(ctx
, 1, tcg_r1
);
2955 cond_free(&ctx
->null_cond
);
2959 static bool trans_ldo(DisasContext
*ctx
, uint32_t insn
)
2961 unsigned rb
= extract32(insn
, 21, 5);
2962 unsigned rt
= extract32(insn
, 16, 5);
2963 target_sreg i
= assemble_16(insn
);
2964 TCGv_reg tcg_rt
= dest_gpr(ctx
, rt
);
2966 /* Special case rb == 0, for the LDI pseudo-op.
2967 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2969 tcg_gen_movi_reg(tcg_rt
, i
);
2971 tcg_gen_addi_reg(tcg_rt
, cpu_gr
[rb
], i
);
2973 save_gpr(ctx
, rt
, tcg_rt
);
2974 cond_free(&ctx
->null_cond
);
2978 static bool trans_load(DisasContext
*ctx
, uint32_t insn
,
2979 bool is_mod
, TCGMemOp mop
)
2981 unsigned rb
= extract32(insn
, 21, 5);
2982 unsigned rt
= extract32(insn
, 16, 5);
2983 unsigned sp
= extract32(insn
, 14, 2);
2984 target_sreg i
= assemble_16(insn
);
2986 do_load(ctx
, rt
, rb
, 0, 0, i
, sp
, is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
2990 static bool trans_load_w(DisasContext
*ctx
, uint32_t insn
)
2992 unsigned rb
= extract32(insn
, 21, 5);
2993 unsigned rt
= extract32(insn
, 16, 5);
2994 unsigned sp
= extract32(insn
, 14, 2);
2995 target_sreg i
= assemble_16a(insn
);
2996 unsigned ext2
= extract32(insn
, 1, 2);
3001 /* FLDW without modification. */
3002 do_floadw(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, sp
, 0);
3005 /* LDW with modification. Note that the sign of I selects
3006 post-dec vs pre-inc. */
3007 do_load(ctx
, rt
, rb
, 0, 0, i
, sp
, (i
< 0 ? 1 : -1), MO_TEUL
);
3010 return gen_illegal(ctx
);
3015 static bool trans_fload_mod(DisasContext
*ctx
, uint32_t insn
)
3017 target_sreg i
= assemble_16a(insn
);
3018 unsigned t1
= extract32(insn
, 1, 1);
3019 unsigned a
= extract32(insn
, 2, 1);
3020 unsigned sp
= extract32(insn
, 14, 2);
3021 unsigned t0
= extract32(insn
, 16, 5);
3022 unsigned rb
= extract32(insn
, 21, 5);
3024 /* FLDW with modification. */
3025 do_floadw(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, sp
, (a
? -1 : 1));
3029 static bool trans_store(DisasContext
*ctx
, uint32_t insn
,
3030 bool is_mod
, TCGMemOp mop
)
3032 unsigned rb
= extract32(insn
, 21, 5);
3033 unsigned rt
= extract32(insn
, 16, 5);
3034 unsigned sp
= extract32(insn
, 14, 2);
3035 target_sreg i
= assemble_16(insn
);
3037 do_store(ctx
, rt
, rb
, i
, sp
, is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
3041 static bool trans_store_w(DisasContext
*ctx
, uint32_t insn
)
3043 unsigned rb
= extract32(insn
, 21, 5);
3044 unsigned rt
= extract32(insn
, 16, 5);
3045 unsigned sp
= extract32(insn
, 14, 2);
3046 target_sreg i
= assemble_16a(insn
);
3047 unsigned ext2
= extract32(insn
, 1, 2);
3052 /* FSTW without modification. */
3053 do_fstorew(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, sp
, 0);
3056 /* STW with modification. */
3057 do_store(ctx
, rt
, rb
, i
, sp
, (i
< 0 ? 1 : -1), MO_TEUL
);
3060 return gen_illegal(ctx
);
3065 static bool trans_fstore_mod(DisasContext
*ctx
, uint32_t insn
)
3067 target_sreg i
= assemble_16a(insn
);
3068 unsigned t1
= extract32(insn
, 1, 1);
3069 unsigned a
= extract32(insn
, 2, 1);
3070 unsigned sp
= extract32(insn
, 14, 2);
3071 unsigned t0
= extract32(insn
, 16, 5);
3072 unsigned rb
= extract32(insn
, 21, 5);
3074 /* FSTW with modification. */
3075 do_fstorew(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, sp
, (a
? -1 : 1));
3079 static bool trans_copr_w(DisasContext
*ctx
, uint32_t insn
)
3081 unsigned t0
= extract32(insn
, 0, 5);
3082 unsigned m
= extract32(insn
, 5, 1);
3083 unsigned t1
= extract32(insn
, 6, 1);
3084 unsigned ext3
= extract32(insn
, 7, 3);
3085 /* unsigned cc = extract32(insn, 10, 2); */
3086 unsigned i
= extract32(insn
, 12, 1);
3087 unsigned ua
= extract32(insn
, 13, 1);
3088 unsigned sp
= extract32(insn
, 14, 2);
3089 unsigned rx
= extract32(insn
, 16, 5);
3090 unsigned rb
= extract32(insn
, 21, 5);
3091 unsigned rt
= t1
* 32 + t0
;
3092 int modify
= (m
? (ua
? -1 : 1) : 0);
3096 scale
= (ua
? 2 : 0);
3100 disp
= low_sextract(rx
, 0, 5);
3103 modify
= (m
? (ua
? -1 : 1) : 0);
3108 do_floadw(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3111 do_fstorew(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3114 return gen_illegal(ctx
);
3119 static bool trans_copr_dw(DisasContext
*ctx
, uint32_t insn
)
3121 unsigned rt
= extract32(insn
, 0, 5);
3122 unsigned m
= extract32(insn
, 5, 1);
3123 unsigned ext4
= extract32(insn
, 6, 4);
3124 /* unsigned cc = extract32(insn, 10, 2); */
3125 unsigned i
= extract32(insn
, 12, 1);
3126 unsigned ua
= extract32(insn
, 13, 1);
3127 unsigned sp
= extract32(insn
, 14, 2);
3128 unsigned rx
= extract32(insn
, 16, 5);
3129 unsigned rb
= extract32(insn
, 21, 5);
3130 int modify
= (m
? (ua
? -1 : 1) : 0);
3134 scale
= (ua
? 3 : 0);
3138 disp
= low_sextract(rx
, 0, 5);
3141 modify
= (m
? (ua
? -1 : 1) : 0);
3146 do_floadd(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3149 do_fstored(ctx
, rt
, rb
, rx
, scale
, disp
, sp
, modify
);
3152 return gen_illegal(ctx
);
3157 static bool do_cmpb(DisasContext
*ctx
, unsigned r
, TCGv_reg in1
,
3158 unsigned c
, unsigned f
, unsigned n
, int disp
)
3160 TCGv_reg dest
, in2
, sv
;
3163 in2
= load_gpr(ctx
, r
);
3164 dest
= get_temp(ctx
);
3166 tcg_gen_sub_reg(dest
, in1
, in2
);
3170 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
3173 cond
= do_sub_cond(c
* 2 + f
, dest
, in1
, in2
, sv
);
3174 return do_cbranch(ctx
, disp
, n
, &cond
);
3177 static bool trans_cmpb(DisasContext
*ctx
, arg_cmpb
*a
)
3180 return do_cmpb(ctx
, a
->r2
, load_gpr(ctx
, a
->r1
), a
->c
, a
->f
, a
->n
, a
->disp
);
3183 static bool trans_cmpbi(DisasContext
*ctx
, arg_cmpbi
*a
)
3186 return do_cmpb(ctx
, a
->r
, load_const(ctx
, a
->i
), a
->c
, a
->f
, a
->n
, a
->disp
);
3189 static bool do_addb(DisasContext
*ctx
, unsigned r
, TCGv_reg in1
,
3190 unsigned c
, unsigned f
, unsigned n
, int disp
)
3192 TCGv_reg dest
, in2
, sv
, cb_msb
;
3195 in2
= load_gpr(ctx
, r
);
3196 dest
= dest_gpr(ctx
, r
);
3202 tcg_gen_add_reg(dest
, in1
, in2
);
3205 cb_msb
= get_temp(ctx
);
3206 tcg_gen_movi_reg(cb_msb
, 0);
3207 tcg_gen_add2_reg(dest
, cb_msb
, in1
, cb_msb
, in2
, cb_msb
);
3210 tcg_gen_add_reg(dest
, in1
, in2
);
3211 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
3215 cond
= do_cond(c
* 2 + f
, dest
, cb_msb
, sv
);
3216 return do_cbranch(ctx
, disp
, n
, &cond
);
3219 static bool trans_addb(DisasContext
*ctx
, arg_addb
*a
)
3222 return do_addb(ctx
, a
->r2
, load_gpr(ctx
, a
->r1
), a
->c
, a
->f
, a
->n
, a
->disp
);
3225 static bool trans_addbi(DisasContext
*ctx
, arg_addbi
*a
)
3228 return do_addb(ctx
, a
->r
, load_const(ctx
, a
->i
), a
->c
, a
->f
, a
->n
, a
->disp
);
3231 static bool trans_bb_sar(DisasContext
*ctx
, arg_bb_sar
*a
)
3233 TCGv_reg tmp
, tcg_r
;
3238 tmp
= tcg_temp_new();
3239 tcg_r
= load_gpr(ctx
, a
->r
);
3240 tcg_gen_shl_reg(tmp
, tcg_r
, cpu_sar
);
3242 cond
= cond_make_0(a
->c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
3244 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3247 static bool trans_bb_imm(DisasContext
*ctx
, arg_bb_imm
*a
)
3249 TCGv_reg tmp
, tcg_r
;
3254 tmp
= tcg_temp_new();
3255 tcg_r
= load_gpr(ctx
, a
->r
);
3256 tcg_gen_shli_reg(tmp
, tcg_r
, a
->p
);
3258 cond
= cond_make_0(a
->c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
3260 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3263 static bool trans_movb(DisasContext
*ctx
, arg_movb
*a
)
3270 dest
= dest_gpr(ctx
, a
->r2
);
3272 tcg_gen_movi_reg(dest
, 0);
3274 tcg_gen_mov_reg(dest
, cpu_gr
[a
->r1
]);
3277 cond
= do_sed_cond(a
->c
, dest
);
3278 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3281 static bool trans_movbi(DisasContext
*ctx
, arg_movbi
*a
)
3288 dest
= dest_gpr(ctx
, a
->r
);
3289 tcg_gen_movi_reg(dest
, a
->i
);
3291 cond
= do_sed_cond(a
->c
, dest
);
3292 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3295 static bool trans_shrpw_sar(DisasContext
*ctx
, uint32_t insn
,
3296 const DisasInsn
*di
)
3298 unsigned rt
= extract32(insn
, 0, 5);
3299 unsigned c
= extract32(insn
, 13, 3);
3300 unsigned r1
= extract32(insn
, 16, 5);
3301 unsigned r2
= extract32(insn
, 21, 5);
3308 dest
= dest_gpr(ctx
, rt
);
3310 tcg_gen_ext32u_reg(dest
, load_gpr(ctx
, r2
));
3311 tcg_gen_shr_reg(dest
, dest
, cpu_sar
);
3312 } else if (r1
== r2
) {
3313 TCGv_i32 t32
= tcg_temp_new_i32();
3314 tcg_gen_trunc_reg_i32(t32
, load_gpr(ctx
, r2
));
3315 tcg_gen_rotr_i32(t32
, t32
, cpu_sar
);
3316 tcg_gen_extu_i32_reg(dest
, t32
);
3317 tcg_temp_free_i32(t32
);
3319 TCGv_i64 t
= tcg_temp_new_i64();
3320 TCGv_i64 s
= tcg_temp_new_i64();
3322 tcg_gen_concat_reg_i64(t
, load_gpr(ctx
, r2
), load_gpr(ctx
, r1
));
3323 tcg_gen_extu_reg_i64(s
, cpu_sar
);
3324 tcg_gen_shr_i64(t
, t
, s
);
3325 tcg_gen_trunc_i64_reg(dest
, t
);
3327 tcg_temp_free_i64(t
);
3328 tcg_temp_free_i64(s
);
3330 save_gpr(ctx
, rt
, dest
);
3332 /* Install the new nullification. */
3333 cond_free(&ctx
->null_cond
);
3335 ctx
->null_cond
= do_sed_cond(c
, dest
);
3337 return nullify_end(ctx
);
3340 static bool trans_shrpw_imm(DisasContext
*ctx
, uint32_t insn
,
3341 const DisasInsn
*di
)
3343 unsigned rt
= extract32(insn
, 0, 5);
3344 unsigned cpos
= extract32(insn
, 5, 5);
3345 unsigned c
= extract32(insn
, 13, 3);
3346 unsigned r1
= extract32(insn
, 16, 5);
3347 unsigned r2
= extract32(insn
, 21, 5);
3348 unsigned sa
= 31 - cpos
;
3355 dest
= dest_gpr(ctx
, rt
);
3356 t2
= load_gpr(ctx
, r2
);
3358 TCGv_i32 t32
= tcg_temp_new_i32();
3359 tcg_gen_trunc_reg_i32(t32
, t2
);
3360 tcg_gen_rotri_i32(t32
, t32
, sa
);
3361 tcg_gen_extu_i32_reg(dest
, t32
);
3362 tcg_temp_free_i32(t32
);
3363 } else if (r1
== 0) {
3364 tcg_gen_extract_reg(dest
, t2
, sa
, 32 - sa
);
3366 TCGv_reg t0
= tcg_temp_new();
3367 tcg_gen_extract_reg(t0
, t2
, sa
, 32 - sa
);
3368 tcg_gen_deposit_reg(dest
, t0
, cpu_gr
[r1
], 32 - sa
, sa
);
3371 save_gpr(ctx
, rt
, dest
);
3373 /* Install the new nullification. */
3374 cond_free(&ctx
->null_cond
);
3376 ctx
->null_cond
= do_sed_cond(c
, dest
);
3378 return nullify_end(ctx
);
3381 static bool trans_extrw_sar(DisasContext
*ctx
, uint32_t insn
,
3382 const DisasInsn
*di
)
3384 unsigned clen
= extract32(insn
, 0, 5);
3385 unsigned is_se
= extract32(insn
, 10, 1);
3386 unsigned c
= extract32(insn
, 13, 3);
3387 unsigned rt
= extract32(insn
, 16, 5);
3388 unsigned rr
= extract32(insn
, 21, 5);
3389 unsigned len
= 32 - clen
;
3390 TCGv_reg dest
, src
, tmp
;
3396 dest
= dest_gpr(ctx
, rt
);
3397 src
= load_gpr(ctx
, rr
);
3398 tmp
= tcg_temp_new();
3400 /* Recall that SAR is using big-endian bit numbering. */
3401 tcg_gen_xori_reg(tmp
, cpu_sar
, TARGET_REGISTER_BITS
- 1);
3403 tcg_gen_sar_reg(dest
, src
, tmp
);
3404 tcg_gen_sextract_reg(dest
, dest
, 0, len
);
3406 tcg_gen_shr_reg(dest
, src
, tmp
);
3407 tcg_gen_extract_reg(dest
, dest
, 0, len
);
3410 save_gpr(ctx
, rt
, dest
);
3412 /* Install the new nullification. */
3413 cond_free(&ctx
->null_cond
);
3415 ctx
->null_cond
= do_sed_cond(c
, dest
);
3417 return nullify_end(ctx
);
3420 static bool trans_extrw_imm(DisasContext
*ctx
, uint32_t insn
,
3421 const DisasInsn
*di
)
3423 unsigned clen
= extract32(insn
, 0, 5);
3424 unsigned pos
= extract32(insn
, 5, 5);
3425 unsigned is_se
= extract32(insn
, 10, 1);
3426 unsigned c
= extract32(insn
, 13, 3);
3427 unsigned rt
= extract32(insn
, 16, 5);
3428 unsigned rr
= extract32(insn
, 21, 5);
3429 unsigned len
= 32 - clen
;
3430 unsigned cpos
= 31 - pos
;
3437 dest
= dest_gpr(ctx
, rt
);
3438 src
= load_gpr(ctx
, rr
);
3440 tcg_gen_sextract_reg(dest
, src
, cpos
, len
);
3442 tcg_gen_extract_reg(dest
, src
, cpos
, len
);
3444 save_gpr(ctx
, rt
, dest
);
3446 /* Install the new nullification. */
3447 cond_free(&ctx
->null_cond
);
3449 ctx
->null_cond
= do_sed_cond(c
, dest
);
3451 return nullify_end(ctx
);
3454 static const DisasInsn table_sh_ex
[] = {
3455 { 0xd0000000u
, 0xfc001fe0u
, trans_shrpw_sar
},
3456 { 0xd0000800u
, 0xfc001c00u
, trans_shrpw_imm
},
3457 { 0xd0001000u
, 0xfc001be0u
, trans_extrw_sar
},
3458 { 0xd0001800u
, 0xfc001800u
, trans_extrw_imm
},
3461 static bool trans_depw_imm_c(DisasContext
*ctx
, uint32_t insn
,
3462 const DisasInsn
*di
)
3464 unsigned clen
= extract32(insn
, 0, 5);
3465 unsigned cpos
= extract32(insn
, 5, 5);
3466 unsigned nz
= extract32(insn
, 10, 1);
3467 unsigned c
= extract32(insn
, 13, 3);
3468 target_sreg val
= low_sextract(insn
, 16, 5);
3469 unsigned rt
= extract32(insn
, 21, 5);
3470 unsigned len
= 32 - clen
;
3471 target_sreg mask0
, mask1
;
3477 if (cpos
+ len
> 32) {
3481 dest
= dest_gpr(ctx
, rt
);
3482 mask0
= deposit64(0, cpos
, len
, val
);
3483 mask1
= deposit64(-1, cpos
, len
, val
);
3486 TCGv_reg src
= load_gpr(ctx
, rt
);
3488 tcg_gen_andi_reg(dest
, src
, mask1
);
3491 tcg_gen_ori_reg(dest
, src
, mask0
);
3493 tcg_gen_movi_reg(dest
, mask0
);
3495 save_gpr(ctx
, rt
, dest
);
3497 /* Install the new nullification. */
3498 cond_free(&ctx
->null_cond
);
3500 ctx
->null_cond
= do_sed_cond(c
, dest
);
3502 return nullify_end(ctx
);
3505 static bool trans_depw_imm(DisasContext
*ctx
, uint32_t insn
,
3506 const DisasInsn
*di
)
3508 unsigned clen
= extract32(insn
, 0, 5);
3509 unsigned cpos
= extract32(insn
, 5, 5);
3510 unsigned nz
= extract32(insn
, 10, 1);
3511 unsigned c
= extract32(insn
, 13, 3);
3512 unsigned rr
= extract32(insn
, 16, 5);
3513 unsigned rt
= extract32(insn
, 21, 5);
3514 unsigned rs
= nz
? rt
: 0;
3515 unsigned len
= 32 - clen
;
3521 if (cpos
+ len
> 32) {
3525 dest
= dest_gpr(ctx
, rt
);
3526 val
= load_gpr(ctx
, rr
);
3528 tcg_gen_deposit_z_reg(dest
, val
, cpos
, len
);
3530 tcg_gen_deposit_reg(dest
, cpu_gr
[rs
], val
, cpos
, len
);
3532 save_gpr(ctx
, rt
, dest
);
3534 /* Install the new nullification. */
3535 cond_free(&ctx
->null_cond
);
3537 ctx
->null_cond
= do_sed_cond(c
, dest
);
3539 return nullify_end(ctx
);
3542 static bool trans_depw_sar(DisasContext
*ctx
, uint32_t insn
,
3543 const DisasInsn
*di
)
3545 unsigned clen
= extract32(insn
, 0, 5);
3546 unsigned nz
= extract32(insn
, 10, 1);
3547 unsigned i
= extract32(insn
, 12, 1);
3548 unsigned c
= extract32(insn
, 13, 3);
3549 unsigned rt
= extract32(insn
, 21, 5);
3550 unsigned rs
= nz
? rt
: 0;
3551 unsigned len
= 32 - clen
;
3552 TCGv_reg val
, mask
, tmp
, shift
, dest
;
3553 unsigned msb
= 1U << (len
- 1);
3560 val
= load_const(ctx
, low_sextract(insn
, 16, 5));
3562 val
= load_gpr(ctx
, extract32(insn
, 16, 5));
3564 dest
= dest_gpr(ctx
, rt
);
3565 shift
= tcg_temp_new();
3566 tmp
= tcg_temp_new();
3568 /* Convert big-endian bit numbering in SAR to left-shift. */
3569 tcg_gen_xori_reg(shift
, cpu_sar
, TARGET_REGISTER_BITS
- 1);
3571 mask
= tcg_const_reg(msb
+ (msb
- 1));
3572 tcg_gen_and_reg(tmp
, val
, mask
);
3574 tcg_gen_shl_reg(mask
, mask
, shift
);
3575 tcg_gen_shl_reg(tmp
, tmp
, shift
);
3576 tcg_gen_andc_reg(dest
, cpu_gr
[rs
], mask
);
3577 tcg_gen_or_reg(dest
, dest
, tmp
);
3579 tcg_gen_shl_reg(dest
, tmp
, shift
);
3581 tcg_temp_free(shift
);
3582 tcg_temp_free(mask
);
3584 save_gpr(ctx
, rt
, dest
);
3586 /* Install the new nullification. */
3587 cond_free(&ctx
->null_cond
);
3589 ctx
->null_cond
= do_sed_cond(c
, dest
);
3591 return nullify_end(ctx
);
3594 static const DisasInsn table_depw
[] = {
3595 { 0xd4000000u
, 0xfc000be0u
, trans_depw_sar
},
3596 { 0xd4000800u
, 0xfc001800u
, trans_depw_imm
},
3597 { 0xd4001800u
, 0xfc001800u
, trans_depw_imm_c
},
3600 static bool trans_be(DisasContext
*ctx
, uint32_t insn
, bool is_l
)
3602 unsigned n
= extract32(insn
, 1, 1);
3603 unsigned b
= extract32(insn
, 21, 5);
3604 target_sreg disp
= assemble_17(insn
);
3607 #ifdef CONFIG_USER_ONLY
3608 /* ??? It seems like there should be a good way of using
3609 "be disp(sr2, r0)", the canonical gateway entry mechanism
3610 to our advantage. But that appears to be inconvenient to
3611 manage along side branch delay slots. Therefore we handle
3612 entry into the gateway page via absolute address. */
3613 /* Since we don't implement spaces, just branch. Do notice the special
3614 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3615 goto_tb to the TB containing the syscall. */
3617 return do_dbranch(ctx
, disp
, is_l
? 31 : 0, n
);
3620 int sp
= assemble_sr3(insn
);
3624 tmp
= get_temp(ctx
);
3625 tcg_gen_addi_reg(tmp
, load_gpr(ctx
, b
), disp
);
3626 tmp
= do_ibranch_priv(ctx
, tmp
);
3628 #ifdef CONFIG_USER_ONLY
3629 return do_ibranch(ctx
, tmp
, is_l
? 31 : 0, n
);
3631 TCGv_i64 new_spc
= tcg_temp_new_i64();
3633 load_spr(ctx
, new_spc
, sp
);
3635 copy_iaoq_entry(cpu_gr
[31], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3636 tcg_gen_mov_i64(cpu_sr
[0], cpu_iasq_f
);
3638 if (n
&& use_nullify_skip(ctx
)) {
3639 tcg_gen_mov_reg(cpu_iaoq_f
, tmp
);
3640 tcg_gen_addi_reg(cpu_iaoq_b
, cpu_iaoq_f
, 4);
3641 tcg_gen_mov_i64(cpu_iasq_f
, new_spc
);
3642 tcg_gen_mov_i64(cpu_iasq_b
, cpu_iasq_f
);
3644 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3645 if (ctx
->iaoq_b
== -1) {
3646 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3648 tcg_gen_mov_reg(cpu_iaoq_b
, tmp
);
3649 tcg_gen_mov_i64(cpu_iasq_b
, new_spc
);
3650 nullify_set(ctx
, n
);
3652 tcg_temp_free_i64(new_spc
);
3653 tcg_gen_lookup_and_goto_ptr();
3654 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3655 return nullify_end(ctx
);
3659 static bool trans_bl(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
3661 unsigned n
= extract32(insn
, 1, 1);
3662 unsigned link
= extract32(insn
, 21, 5);
3663 target_sreg disp
= assemble_17(insn
);
3665 do_dbranch(ctx
, iaoq_dest(ctx
, disp
), link
, n
);
3669 static bool trans_b_gate(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
3671 unsigned n
= extract32(insn
, 1, 1);
3672 unsigned link
= extract32(insn
, 21, 5);
3673 target_sreg disp
= assemble_17(insn
);
3674 target_ureg dest
= iaoq_dest(ctx
, disp
);
3676 /* Make sure the caller hasn't done something weird with the queue.
3677 * ??? This is not quite the same as the PSW[B] bit, which would be
3678 * expensive to track. Real hardware will trap for
3680 * b gateway+4 (in delay slot of first branch)
3681 * However, checking for a non-sequential instruction queue *will*
3682 * diagnose the security hole
3685 * in which instructions at evil would run with increased privs.
3687 if (ctx
->iaoq_b
== -1 || ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
3688 return gen_illegal(ctx
);
3691 #ifndef CONFIG_USER_ONLY
3692 if (ctx
->tb_flags
& PSW_C
) {
3693 CPUHPPAState
*env
= ctx
->cs
->env_ptr
;
3694 int type
= hppa_artype_for_page(env
, ctx
->base
.pc_next
);
3695 /* If we could not find a TLB entry, then we need to generate an
3696 ITLB miss exception so the kernel will provide it.
3697 The resulting TLB fill operation will invalidate this TB and
3698 we will re-translate, at which point we *will* be able to find
3699 the TLB entry and determine if this is in fact a gateway page. */
3701 gen_excp(ctx
, EXCP_ITLB_MISS
);
3704 /* No change for non-gateway pages or for priv decrease. */
3705 if (type
>= 4 && type
- 4 < ctx
->privilege
) {
3706 dest
= deposit32(dest
, 0, 2, type
- 4);
3709 dest
&= -4; /* priv = 0 */
3713 do_dbranch(ctx
, dest
, link
, n
);
3717 static bool trans_bl_long(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
3719 unsigned n
= extract32(insn
, 1, 1);
3720 target_sreg disp
= assemble_22(insn
);
3722 do_dbranch(ctx
, iaoq_dest(ctx
, disp
), 2, n
);
3726 static bool trans_blr(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
3728 unsigned n
= extract32(insn
, 1, 1);
3729 unsigned rx
= extract32(insn
, 16, 5);
3730 unsigned link
= extract32(insn
, 21, 5);
3731 TCGv_reg tmp
= get_temp(ctx
);
3733 tcg_gen_shli_reg(tmp
, load_gpr(ctx
, rx
), 3);
3734 tcg_gen_addi_reg(tmp
, tmp
, ctx
->iaoq_f
+ 8);
3735 /* The computation here never changes privilege level. */
3736 do_ibranch(ctx
, tmp
, link
, n
);
3740 static bool trans_bv(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
3742 unsigned n
= extract32(insn
, 1, 1);
3743 unsigned rx
= extract32(insn
, 16, 5);
3744 unsigned rb
= extract32(insn
, 21, 5);
3748 dest
= load_gpr(ctx
, rb
);
3750 dest
= get_temp(ctx
);
3751 tcg_gen_shli_reg(dest
, load_gpr(ctx
, rx
), 3);
3752 tcg_gen_add_reg(dest
, dest
, load_gpr(ctx
, rb
));
3754 dest
= do_ibranch_priv(ctx
, dest
);
3755 do_ibranch(ctx
, dest
, 0, n
);
3759 static bool trans_bve(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
3761 unsigned n
= extract32(insn
, 1, 1);
3762 unsigned rb
= extract32(insn
, 21, 5);
3763 unsigned link
= extract32(insn
, 13, 1) ? 2 : 0;
3766 #ifdef CONFIG_USER_ONLY
3767 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, rb
));
3768 do_ibranch(ctx
, dest
, link
, n
);
3771 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, rb
));
3773 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3774 if (ctx
->iaoq_b
== -1) {
3775 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3777 copy_iaoq_entry(cpu_iaoq_b
, -1, dest
);
3778 tcg_gen_mov_i64(cpu_iasq_b
, space_select(ctx
, 0, dest
));
3780 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3782 nullify_set(ctx
, n
);
3783 tcg_gen_lookup_and_goto_ptr();
3784 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3785 return nullify_end(ctx
);
3790 static const DisasInsn table_branch
[] = {
3791 { 0xe8000000u
, 0xfc006000u
, trans_bl
}, /* B,L and B,L,PUSH */
3792 { 0xe800a000u
, 0xfc00e000u
, trans_bl_long
},
3793 { 0xe8004000u
, 0xfc00fffdu
, trans_blr
},
3794 { 0xe800c000u
, 0xfc00fffdu
, trans_bv
},
3795 { 0xe800d000u
, 0xfc00dffcu
, trans_bve
},
3796 { 0xe8002000u
, 0xfc00e000u
, trans_b_gate
},
3799 static bool trans_fop_wew_0c(DisasContext
*ctx
, uint32_t insn
,
3800 const DisasInsn
*di
)
3802 unsigned rt
= extract32(insn
, 0, 5);
3803 unsigned ra
= extract32(insn
, 21, 5);
3804 do_fop_wew(ctx
, rt
, ra
, di
->f
.wew
);
3808 static bool trans_fop_wew_0e(DisasContext
*ctx
, uint32_t insn
,
3809 const DisasInsn
*di
)
3811 unsigned rt
= assemble_rt64(insn
);
3812 unsigned ra
= assemble_ra64(insn
);
3813 do_fop_wew(ctx
, rt
, ra
, di
->f
.wew
);
3817 static bool trans_fop_ded(DisasContext
*ctx
, uint32_t insn
,
3818 const DisasInsn
*di
)
3820 unsigned rt
= extract32(insn
, 0, 5);
3821 unsigned ra
= extract32(insn
, 21, 5);
3822 do_fop_ded(ctx
, rt
, ra
, di
->f
.ded
);
3826 static bool trans_fop_wed_0c(DisasContext
*ctx
, uint32_t insn
,
3827 const DisasInsn
*di
)
3829 unsigned rt
= extract32(insn
, 0, 5);
3830 unsigned ra
= extract32(insn
, 21, 5);
3831 do_fop_wed(ctx
, rt
, ra
, di
->f
.wed
);
3835 static bool trans_fop_wed_0e(DisasContext
*ctx
, uint32_t insn
,
3836 const DisasInsn
*di
)
3838 unsigned rt
= assemble_rt64(insn
);
3839 unsigned ra
= extract32(insn
, 21, 5);
3840 do_fop_wed(ctx
, rt
, ra
, di
->f
.wed
);
3844 static bool trans_fop_dew_0c(DisasContext
*ctx
, uint32_t insn
,
3845 const DisasInsn
*di
)
3847 unsigned rt
= extract32(insn
, 0, 5);
3848 unsigned ra
= extract32(insn
, 21, 5);
3849 do_fop_dew(ctx
, rt
, ra
, di
->f
.dew
);
3853 static bool trans_fop_dew_0e(DisasContext
*ctx
, uint32_t insn
,
3854 const DisasInsn
*di
)
3856 unsigned rt
= extract32(insn
, 0, 5);
3857 unsigned ra
= assemble_ra64(insn
);
3858 do_fop_dew(ctx
, rt
, ra
, di
->f
.dew
);
3862 static bool trans_fop_weww_0c(DisasContext
*ctx
, uint32_t insn
,
3863 const DisasInsn
*di
)
3865 unsigned rt
= extract32(insn
, 0, 5);
3866 unsigned rb
= extract32(insn
, 16, 5);
3867 unsigned ra
= extract32(insn
, 21, 5);
3868 do_fop_weww(ctx
, rt
, ra
, rb
, di
->f
.weww
);
3872 static bool trans_fop_weww_0e(DisasContext
*ctx
, uint32_t insn
,
3873 const DisasInsn
*di
)
3875 unsigned rt
= assemble_rt64(insn
);
3876 unsigned rb
= assemble_rb64(insn
);
3877 unsigned ra
= assemble_ra64(insn
);
3878 do_fop_weww(ctx
, rt
, ra
, rb
, di
->f
.weww
);
3882 static bool trans_fop_dedd(DisasContext
*ctx
, uint32_t insn
,
3883 const DisasInsn
*di
)
3885 unsigned rt
= extract32(insn
, 0, 5);
3886 unsigned rb
= extract32(insn
, 16, 5);
3887 unsigned ra
= extract32(insn
, 21, 5);
3888 do_fop_dedd(ctx
, rt
, ra
, rb
, di
->f
.dedd
);
3892 static void gen_fcpy_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3894 tcg_gen_mov_i32(dst
, src
);
3897 static void gen_fcpy_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3899 tcg_gen_mov_i64(dst
, src
);
3902 static void gen_fabs_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3904 tcg_gen_andi_i32(dst
, src
, INT32_MAX
);
3907 static void gen_fabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3909 tcg_gen_andi_i64(dst
, src
, INT64_MAX
);
3912 static void gen_fneg_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3914 tcg_gen_xori_i32(dst
, src
, INT32_MIN
);
3917 static void gen_fneg_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3919 tcg_gen_xori_i64(dst
, src
, INT64_MIN
);
3922 static void gen_fnegabs_s(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3924 tcg_gen_ori_i32(dst
, src
, INT32_MIN
);
3927 static void gen_fnegabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3929 tcg_gen_ori_i64(dst
, src
, INT64_MIN
);
3932 static void do_fcmp_s(DisasContext
*ctx
, unsigned ra
, unsigned rb
,
3933 unsigned y
, unsigned c
)
3935 TCGv_i32 ta
, tb
, tc
, ty
;
3939 ta
= load_frw0_i32(ra
);
3940 tb
= load_frw0_i32(rb
);
3941 ty
= tcg_const_i32(y
);
3942 tc
= tcg_const_i32(c
);
3944 gen_helper_fcmp_s(cpu_env
, ta
, tb
, ty
, tc
);
3946 tcg_temp_free_i32(ta
);
3947 tcg_temp_free_i32(tb
);
3948 tcg_temp_free_i32(ty
);
3949 tcg_temp_free_i32(tc
);
3954 static bool trans_fcmp_s_0c(DisasContext
*ctx
, uint32_t insn
,
3955 const DisasInsn
*di
)
3957 unsigned c
= extract32(insn
, 0, 5);
3958 unsigned y
= extract32(insn
, 13, 3);
3959 unsigned rb
= extract32(insn
, 16, 5);
3960 unsigned ra
= extract32(insn
, 21, 5);
3961 do_fcmp_s(ctx
, ra
, rb
, y
, c
);
3965 static bool trans_fcmp_s_0e(DisasContext
*ctx
, uint32_t insn
,
3966 const DisasInsn
*di
)
3968 unsigned c
= extract32(insn
, 0, 5);
3969 unsigned y
= extract32(insn
, 13, 3);
3970 unsigned rb
= assemble_rb64(insn
);
3971 unsigned ra
= assemble_ra64(insn
);
3972 do_fcmp_s(ctx
, ra
, rb
, y
, c
);
3976 static bool trans_fcmp_d(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
3978 unsigned c
= extract32(insn
, 0, 5);
3979 unsigned y
= extract32(insn
, 13, 3);
3980 unsigned rb
= extract32(insn
, 16, 5);
3981 unsigned ra
= extract32(insn
, 21, 5);
3989 ty
= tcg_const_i32(y
);
3990 tc
= tcg_const_i32(c
);
3992 gen_helper_fcmp_d(cpu_env
, ta
, tb
, ty
, tc
);
3994 tcg_temp_free_i64(ta
);
3995 tcg_temp_free_i64(tb
);
3996 tcg_temp_free_i32(ty
);
3997 tcg_temp_free_i32(tc
);
3999 return nullify_end(ctx
);
4002 static bool trans_ftest_t(DisasContext
*ctx
, uint32_t insn
,
4003 const DisasInsn
*di
)
4005 unsigned y
= extract32(insn
, 13, 3);
4006 unsigned cbit
= (y
^ 1) - 1;
4012 tcg_gen_ld32u_reg(t
, cpu_env
, offsetof(CPUHPPAState
, fr0_shadow
));
4013 tcg_gen_extract_reg(t
, t
, 21 - cbit
, 1);
4014 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4017 return nullify_end(ctx
);
4020 static bool trans_ftest_q(DisasContext
*ctx
, uint32_t insn
,
4021 const DisasInsn
*di
)
4023 unsigned c
= extract32(insn
, 0, 5);
4031 tcg_gen_ld32u_reg(t
, cpu_env
, offsetof(CPUHPPAState
, fr0_shadow
));
4034 case 0: /* simple */
4035 tcg_gen_andi_reg(t
, t
, 0x4000000);
4036 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4060 return gen_illegal(ctx
);
4063 TCGv_reg c
= load_const(ctx
, mask
);
4064 tcg_gen_or_reg(t
, t
, c
);
4065 ctx
->null_cond
= cond_make(TCG_COND_EQ
, t
, c
);
4067 tcg_gen_andi_reg(t
, t
, mask
);
4068 ctx
->null_cond
= cond_make_0(TCG_COND_EQ
, t
);
4071 return nullify_end(ctx
);
4074 static bool trans_xmpyu(DisasContext
*ctx
, uint32_t insn
, const DisasInsn
*di
)
4076 unsigned rt
= extract32(insn
, 0, 5);
4077 unsigned rb
= assemble_rb64(insn
);
4078 unsigned ra
= assemble_ra64(insn
);
4083 a
= load_frw0_i64(ra
);
4084 b
= load_frw0_i64(rb
);
4085 tcg_gen_mul_i64(a
, a
, b
);
4087 tcg_temp_free_i64(a
);
4088 tcg_temp_free_i64(b
);
4090 return nullify_end(ctx
);
4093 #define FOP_DED trans_fop_ded, .f.ded
4094 #define FOP_DEDD trans_fop_dedd, .f.dedd
4096 #define FOP_WEW trans_fop_wew_0c, .f.wew
4097 #define FOP_DEW trans_fop_dew_0c, .f.dew
4098 #define FOP_WED trans_fop_wed_0c, .f.wed
4099 #define FOP_WEWW trans_fop_weww_0c, .f.weww
4101 static const DisasInsn table_float_0c
[] = {
4102 /* floating point class zero */
4103 { 0x30004000, 0xfc1fffe0, FOP_WEW
= gen_fcpy_s
},
4104 { 0x30006000, 0xfc1fffe0, FOP_WEW
= gen_fabs_s
},
4105 { 0x30008000, 0xfc1fffe0, FOP_WEW
= gen_helper_fsqrt_s
},
4106 { 0x3000a000, 0xfc1fffe0, FOP_WEW
= gen_helper_frnd_s
},
4107 { 0x3000c000, 0xfc1fffe0, FOP_WEW
= gen_fneg_s
},
4108 { 0x3000e000, 0xfc1fffe0, FOP_WEW
= gen_fnegabs_s
},
4110 { 0x30004800, 0xfc1fffe0, FOP_DED
= gen_fcpy_d
},
4111 { 0x30006800, 0xfc1fffe0, FOP_DED
= gen_fabs_d
},
4112 { 0x30008800, 0xfc1fffe0, FOP_DED
= gen_helper_fsqrt_d
},
4113 { 0x3000a800, 0xfc1fffe0, FOP_DED
= gen_helper_frnd_d
},
4114 { 0x3000c800, 0xfc1fffe0, FOP_DED
= gen_fneg_d
},
4115 { 0x3000e800, 0xfc1fffe0, FOP_DED
= gen_fnegabs_d
},
4117 /* floating point class three */
4118 { 0x30000600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fadd_s
},
4119 { 0x30002600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fsub_s
},
4120 { 0x30004600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fmpy_s
},
4121 { 0x30006600, 0xfc00ffe0, FOP_WEWW
= gen_helper_fdiv_s
},
4123 { 0x30000e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fadd_d
},
4124 { 0x30002e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fsub_d
},
4125 { 0x30004e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fmpy_d
},
4126 { 0x30006e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fdiv_d
},
4128 /* floating point class one */
4130 { 0x30000a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_s
},
4131 { 0x30002200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_d
},
4133 { 0x30008200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_w_s
},
4134 { 0x30008a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_dw_s
},
4135 { 0x3000a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_w_d
},
4136 { 0x3000aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_dw_d
},
4138 { 0x30010200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_s_w
},
4139 { 0x30010a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_w
},
4140 { 0x30012200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_dw
},
4141 { 0x30012a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_dw
},
4142 /* float/int truncate */
4143 { 0x30018200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_t_s_w
},
4144 { 0x30018a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_t_d_w
},
4145 { 0x3001a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_t_s_dw
},
4146 { 0x3001aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_dw
},
4148 { 0x30028200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_uw_s
},
4149 { 0x30028a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_udw_s
},
4150 { 0x3002a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_uw_d
},
4151 { 0x3002aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_udw_d
},
4153 { 0x30030200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_s_uw
},
4154 { 0x30030a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_d_uw
},
4155 { 0x30032200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_s_udw
},
4156 { 0x30032a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_udw
},
4157 /* float/uint truncate */
4158 { 0x30038200, 0xfc1fffe0, FOP_WEW
= gen_helper_fcnv_t_s_uw
},
4159 { 0x30038a00, 0xfc1fffe0, FOP_WED
= gen_helper_fcnv_t_d_uw
},
4160 { 0x3003a200, 0xfc1fffe0, FOP_DEW
= gen_helper_fcnv_t_s_udw
},
4161 { 0x3003aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_udw
},
4163 /* floating point class two */
4164 { 0x30000400, 0xfc001fe0, trans_fcmp_s_0c
},
4165 { 0x30000c00, 0xfc001fe0, trans_fcmp_d
},
4166 { 0x30002420, 0xffffffe0, trans_ftest_q
},
4167 { 0x30000420, 0xffff1fff, trans_ftest_t
},
4169 /* FID. Note that ra == rt == 0, which via fcpy puts 0 into fr0.
4170 This is machine/revision == 0, which is reserved for simulator. */
4171 { 0x30000000, 0xffffffff, FOP_WEW
= gen_fcpy_s
},
4178 #define FOP_WEW trans_fop_wew_0e, .f.wew
4179 #define FOP_DEW trans_fop_dew_0e, .f.dew
4180 #define FOP_WED trans_fop_wed_0e, .f.wed
4181 #define FOP_WEWW trans_fop_weww_0e, .f.weww
4183 static const DisasInsn table_float_0e
[] = {
4184 /* floating point class zero */
4185 { 0x38004000, 0xfc1fff20, FOP_WEW
= gen_fcpy_s
},
4186 { 0x38006000, 0xfc1fff20, FOP_WEW
= gen_fabs_s
},
4187 { 0x38008000, 0xfc1fff20, FOP_WEW
= gen_helper_fsqrt_s
},
4188 { 0x3800a000, 0xfc1fff20, FOP_WEW
= gen_helper_frnd_s
},
4189 { 0x3800c000, 0xfc1fff20, FOP_WEW
= gen_fneg_s
},
4190 { 0x3800e000, 0xfc1fff20, FOP_WEW
= gen_fnegabs_s
},
4192 { 0x38004800, 0xfc1fffe0, FOP_DED
= gen_fcpy_d
},
4193 { 0x38006800, 0xfc1fffe0, FOP_DED
= gen_fabs_d
},
4194 { 0x38008800, 0xfc1fffe0, FOP_DED
= gen_helper_fsqrt_d
},
4195 { 0x3800a800, 0xfc1fffe0, FOP_DED
= gen_helper_frnd_d
},
4196 { 0x3800c800, 0xfc1fffe0, FOP_DED
= gen_fneg_d
},
4197 { 0x3800e800, 0xfc1fffe0, FOP_DED
= gen_fnegabs_d
},
4199 /* floating point class three */
4200 { 0x38000600, 0xfc00ef20, FOP_WEWW
= gen_helper_fadd_s
},
4201 { 0x38002600, 0xfc00ef20, FOP_WEWW
= gen_helper_fsub_s
},
4202 { 0x38004600, 0xfc00ef20, FOP_WEWW
= gen_helper_fmpy_s
},
4203 { 0x38006600, 0xfc00ef20, FOP_WEWW
= gen_helper_fdiv_s
},
4205 { 0x38000e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fadd_d
},
4206 { 0x38002e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fsub_d
},
4207 { 0x38004e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fmpy_d
},
4208 { 0x38006e00, 0xfc00ffe0, FOP_DEDD
= gen_helper_fdiv_d
},
4210 { 0x38004700, 0xfc00ef60, trans_xmpyu
},
4212 /* floating point class one */
4214 { 0x38000a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_s
},
4215 { 0x38002200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_s_d
},
4217 { 0x38008200, 0xfc1ffe20, FOP_WEW
= gen_helper_fcnv_w_s
},
4218 { 0x38008a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_dw_s
},
4219 { 0x3800a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_w_d
},
4220 { 0x3800aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_dw_d
},
4222 { 0x38010200, 0xfc1ffe20, FOP_WEW
= gen_helper_fcnv_s_w
},
4223 { 0x38010a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_w
},
4224 { 0x38012200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_s_dw
},
4225 { 0x38012a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_dw
},
4226 /* float/int truncate */
4227 { 0x38018200, 0xfc1ffe20, FOP_WEW
= gen_helper_fcnv_t_s_w
},
4228 { 0x38018a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_t_d_w
},
4229 { 0x3801a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_t_s_dw
},
4230 { 0x3801aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_dw
},
4232 { 0x38028200, 0xfc1ffe20, FOP_WEW
= gen_helper_fcnv_uw_s
},
4233 { 0x38028a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_udw_s
},
4234 { 0x3802a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_uw_d
},
4235 { 0x3802aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_udw_d
},
4237 { 0x38030200, 0xfc1ffe20, FOP_WEW
= gen_helper_fcnv_s_uw
},
4238 { 0x38030a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_d_uw
},
4239 { 0x38032200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_s_udw
},
4240 { 0x38032a00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_d_udw
},
4241 /* float/uint truncate */
4242 { 0x38038200, 0xfc1ffe20, FOP_WEW
= gen_helper_fcnv_t_s_uw
},
4243 { 0x38038a00, 0xfc1fffa0, FOP_WED
= gen_helper_fcnv_t_d_uw
},
4244 { 0x3803a200, 0xfc1fff60, FOP_DEW
= gen_helper_fcnv_t_s_udw
},
4245 { 0x3803aa00, 0xfc1fffe0, FOP_DED
= gen_helper_fcnv_t_d_udw
},
4247 /* floating point class two */
4248 { 0x38000400, 0xfc000f60, trans_fcmp_s_0e
},
4249 { 0x38000c00, 0xfc001fe0, trans_fcmp_d
},
/* Convert the fmpyadd single-precision register encodings to standard.
   The 5-bit field selects among registers 16..31 and their high halves:
   bit 4 selects the bank (+32), bits 0..3 the register, offset by 16.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
4265 static bool do_fmpyadd_s(DisasContext
*ctx
, arg_mpyadd
*a
, bool is_sub
)
4267 int tm
= fmpyadd_s_reg(a
->tm
);
4268 int ra
= fmpyadd_s_reg(a
->ra
);
4269 int ta
= fmpyadd_s_reg(a
->ta
);
4270 int rm2
= fmpyadd_s_reg(a
->rm2
);
4271 int rm1
= fmpyadd_s_reg(a
->rm1
);
4275 do_fop_weww(ctx
, tm
, rm1
, rm2
, gen_helper_fmpy_s
);
4276 do_fop_weww(ctx
, ta
, ta
, ra
,
4277 is_sub
? gen_helper_fsub_s
: gen_helper_fadd_s
);
4279 return nullify_end(ctx
);
4282 static bool trans_fmpyadd_f(DisasContext
*ctx
, arg_mpyadd
*a
)
4284 return do_fmpyadd_s(ctx
, a
, false);
4287 static bool trans_fmpysub_f(DisasContext
*ctx
, arg_mpyadd
*a
)
4289 return do_fmpyadd_s(ctx
, a
, true);
4292 static bool do_fmpyadd_d(DisasContext
*ctx
, arg_mpyadd
*a
, bool is_sub
)
4296 do_fop_dedd(ctx
, a
->tm
, a
->rm1
, a
->rm2
, gen_helper_fmpy_d
);
4297 do_fop_dedd(ctx
, a
->ta
, a
->ta
, a
->ra
,
4298 is_sub
? gen_helper_fsub_d
: gen_helper_fadd_d
);
4300 return nullify_end(ctx
);
4303 static bool trans_fmpyadd_d(DisasContext
*ctx
, arg_mpyadd
*a
)
4305 return do_fmpyadd_d(ctx
, a
, false);
4308 static bool trans_fmpysub_d(DisasContext
*ctx
, arg_mpyadd
*a
)
4310 return do_fmpyadd_d(ctx
, a
, true);
4313 static bool trans_fmpyfadd_s(DisasContext
*ctx
, uint32_t insn
,
4314 const DisasInsn
*di
)
4316 unsigned rt
= assemble_rt64(insn
);
4317 unsigned neg
= extract32(insn
, 5, 1);
4318 unsigned rm1
= assemble_ra64(insn
);
4319 unsigned rm2
= assemble_rb64(insn
);
4320 unsigned ra3
= assemble_rc64(insn
);
4324 a
= load_frw0_i32(rm1
);
4325 b
= load_frw0_i32(rm2
);
4326 c
= load_frw0_i32(ra3
);
4329 gen_helper_fmpynfadd_s(a
, cpu_env
, a
, b
, c
);
4331 gen_helper_fmpyfadd_s(a
, cpu_env
, a
, b
, c
);
4334 tcg_temp_free_i32(b
);
4335 tcg_temp_free_i32(c
);
4336 save_frw_i32(rt
, a
);
4337 tcg_temp_free_i32(a
);
4338 return nullify_end(ctx
);
4341 static bool trans_fmpyfadd_d(DisasContext
*ctx
, uint32_t insn
,
4342 const DisasInsn
*di
)
4344 unsigned rt
= extract32(insn
, 0, 5);
4345 unsigned neg
= extract32(insn
, 5, 1);
4346 unsigned rm1
= extract32(insn
, 21, 5);
4347 unsigned rm2
= extract32(insn
, 16, 5);
4348 unsigned ra3
= assemble_rc64(insn
);
4357 gen_helper_fmpynfadd_d(a
, cpu_env
, a
, b
, c
);
4359 gen_helper_fmpyfadd_d(a
, cpu_env
, a
, b
, c
);
4362 tcg_temp_free_i64(b
);
4363 tcg_temp_free_i64(c
);
4365 tcg_temp_free_i64(a
);
4366 return nullify_end(ctx
);
4369 static const DisasInsn table_fp_fused
[] = {
4370 { 0xb8000000u
, 0xfc000800u
, trans_fmpyfadd_s
},
4371 { 0xb8000800u
, 0xfc0019c0u
, trans_fmpyfadd_d
}
4374 static void translate_table_int(DisasContext
*ctx
, uint32_t insn
,
4375 const DisasInsn table
[], size_t n
)
4378 for (i
= 0; i
< n
; ++i
) {
4379 if ((insn
& table
[i
].mask
) == table
[i
].insn
) {
4380 table
[i
].trans(ctx
, insn
, &table
[i
]);
4384 qemu_log_mask(LOG_UNIMP
, "UNIMP insn %08x @ " TARGET_FMT_lx
"\n",
4385 insn
, ctx
->base
.pc_next
);
4389 #define translate_table(ctx, insn, table) \
4390 translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
4392 static void translate_one(DisasContext
*ctx
, uint32_t insn
)
4396 /* Transition to the auto-generated decoder. */
4397 if (decode(ctx
, insn
)) {
4401 opc
= extract32(insn
, 26, 6);
4404 trans_ldil(ctx
, insn
);
4407 trans_copr_w(ctx
, insn
);
4410 trans_addil(ctx
, insn
);
4413 trans_copr_dw(ctx
, insn
);
4416 translate_table(ctx
, insn
, table_float_0c
);
4419 trans_ldo(ctx
, insn
);
4422 translate_table(ctx
, insn
, table_float_0e
);
4426 trans_load(ctx
, insn
, false, MO_UB
);
4429 trans_load(ctx
, insn
, false, MO_TEUW
);
4432 trans_load(ctx
, insn
, false, MO_TEUL
);
4435 trans_load(ctx
, insn
, true, MO_TEUL
);
4438 trans_fload_mod(ctx
, insn
);
4441 trans_load_w(ctx
, insn
);
4444 trans_store(ctx
, insn
, false, MO_UB
);
4447 trans_store(ctx
, insn
, false, MO_TEUW
);
4450 trans_store(ctx
, insn
, false, MO_TEUL
);
4453 trans_store(ctx
, insn
, true, MO_TEUL
);
4456 trans_fstore_mod(ctx
, insn
);
4459 trans_store_w(ctx
, insn
);
4463 trans_cmpiclr(ctx
, insn
);
4466 trans_subi(ctx
, insn
);
4470 trans_addi(ctx
, insn
);
4473 translate_table(ctx
, insn
, table_fp_fused
);
4477 translate_table(ctx
, insn
, table_sh_ex
);
4480 translate_table(ctx
, insn
, table_depw
);
4483 trans_be(ctx
, insn
, false);
4486 trans_be(ctx
, insn
, true);
4489 translate_table(ctx
, insn
, table_branch
);
4492 case 0x04: /* spopn */
4493 case 0x05: /* diag */
4494 case 0x0F: /* product specific */
4497 case 0x07: /* unassigned */
4498 case 0x15: /* unassigned */
4499 case 0x1D: /* unassigned */
4500 case 0x37: /* unassigned */
4503 #ifndef CONFIG_USER_ONLY
4504 /* Unassigned, but use as system-halt. */
4505 if (insn
== 0xfffdead0) {
4506 gen_hlt(ctx
, 0); /* halt system */
4509 if (insn
== 0xfffdead1) {
4510 gen_hlt(ctx
, 1); /* reset system */
4521 static void hppa_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
4523 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4527 ctx
->tb_flags
= ctx
->base
.tb
->flags
;
4529 #ifdef CONFIG_USER_ONLY
4530 ctx
->privilege
= MMU_USER_IDX
;
4531 ctx
->mmu_idx
= MMU_USER_IDX
;
4532 ctx
->iaoq_f
= ctx
->base
.pc_first
| MMU_USER_IDX
;
4533 ctx
->iaoq_b
= ctx
->base
.tb
->cs_base
| MMU_USER_IDX
;
4535 ctx
->privilege
= (ctx
->tb_flags
>> TB_FLAG_PRIV_SHIFT
) & 3;
4536 ctx
->mmu_idx
= (ctx
->tb_flags
& PSW_D
? ctx
->privilege
: MMU_PHYS_IDX
);
4538 /* Recover the IAOQ values from the GVA + PRIV. */
4539 uint64_t cs_base
= ctx
->base
.tb
->cs_base
;
4540 uint64_t iasq_f
= cs_base
& ~0xffffffffull
;
4541 int32_t diff
= cs_base
;
4543 ctx
->iaoq_f
= (ctx
->base
.pc_first
& ~iasq_f
) + ctx
->privilege
;
4544 ctx
->iaoq_b
= (diff
? ctx
->iaoq_f
+ diff
: -1);
4547 ctx
->iaoq_n_var
= NULL
;
4549 /* Bound the number of instructions by those left on the page. */
4550 bound
= -(ctx
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
4551 ctx
->base
.max_insns
= MIN(ctx
->base
.max_insns
, bound
);
4555 memset(ctx
->tempr
, 0, sizeof(ctx
->tempr
));
4556 memset(ctx
->templ
, 0, sizeof(ctx
->templ
));
4559 static void hppa_tr_tb_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4561 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4563 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4564 ctx
->null_cond
= cond_make_f();
4565 ctx
->psw_n_nonzero
= false;
4566 if (ctx
->tb_flags
& PSW_N
) {
4567 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
4568 ctx
->psw_n_nonzero
= true;
4570 ctx
->null_lab
= NULL
;
4573 static void hppa_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4575 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4577 tcg_gen_insn_start(ctx
->iaoq_f
, ctx
->iaoq_b
);
4580 static bool hppa_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cs
,
4581 const CPUBreakpoint
*bp
)
4583 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4585 gen_excp(ctx
, EXCP_DEBUG
);
4586 ctx
->base
.pc_next
+= 4;
4590 static void hppa_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
4592 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4593 CPUHPPAState
*env
= cs
->env_ptr
;
4597 /* Execute one insn. */
4598 #ifdef CONFIG_USER_ONLY
4599 if (ctx
->base
.pc_next
< TARGET_PAGE_SIZE
) {
4601 ret
= ctx
->base
.is_jmp
;
4602 assert(ret
!= DISAS_NEXT
);
4606 /* Always fetch the insn, even if nullified, so that we check
4607 the page permissions for execute. */
4608 uint32_t insn
= cpu_ldl_code(env
, ctx
->base
.pc_next
);
4610 /* Set up the IA queue for the next insn.
4611 This will be overwritten by a branch. */
4612 if (ctx
->iaoq_b
== -1) {
4614 ctx
->iaoq_n_var
= get_temp(ctx
);
4615 tcg_gen_addi_reg(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
4617 ctx
->iaoq_n
= ctx
->iaoq_b
+ 4;
4618 ctx
->iaoq_n_var
= NULL
;
4621 if (unlikely(ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4622 ctx
->null_cond
.c
= TCG_COND_NEVER
;
4626 translate_one(ctx
, insn
);
4627 ret
= ctx
->base
.is_jmp
;
4628 assert(ctx
->null_lab
== NULL
);
4632 /* Free any temporaries allocated. */
4633 for (i
= 0, n
= ctx
->ntempr
; i
< n
; ++i
) {
4634 tcg_temp_free(ctx
->tempr
[i
]);
4635 ctx
->tempr
[i
] = NULL
;
4637 for (i
= 0, n
= ctx
->ntempl
; i
< n
; ++i
) {
4638 tcg_temp_free_tl(ctx
->templ
[i
]);
4639 ctx
->templ
[i
] = NULL
;
4644 /* Advance the insn queue. Note that this check also detects
4645 a priority change within the instruction queue. */
4646 if (ret
== DISAS_NEXT
&& ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
4647 if (ctx
->iaoq_b
!= -1 && ctx
->iaoq_n
!= -1
4648 && use_goto_tb(ctx
, ctx
->iaoq_b
)
4649 && (ctx
->null_cond
.c
== TCG_COND_NEVER
4650 || ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4651 nullify_set(ctx
, ctx
->null_cond
.c
== TCG_COND_ALWAYS
);
4652 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
4653 ctx
->base
.is_jmp
= ret
= DISAS_NORETURN
;
4655 ctx
->base
.is_jmp
= ret
= DISAS_IAQ_N_STALE
;
4658 ctx
->iaoq_f
= ctx
->iaoq_b
;
4659 ctx
->iaoq_b
= ctx
->iaoq_n
;
4660 ctx
->base
.pc_next
+= 4;
4662 if (ret
== DISAS_NORETURN
|| ret
== DISAS_IAQ_N_UPDATED
) {
4665 if (ctx
->iaoq_f
== -1) {
4666 tcg_gen_mov_reg(cpu_iaoq_f
, cpu_iaoq_b
);
4667 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
4668 #ifndef CONFIG_USER_ONLY
4669 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
4672 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
4673 } else if (ctx
->iaoq_b
== -1) {
4674 tcg_gen_mov_reg(cpu_iaoq_b
, ctx
->iaoq_n_var
);
4678 static void hppa_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
4680 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4681 DisasJumpType is_jmp
= ctx
->base
.is_jmp
;
4684 case DISAS_NORETURN
:
4686 case DISAS_TOO_MANY
:
4687 case DISAS_IAQ_N_STALE
:
4688 case DISAS_IAQ_N_STALE_EXIT
:
4689 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
4690 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
4693 case DISAS_IAQ_N_UPDATED
:
4694 if (ctx
->base
.singlestep_enabled
) {
4695 gen_excp_1(EXCP_DEBUG
);
4696 } else if (is_jmp
== DISAS_IAQ_N_STALE_EXIT
) {
4697 tcg_gen_exit_tb(NULL
, 0);
4699 tcg_gen_lookup_and_goto_ptr();
4703 g_assert_not_reached();
4707 static void hppa_tr_disas_log(const DisasContextBase
*dcbase
, CPUState
*cs
)
4709 target_ulong pc
= dcbase
->pc_first
;
4711 #ifdef CONFIG_USER_ONLY
4714 qemu_log("IN:\n0x00000000: (null)\n");
4717 qemu_log("IN:\n0x000000b0: light-weight-syscall\n");
4720 qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n");
4723 qemu_log("IN:\n0x00000100: syscall\n");
4728 qemu_log("IN: %s\n", lookup_symbol(pc
));
4729 log_target_disas(cs
, pc
, dcbase
->tb
->size
);
4732 static const TranslatorOps hppa_tr_ops
= {
4733 .init_disas_context
= hppa_tr_init_disas_context
,
4734 .tb_start
= hppa_tr_tb_start
,
4735 .insn_start
= hppa_tr_insn_start
,
4736 .breakpoint_check
= hppa_tr_breakpoint_check
,
4737 .translate_insn
= hppa_tr_translate_insn
,
4738 .tb_stop
= hppa_tr_tb_stop
,
4739 .disas_log
= hppa_tr_disas_log
,
4742 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
4746 translator_loop(&hppa_tr_ops
, &ctx
.base
, cs
, tb
);
4749 void restore_state_to_opc(CPUHPPAState
*env
, TranslationBlock
*tb
,
4752 env
->iaoq_f
= data
[0];
4753 if (data
[1] != (target_ureg
)-1) {
4754 env
->iaoq_b
= data
[1];
4756 /* Since we were executing the instruction at IAOQ_F, and took some
4757 sort of action that provoked the cpu_restore_state, we can infer
4758 that the instruction was not nullified. */