/*
 * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "insn.h"
#include "opcodes.h"
#include "translate.h"
#define QEMU_GENERATE /* Used internally by macros.h */
#include "macros.h"
#include "mmvec/macros.h"
#undef QEMU_GENERATE
#include "gen_tcg.h"
#include "gen_tcg_hvx.h"
#include "genptr.h"

TCGv gen_read_reg(TCGv result, int num)
{
    tcg_gen_mov_tl(result, hex_gpr[num]);
    return result;
}

TCGv gen_read_preg(TCGv pred, uint8_t num)
{
    tcg_gen_mov_tl(pred, hex_pred[num]);
    return pred;
}

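/*
 * Some registers are not fully writable from guest code: bits set in the
 * masks below are preserved on a write (IMMUTABLE marks the whole register
 * as read-only).  gen_masked_reg_write() applies the mask.
 */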
#define IMMUTABLE (~0)

static const target_ulong reg_immut_masks[TOTAL_PER_THREAD_REGS] = {
    [HEX_REG_USR] = 0xc13000c0,
    [HEX_REG_PC] = IMMUTABLE,
    [HEX_REG_GP] = 0x3f,
    [HEX_REG_UPCYCLELO] = IMMUTABLE,
    [HEX_REG_UPCYCLEHI] = IMMUTABLE,
    [HEX_REG_UTIMERLO] = IMMUTABLE,
    [HEX_REG_UTIMERHI] = IMMUTABLE,
};

static inline void gen_masked_reg_write(TCGv new_val, TCGv cur_val,
                                        target_ulong reg_mask)
{
    if (reg_mask) {
        TCGv tmp = tcg_temp_new();

        /* new_val = (new_val & ~reg_mask) | (cur_val & reg_mask) */
        tcg_gen_andi_tl(new_val, new_val, ~reg_mask);
        tcg_gen_andi_tl(tmp, cur_val, reg_mask);
        tcg_gen_or_tl(new_val, new_val, tmp);
    }
}

static TCGv get_result_gpr(DisasContext *ctx, int rnum)
{
    return hex_new_value[rnum];
}

static TCGv_i64 get_result_gpr_pair(DisasContext *ctx, int rnum)
{
    TCGv_i64 result = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(result, hex_new_value[rnum],
                                   hex_new_value[rnum + 1]);
    return result;
}

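/*
 * Register writes in a packet are not applied to hex_gpr[] immediately;
 * they are staged in hex_new_value[] and written back when the packet
 * commits.
 */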
void gen_log_reg_write(int rnum, TCGv val)
{
    const target_ulong reg_mask = reg_immut_masks[rnum];

    gen_masked_reg_write(val, hex_gpr[rnum], reg_mask);
    tcg_gen_mov_tl(hex_new_value[rnum], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }
}

static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
{
    const target_ulong reg_mask_low = reg_immut_masks[rnum];
    const target_ulong reg_mask_high = reg_immut_masks[rnum + 1];
    TCGv val32 = tcg_temp_new();

    /* Low word */
    tcg_gen_extrl_i64_i32(val32, val);
    gen_masked_reg_write(val32, hex_gpr[rnum], reg_mask_low);
    tcg_gen_mov_tl(hex_new_value[rnum], val32);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }

    /* High word */
    tcg_gen_extrh_i64_i32(val32, val);
    gen_masked_reg_write(val32, hex_gpr[rnum + 1], reg_mask_high);
    tcg_gen_mov_tl(hex_new_value[rnum + 1], val32);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1);
    }
}

void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
{
    TCGv base_val = tcg_temp_new();

    tcg_gen_andi_tl(base_val, val, 0xff);

    /*
     * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
     *
     * Multiple writes to the same preg are and'ed together
     * If this is the first predicate write in the packet, do a
     * straight assignment.  Otherwise, do an and.
     */
    if (!test_bit(pnum, ctx->pregs_written)) {
        tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val);
    } else {
        tcg_gen_and_tl(hex_new_pred_value[pnum],
                       hex_new_pred_value[pnum], base_val);
    }
    tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);
    set_bit(pnum, ctx->pregs_written);
}

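/* Pack the four 8-bit predicate registers into one 32-bit value (P3:0) */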
static inline void gen_read_p3_0(TCGv control_reg)
{
    tcg_gen_movi_tl(control_reg, 0);
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
    }
}

/*
 * Certain control registers require special handling on read
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> concat the 4 predicate registers together
 *     HEX_REG_PC            actual value stored in DisasContext
 *                           -> assign from ctx->base.pc_next
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> add current TB changes to existing reg value
 */
static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
                                     TCGv dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_read_p3_0(dest);
    } else if (reg_num == HEX_REG_PC) {
        tcg_gen_movi_tl(dest, ctx->base.pc_next);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
    } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
    } else {
        tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
    }
}

static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
                                          TCGv_i64 dest)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv p3_0 = tcg_temp_new();
        gen_read_p3_0(p3_0);
        tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
    } else if (reg_num == HEX_REG_PC - 1) {
        TCGv pc = tcg_constant_tl(ctx->base.pc_next);
        tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        TCGv pkt_cnt = tcg_temp_new();
        TCGv insn_cnt = tcg_temp_new();
        tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
        tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
        tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        TCGv hvx_cnt = tcg_temp_new();
        tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
        tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
    } else {
        tcg_gen_concat_i32_i64(dest,
                               hex_gpr[reg_num],
                               hex_gpr[reg_num + 1]);
    }
}

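/* Unpack a 32-bit P3:0 value into the four predicate registers */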
static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg)
{
    TCGv hex_p8 = tcg_temp_new();
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8);
        gen_log_pred_write(ctx, i, hex_p8);
    }
}

/*
 * Certain control registers require special handling on write
 *     HEX_REG_P3_0_ALIASED  aliased to the predicate registers
 *                           -> break the value across 4 predicate registers
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> clear the changes
 */
static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
                                      TCGv val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        gen_write_p3_0(ctx, val);
    } else {
        gen_log_reg_write(reg_num, val);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
        }
        if (reg_num == HEX_REG_QEMU_INSN_CNT) {
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
                                           TCGv_i64 val)
{
    if (reg_num == HEX_REG_P3_0_ALIASED) {
        TCGv result = get_result_gpr(ctx, reg_num + 1);
        TCGv val32 = tcg_temp_new();
        tcg_gen_extrl_i64_i32(val32, val);
        gen_write_p3_0(ctx, val32);
        tcg_gen_extrh_i64_i32(val32, val);
        tcg_gen_mov_tl(result, val32);
    } else {
        gen_log_reg_write_pair(reg_num, val);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

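/* Helpers to extract or insert byte and halfword lanes of 32/64-bit values */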
TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 8, 8);
    } else {
        tcg_gen_extract_tl(result, src, N * 8, 8);
    }
    return result;
}

TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
{
    TCGv_i64 res64 = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_sextract_i64(res64, src, N * 8, 8);
    } else {
        tcg_gen_extract_i64(res64, src, N * 8, 8);
    }
    tcg_gen_extrl_i64_i32(result, res64);

    return result;
}

TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 16, 16);
    } else {
        tcg_gen_extract_tl(result, src, N * 16, 16);
    }
    return result;
}

void gen_set_half(int N, TCGv result, TCGv src)
{
    tcg_gen_deposit_tl(result, result, src, N * 16, 16);
}

void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
}

void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
}

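/*
 * Load-locked: remember the address and the loaded value so that a later
 * store-conditional can check whether the location is unchanged.
 */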
static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_tl(hex_llsc_val, dest);
}

static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_i64(hex_llsc_val_i64, dest);
}

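/*
 * Store-conditional: fail immediately if the address does not match the
 * locked address; otherwise a cmpxchg against the remembered value decides
 * success.  pred is set to 0xff on success and 0 on failure, and the lock
 * address is invalidated on both paths.
 */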
static inline void gen_store_conditional4(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_tl(0xff);
    zero = tcg_constant_tl(0);
    tmp = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
                              ctx->mem_idx, MO_32);
    tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
                       one, zero);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

static inline void gen_store_conditional8(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv_i64 src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv_i64 one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_i64(0xff);
    zero = tcg_constant_i64(0);
    tmp = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
                               ctx->mem_idx, MO_64);
    tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
                        one, zero);
    tcg_gen_extrl_i64_i32(pred, tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

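/*
 * Scalar stores are not performed here; the address, width, and data are
 * recorded in the hex_store_* arrays so the store can be done when the
 * packet commits.
 */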
void gen_store32(TCGv vaddr, TCGv src, int width, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], width);
    tcg_gen_mov_tl(hex_store_val32[slot], src);
}

void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 1, slot);
}

void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store1(cpu_env, vaddr, tmp, slot);
}

void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 2, slot);
}

void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store2(cpu_env, vaddr, tmp, slot);
}

void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, uint32_t slot)
{
    gen_store32(vaddr, src, 4, slot);
}

void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, uint32_t slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store4(cpu_env, vaddr, tmp, slot);
}

void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, uint32_t slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], 8);
    tcg_gen_mov_i64(hex_store_val64[slot], src);
}

void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, uint32_t slot)
{
    TCGv_i64 tmp = tcg_constant_i64(src);
    gen_store8(cpu_env, vaddr, tmp, slot);
}

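/* Return 0xff if value is non-zero, else 0 (the predicate byte encoding) */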
TCGv gen_8bitsof(TCGv result, TCGv value)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv ones = tcg_constant_tl(0xff);
    tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);

    return result;
}

static void gen_write_new_pc_addr(DisasContext *ctx, TCGv addr,
                                  TCGCond cond, TCGv pred)
{
    TCGLabel *pred_false = NULL;
    if (cond != TCG_COND_ALWAYS) {
        pred_false = gen_new_label();
        tcg_gen_brcondi_tl(cond, pred, 0, pred_false);
    }

    if (ctx->pkt->pkt_has_multi_cof) {
        /* If there are multiple branches in a packet, ignore the second one */
        tcg_gen_movcond_tl(TCG_COND_NE, hex_gpr[HEX_REG_PC],
                           hex_branch_taken, tcg_constant_tl(0),
                           hex_gpr[HEX_REG_PC], addr);
        tcg_gen_movi_tl(hex_branch_taken, 1);
    } else {
        tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], addr);
    }

    if (cond != TCG_COND_ALWAYS) {
        gen_set_label(pred_false);
    }
}

static void gen_write_new_pc_pcrel(DisasContext *ctx, int pc_off,
                                   TCGCond cond, TCGv pred)
{
    target_ulong dest = ctx->pkt->pc + pc_off;
    if (ctx->pkt->pkt_has_multi_cof) {
        gen_write_new_pc_addr(ctx, tcg_constant_tl(dest), cond, pred);
    } else {
        /* Defer this jump to the end of the TB */
        ctx->branch_cond = TCG_COND_ALWAYS;
        if (pred != NULL) {
            ctx->branch_cond = cond;
            tcg_gen_mov_tl(hex_branch_taken, pred);
        }
        ctx->branch_dest = dest;
    }
}

void gen_set_usr_field(int field, TCGv val)
{
    tcg_gen_deposit_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR],
                       val,
                       reg_field_info[field].offset,
                       reg_field_info[field].width);
}

void gen_set_usr_fieldi(int field, int x)
{
    if (reg_field_info[field].width == 1) {
        target_ulong bit = 1 << reg_field_info[field].offset;
        if ((x & 1) == 1) {
            tcg_gen_ori_tl(hex_new_value[HEX_REG_USR],
                           hex_new_value[HEX_REG_USR],
                           bit);
        } else {
            tcg_gen_andi_tl(hex_new_value[HEX_REG_USR],
                            hex_new_value[HEX_REG_USR],
                            ~bit);
        }
    } else {
        TCGv val = tcg_constant_tl(x);
        gen_set_usr_field(field, val);
    }
}

static void gen_compare(TCGCond cond, TCGv res, TCGv arg1, TCGv arg2)
{
    TCGv one = tcg_constant_tl(0xff);
    TCGv zero = tcg_constant_tl(0);

    tcg_gen_movcond_tl(cond, res, arg1, arg2, one, zero);
}

static void gen_cond_jumpr(DisasContext *ctx, TCGv dst_pc,
                           TCGCond cond, TCGv pred)
{
    gen_write_new_pc_addr(ctx, dst_pc, cond, pred);
}

static void gen_cond_jumpr31(DisasContext *ctx, TCGCond cond, TCGv pred)
{
    TCGv LSB = tcg_temp_new();
    tcg_gen_andi_tl(LSB, pred, 1);
    gen_cond_jumpr(ctx, hex_gpr[HEX_REG_LR], cond, LSB);
}

static void gen_cond_jump(DisasContext *ctx, TCGCond cond, TCGv pred,
                          int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, cond, pred);
}

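/*
 * Compound compare-and-jump instructions are split into two parts:
 * part1 computes and logs the predicate, and the second part reads the
 * new predicate value and performs the conditional jump.
 */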
static void gen_cmpnd_cmp_jmp(DisasContext *ctx,
                              int pnum, TCGCond cond1, TCGv arg1, TCGv arg2,
                              TCGCond cond2, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        gen_compare(cond1, pred, arg1, arg2);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
        gen_cond_jump(ctx, cond2, pred, pc_off);
    }
}

static void gen_cmpnd_cmp_jmp_t(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_EQ, pc_off);
}

static void gen_cmpnd_cmp_jmp_f(DisasContext *ctx,
                                int pnum, TCGCond cond, TCGv arg1, TCGv arg2,
                                int pc_off)
{
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, arg2, TCG_COND_NE, pc_off);
}

static void gen_cmpnd_cmpi_jmp_t(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_EQ, pc_off);
}

static void gen_cmpnd_cmpi_jmp_f(DisasContext *ctx,
                                 int pnum, TCGCond cond, TCGv arg1, int arg2,
                                 int pc_off)
{
    TCGv tmp = tcg_constant_tl(arg2);
    gen_cmpnd_cmp_jmp(ctx, pnum, cond, arg1, tmp, TCG_COND_NE, pc_off);
}

static void gen_cmpnd_cmp_n1_jmp_t(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_t(ctx, pnum, cond, arg, -1, pc_off);
}

static void gen_cmpnd_cmp_n1_jmp_f(DisasContext *ctx, int pnum, TCGCond cond,
                                   TCGv arg, int pc_off)
{
    gen_cmpnd_cmpi_jmp_f(ctx, pnum, cond, arg, -1, pc_off);
}

static void gen_cmpnd_tstbit0_jmp(DisasContext *ctx,
                                  int pnum, TCGv arg, TCGCond cond, int pc_off)
{
    if (ctx->insn->part1) {
        TCGv pred = tcg_temp_new();
        tcg_gen_andi_tl(pred, arg, 1);
        gen_8bitsof(pred, pred);
        gen_log_pred_write(ctx, pnum, pred);
    } else {
        TCGv pred = tcg_temp_new();
        tcg_gen_mov_tl(pred, hex_new_pred_value[pnum]);
        gen_cond_jump(ctx, cond, pred, pc_off);
    }
}

static void gen_testbit0_jumpnv(DisasContext *ctx,
                                TCGv arg, TCGCond cond, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_andi_tl(pred, arg, 1);
    gen_cond_jump(ctx, cond, pred, pc_off);
}

static void gen_jump(DisasContext *ctx, int pc_off)
{
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

static void gen_jumpr(DisasContext *ctx, TCGv new_pc)
{
    gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
}

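/* Calls write the return address (ctx->next_PC) to LR before branching */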
static void gen_call(DisasContext *ctx, int pc_off)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_write_new_pc_pcrel(ctx, pc_off, TCG_COND_ALWAYS, NULL);
}

static void gen_callr(DisasContext *ctx, TCGv new_pc)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_write_new_pc_addr(ctx, new_pc, TCG_COND_ALWAYS, NULL);
}

static void gen_cond_call(DisasContext *ctx, TCGv pred,
                          TCGCond cond, int pc_off)
{
    TCGv lr = get_result_gpr(ctx, HEX_REG_LR);
    TCGv lsb = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(lsb, pred, 1);
    gen_write_new_pc_pcrel(ctx, pc_off, cond, lsb);
    tcg_gen_brcondi_tl(cond, lsb, 0, skip);
    tcg_gen_movi_tl(lr, ctx->next_PC);
    gen_set_label(skip);
}

static void gen_cond_callr(DisasContext *ctx,
                           TCGCond cond, TCGv pred, TCGv new_pc)
{
    TCGv lsb = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(lsb, pred, 1);
    tcg_gen_brcondi_tl(cond, lsb, 0, skip);
    gen_callr(ctx, new_pc);
    gen_set_label(skip);
}

/* frame ^= (int64_t)FRAMEKEY << 32 */
static void gen_frame_unscramble(TCGv_i64 frame)
{
    TCGv_i64 framekey = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(framekey, hex_gpr[HEX_REG_FRAMEKEY]);
    tcg_gen_shli_i64(framekey, framekey, 32);
    tcg_gen_xor_i64(frame, frame, framekey);
}

static void gen_load_frame(DisasContext *ctx, TCGv_i64 frame, TCGv EA)
{
    Insn *insn = ctx->insn;  /* Needed for CHECK_NOSHUF */
    CHECK_NOSHUF(EA, 8);
    tcg_gen_qemu_ld64(frame, EA, ctx->mem_idx);
}

static void gen_return(DisasContext *ctx, TCGv_i64 dst, TCGv src)
{
    /*
     * frame = *src
     * dst = frame_unscramble(frame)
     * SP = src + 8
     * PC = dst.w[1]
     */
    TCGv_i64 frame = tcg_temp_new_i64();
    TCGv r31 = tcg_temp_new();
    TCGv r29 = get_result_gpr(ctx, HEX_REG_SP);

    gen_load_frame(ctx, frame, src);
    gen_frame_unscramble(frame);
    tcg_gen_mov_i64(dst, frame);
    tcg_gen_addi_tl(r29, src, 8);
    tcg_gen_extrh_i64_i32(r31, dst);
    gen_jumpr(ctx, r31);
}

/* if (pred) dst = dealloc_return(src):raw */
static void gen_cond_return(DisasContext *ctx, TCGv_i64 dst, TCGv src,
                            TCGv pred, TCGCond cond)
{
    TCGv LSB = tcg_temp_new();
    TCGLabel *skip = gen_new_label();
    tcg_gen_andi_tl(LSB, pred, 1);

    tcg_gen_brcondi_tl(cond, LSB, 0, skip);
    gen_return(ctx, dst, src);
    gen_set_label(skip);
}

/* sub-instruction version (no RddV, so handle it manually) */
static void gen_cond_return_subinsn(DisasContext *ctx, TCGCond cond, TCGv pred)
{
    TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP);
    gen_cond_return(ctx, RddV, hex_gpr[HEX_REG_FP], pred, cond);
    gen_log_reg_write_pair(HEX_REG_FP, RddV);
}

static void gen_endloop0(DisasContext *ctx)
{
    TCGv lpcfg = tcg_temp_new();

    GET_USR_FIELD(USR_LPCFG, lpcfg);

    /*
     * if (lpcfg == 1) {
     *     hex_new_pred_value[3] = 0xff;
     *     hex_pred_written |= 1 << 3;
     * }
     */
    TCGLabel *label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
    {
        tcg_gen_movi_tl(hex_new_pred_value[3], 0xff);
        tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3);
    }
    gen_set_label(label1);

    /*
     * if (lpcfg) {
     *     SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
     * }
     */
    TCGLabel *label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
    {
        tcg_gen_subi_tl(lpcfg, lpcfg, 1);
        SET_USR_FIELD(USR_LPCFG, lpcfg);
    }
    gen_set_label(label2);

    /*
     * If we're in a tight loop, we'll do this at the end of the TB to take
     * advantage of direct block chaining.
     */
    if (!ctx->is_tight_loop) {
        /*
         * if (hex_gpr[HEX_REG_LC0] > 1) {
         *     PC = hex_gpr[HEX_REG_SA0];
         *     hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
         * }
         */
        TCGLabel *label3 = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
        {
            TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
            gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
            tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
        }
        gen_set_label(label3);
    }
}

static void gen_endloop1(DisasContext *ctx)
{
    /*
     * if (hex_gpr[HEX_REG_LC1] > 1) {
     *     PC = hex_gpr[HEX_REG_SA1];
     *     hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1;
     * }
     */
    TCGLabel *label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, label);
    {
        TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
        tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
    }
    gen_set_label(label);
}

static void gen_endloop01(DisasContext *ctx)
{
    TCGv lpcfg = tcg_temp_new();
    TCGLabel *label1 = gen_new_label();
    TCGLabel *label2 = gen_new_label();
    TCGLabel *label3 = gen_new_label();
    TCGLabel *done = gen_new_label();

    GET_USR_FIELD(USR_LPCFG, lpcfg);

    /*
     * if (lpcfg == 1) {
     *     hex_new_pred_value[3] = 0xff;
     *     hex_pred_written |= 1 << 3;
     * }
     */
    tcg_gen_brcondi_tl(TCG_COND_NE, lpcfg, 1, label1);
    {
        tcg_gen_movi_tl(hex_new_pred_value[3], 0xff);
        tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << 3);
    }
    gen_set_label(label1);

    /*
     * if (lpcfg) {
     *     SET_USR_FIELD(USR_LPCFG, lpcfg - 1);
     * }
     */
    tcg_gen_brcondi_tl(TCG_COND_EQ, lpcfg, 0, label2);
    {
        tcg_gen_subi_tl(lpcfg, lpcfg, 1);
        SET_USR_FIELD(USR_LPCFG, lpcfg);
    }
    gen_set_label(label2);

    /*
     * if (hex_gpr[HEX_REG_LC0] > 1) {
     *     PC = hex_gpr[HEX_REG_SA0];
     *     hex_new_value[HEX_REG_LC0] = hex_gpr[HEX_REG_LC0] - 1;
     * } else {
     *     if (hex_gpr[HEX_REG_LC1] > 1) {
     *         hex_next_pc = hex_gpr[HEX_REG_SA1];
     *         hex_new_value[HEX_REG_LC1] = hex_gpr[HEX_REG_LC1] - 1;
     *     }
     * }
     */
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC0], 1, label3);
    {
        TCGv lc0 = get_result_gpr(ctx, HEX_REG_LC0);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA0]);
        tcg_gen_subi_tl(lc0, hex_gpr[HEX_REG_LC0], 1);
        tcg_gen_br(done);
    }
    gen_set_label(label3);
    tcg_gen_brcondi_tl(TCG_COND_LEU, hex_gpr[HEX_REG_LC1], 1, done);
    {
        TCGv lc1 = get_result_gpr(ctx, HEX_REG_LC1);
        gen_jumpr(ctx, hex_gpr[HEX_REG_SA1]);
        tcg_gen_subi_tl(lc1, hex_gpr[HEX_REG_LC1], 1);
    }
    gen_set_label(done);
}

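/*
 * New-value compare-and-jump: evaluate the comparison into a predicate,
 * then take the PC-relative branch when the predicate is non-zero.
 */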
static void gen_cmp_jumpnv(DisasContext *ctx,
                           TCGCond cond, TCGv val, TCGv src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcond_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}

static void gen_cmpi_jumpnv(DisasContext *ctx,
                            TCGCond cond, TCGv val, int src, int pc_off)
{
    TCGv pred = tcg_temp_new();
    tcg_gen_setcondi_tl(cond, pred, val, src);
    gen_cond_jump(ctx, TCG_COND_EQ, pred, pc_off);
}

/* Shift left with saturation */
static void gen_shl_sat(TCGv dst, TCGv src, TCGv shift_amt)
{
    TCGv sh32 = tcg_temp_new();
    TCGv dst_sar = tcg_temp_new();
    TCGv ovf = tcg_temp_new();
    TCGv satval = tcg_temp_new();
    TCGv min = tcg_constant_tl(0x80000000);
    TCGv max = tcg_constant_tl(0x7fffffff);

    /*
     * Possible values for shift_amt are 0 .. 64
     * We need special handling for values above 31
     *
     * sh32 = shift & 31;
     * dst = sh32 == shift ? src : 0;
     * dst <<= sh32;
     * dst_sar = dst >> sh32;
     * satval = src < 0 ? min : max;
     * if (dst_asr != src) {
     *     usr.OVF |= 1;
     *     dst = satval;
     * }
     */

    tcg_gen_andi_tl(sh32, shift_amt, 31);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, sh32, shift_amt,
                       src, tcg_constant_tl(0));
    tcg_gen_shl_tl(dst, dst, sh32);
    tcg_gen_sar_tl(dst_sar, dst, sh32);
    tcg_gen_movcond_tl(TCG_COND_LT, satval, src, tcg_constant_tl(0), min, max);

    tcg_gen_setcond_tl(TCG_COND_NE, ovf, dst_sar, src);
    tcg_gen_shli_tl(ovf, ovf, reg_field_info[USR_OVF].offset);
    tcg_gen_or_tl(hex_new_value[HEX_REG_USR], hex_new_value[HEX_REG_USR], ovf);

    tcg_gen_movcond_tl(TCG_COND_EQ, dst, dst_sar, src, dst, satval);
}

static void gen_sar(TCGv dst, TCGv src, TCGv shift_amt)
{
    /*
     * Shift arithmetic right
     * Robust when shift_amt is >31 bits
     */
    TCGv tmp = tcg_temp_new();
    tcg_gen_umin_tl(tmp, shift_amt, tcg_constant_tl(31));
    tcg_gen_sar_tl(dst, src, tmp);
}

/* Bidirectional shift right with saturation */
static void gen_asr_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift left */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_shl_sat(RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift right */
    gen_sar(RdV, RsV, shift_amt);

    gen_set_label(done);
}

/* Bidirectional shift left with saturation */
static void gen_asl_r_r_sat(TCGv RdV, TCGv RsV, TCGv RtV)
{
    TCGv shift_amt = tcg_temp_new();
    TCGLabel *positive = gen_new_label();
    TCGLabel *done = gen_new_label();

    tcg_gen_sextract_i32(shift_amt, RtV, 0, 7);
    tcg_gen_brcondi_tl(TCG_COND_GE, shift_amt, 0, positive);

    /* Negative shift amount => shift right */
    tcg_gen_neg_tl(shift_amt, shift_amt);
    gen_sar(RdV, RsV, shift_amt);
    tcg_gen_br(done);

    gen_set_label(positive);
    /* Positive shift amount => shift left */
    gen_shl_sat(RdV, RsV, shift_amt);

    gen_set_label(done);
}

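/*
 * Return the env offset to read HVX vector register `num` from: the
 * committed VRegs unless this packet selects the future (.new) or tmp
 * copy of the register.
 */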
static intptr_t vreg_src_off(DisasContext *ctx, int num)
{
    intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);

    if (test_bit(num, ctx->vregs_select)) {
        offset = ctx_future_vreg_off(ctx, num, 1, false);
    }
    if (test_bit(num, ctx->vregs_updated_tmp)) {
        offset = ctx_tmp_vreg_off(ctx, num, 1, false);
    }
    return offset;
}

static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
                               VRegWriteType type)
{
    intptr_t dstoff;

    if (type != EXT_TMP) {
        dstoff = ctx_future_vreg_off(ctx, num, 1, true);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    } else {
        dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    }
}

static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
                                    VRegWriteType type)
{
    gen_log_vreg_write(ctx, srcoff, num ^ 0, type);
    srcoff += sizeof(MMVector);
    gen_log_vreg_write(ctx, srcoff, num ^ 1, type);
}

static intptr_t get_result_qreg(DisasContext *ctx, int qnum)
{
    return offsetof(CPUHexagonState, future_QRegs[qnum]);
}

static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
                          bool aligned)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    if (aligned) {
        tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
    }
    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
        tcg_gen_addi_tl(src, src, 8);
        tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
    }
}

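/*
 * Like scalar stores, vector stores are deferred: the data and a byte mask
 * are copied into the vstore buffer and flushed when the packet commits.
 * Gather stores are handled by a helper instead.
 */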
static void gen_vreg_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                           int slot, bool aligned)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    if (is_gather_store_insn(ctx)) {
        TCGv sl = tcg_constant_tl(slot);
        gen_helper_gather_store(cpu_env, EA, sl);
        return;
    }

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    if (aligned) {
        tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                        ~((int32_t)sizeof(MMVector) - 1));
    } else {
        tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
    }
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Set the mask to all 1's */
    tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
}

static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                                  intptr_t bitsoff, int slot, bool invert)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                    ~((int32_t)sizeof(MMVector) - 1));
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Copy the mask */
    tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
    if (invert) {
        tcg_gen_gvec_not(MO_64, maskoff, maskoff,
                         sizeof(MMQReg), sizeof(MMQReg));
    }
}

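/*
 * Convert a vector to its QReg (predicate) form: one mask bit per vector
 * byte, with all of an element's bits set when that element is non-zero.
 */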
static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 word = tcg_temp_new_i64();
    TCGv_i64 bits = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_i64 ones = tcg_constant_i64(~0);

    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
        tcg_gen_movi_i64(mask, 0);

        for (int j = 0; j < 8; j += size) {
            tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
            tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
            tcg_gen_deposit_i64(mask, mask, bits, j, size);
        }

        tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
    }
}

void probe_noshuf_load(TCGv va, int s, int mi)
{
    TCGv size = tcg_constant_tl(s);
    TCGv mem_idx = tcg_constant_tl(mi);
    gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx);
}

/*
 * Note: Since this function might branch, `val` is
 * required to be a `tcg_temp_local`.
 */
void gen_set_usr_field_if(int field, TCGv val)
{
    /* Sets the USR field if `val` is non-zero */
    if (reg_field_info[field].width == 1) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_extract_tl(tmp, val, 0, reg_field_info[field].width);
        tcg_gen_shli_tl(tmp, tmp, reg_field_info[field].offset);
        tcg_gen_or_tl(hex_new_value[HEX_REG_USR],
                      hex_new_value[HEX_REG_USR],
                      tmp);
    } else {
        TCGLabel *skip_label = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, val, 0, skip_label);
        gen_set_usr_field(field, val);
        gen_set_label(skip_label);
    }
}

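/* Saturate to a signed width-bit value, e.g. width == 8 clamps to [-128, 127] */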
void gen_sat_i32(TCGv dest, TCGv source, int width)
{
    TCGv max_val = tcg_constant_tl((1 << (width - 1)) - 1);
    TCGv min_val = tcg_constant_tl(-(1 << (width - 1)));
    tcg_gen_smin_tl(dest, source, max_val);
    tcg_gen_smax_tl(dest, dest, min_val);
}

void gen_sat_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    gen_sat_i32(dest, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
}

void gen_satu_i32(TCGv dest, TCGv source, int width)
{
    TCGv max_val = tcg_constant_tl((1 << width) - 1);
    TCGv zero = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_GTU, dest, source, max_val, max_val, source);
    tcg_gen_movcond_tl(TCG_COND_LT, dest, source, zero, zero, dest);
}

void gen_satu_i32_ovfl(TCGv ovfl, TCGv dest, TCGv source, int width)
{
    gen_satu_i32(dest, source, width);
    tcg_gen_setcond_tl(TCG_COND_NE, ovfl, source, dest);
}

void gen_sat_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 max_val = tcg_constant_i64((1LL << (width - 1)) - 1LL);
    TCGv_i64 min_val = tcg_constant_i64(-(1LL << (width - 1)));
    tcg_gen_smin_i64(dest, source, max_val);
    tcg_gen_smax_i64(dest, dest, min_val);
}

void gen_sat_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 ovfl_64;
    gen_sat_i64(dest, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}

void gen_satu_i64(TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 max_val = tcg_constant_i64((1LL << width) - 1LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    tcg_gen_movcond_i64(TCG_COND_GTU, dest, source, max_val, max_val, source);
    tcg_gen_movcond_i64(TCG_COND_LT, dest, source, zero, zero, dest);
}

void gen_satu_i64_ovfl(TCGv ovfl, TCGv_i64 dest, TCGv_i64 source, int width)
{
    TCGv_i64 ovfl_64;
    gen_satu_i64(dest, source, width);
    ovfl_64 = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, ovfl_64, dest, source);
    tcg_gen_trunc_i64_tl(ovfl, ovfl_64);
}

/* Implements the fADDSAT64 macro in TCG */
void gen_add_sat_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 sum = tcg_temp_new_i64();
    TCGv_i64 xor = tcg_temp_new_i64();
    TCGv_i64 cond1 = tcg_temp_new_i64();
    TCGv_i64 cond2 = tcg_temp_new_i64();
    TCGv_i64 cond3 = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_constant_i64(0x8000000000000000ULL);
    TCGv_i64 max_pos = tcg_constant_i64(0x7FFFFFFFFFFFFFFFLL);
    TCGv_i64 max_neg = tcg_constant_i64(0x8000000000000000LL);
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGLabel *no_ovfl_label = gen_new_label();
    TCGLabel *ovfl_label = gen_new_label();
    TCGLabel *ret_label = gen_new_label();

    tcg_gen_add_i64(sum, a, b);
    tcg_gen_xor_i64(xor, a, b);

    /* if (xor & mask) */
    tcg_gen_and_i64(cond1, xor, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond1, 0, no_ovfl_label);

    /* else if ((a ^ sum) & mask) */
    tcg_gen_xor_i64(cond2, a, sum);
    tcg_gen_and_i64(cond2, cond2, mask);
    tcg_gen_brcondi_i64(TCG_COND_NE, cond2, 0, ovfl_label);
    /* fallthrough to no_ovfl_label branch */

    /* if branch */
    gen_set_label(no_ovfl_label);
    tcg_gen_mov_i64(ret, sum);
    tcg_gen_br(ret_label);

    /* else if branch */
    gen_set_label(ovfl_label);
    tcg_gen_and_i64(cond3, sum, mask);
    tcg_gen_movcond_i64(TCG_COND_NE, ret, cond3, zero, max_pos, max_neg);
    SET_USR_FIELD(USR_OVF, 1);

    gen_set_label(ret_label);
}

#include "tcg_funcs_generated.c.inc"
#include "tcg_func_table_generated.c.inc"