target/hexagon/genptr.c (qemu/ar7.git)

/*
 * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "insn.h"
#include "opcodes.h"
#include "translate.h"
#define QEMU_GENERATE /* Used internally by macros.h */
#include "macros.h"
#include "mmvec/macros.h"
#undef QEMU_GENERATE
#include "gen_tcg.h"
#include "gen_tcg_hvx.h"

static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv slot_mask = tcg_temp_new();

    tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero,
                       val, hex_new_value[rnum]);
    if (HEX_DEBUG) {
        /*
         * Do this so HELPER(debug_commit_end) will know
         *
         * Note that slot_mask indicates the value is not written
         * (i.e., slot was cancelled), so we create a true/false value before
         * or'ing with hex_reg_written[rnum].
         */
        tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
        tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
    }

    tcg_temp_free(slot_mask);
}

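/*
 * Example (illustrative): for a conditional transfer such as
 * "if (p0) R5 = R3", the translator would emit
 *
 *     gen_log_predicated_reg_write(5, <value of R3>, <slot>);
 *
 * so that hex_new_value[5] only picks up the new value when the bit for
 * <slot> in hex_slot_cancelled is clear (i.e., the slot was not cancelled).
 */
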
static inline void gen_log_reg_write(int rnum, TCGv val)
{
    tcg_gen_mov_tl(hex_new_value[rnum], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }
}

static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot)
{
    TCGv val32 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);
    TCGv slot_mask = tcg_temp_new();

    tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
    /* Low word */
    tcg_gen_extrl_i64_i32(val32, val);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum],
                       slot_mask, zero,
                       val32, hex_new_value[rnum]);
    /* High word */
    tcg_gen_extrh_i64_i32(val32, val);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1],
                       slot_mask, zero,
                       val32, hex_new_value[rnum + 1]);
    if (HEX_DEBUG) {
        /*
         * Do this so HELPER(debug_commit_end) will know
         *
         * Note that slot_mask indicates the value is not written
         * (i.e., slot was cancelled), so we create a true/false value before
         * or'ing with hex_reg_written[rnum].
         */
        tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
        tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
        tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1],
                      slot_mask);
    }

    tcg_temp_free(val32);
    tcg_temp_free(slot_mask);
}

static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
{
    /* Low word */
    tcg_gen_extrl_i64_i32(hex_new_value[rnum], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }

    /* High word */
    tcg_gen_extrh_i64_i32(hex_new_value[rnum + 1], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1);
    }
}

static inline void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
{
    TCGv base_val = tcg_temp_new();

    tcg_gen_andi_tl(base_val, val, 0xff);

    /*
     * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
     *
     * Multiple writes to the same preg are and'ed together
     * If this is the first predicate write in the packet, do a
     * straight assignment.  Otherwise, do an and.
     */
    if (!test_bit(pnum, ctx->pregs_written)) {
        tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val);
    } else {
        tcg_gen_and_tl(hex_new_pred_value[pnum],
                       hex_new_pred_value[pnum], base_val);
    }
    tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);

    tcg_temp_free(base_val);
}

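/*
 * Example (illustrative): in a packet such as
 *
 *     { p0 = cmp.eq(r0, #0); p0 = cmp.gt(r1, #0) }
 *
 * the second write to p0 finds its bit already set in ctx->pregs_written,
 * so hex_new_pred_value[0] ends up as the AND of the two compare results,
 * matching the PRM rule quoted above.
 */
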
static inline void gen_read_p3_0(TCGv control_reg)
{
    tcg_gen_movi_tl(control_reg, 0);
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
    }
}

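/*
 * The P3:0 alias packs the four predicate registers into one word:
 * p0 in bits 7:0, p1 in bits 15:8, p2 in bits 23:16, and p3 in bits 31:24.
 */
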
/*
 * Certain control registers require special handling on read
 *     HEX_REG_P3_0          aliased to the predicate registers
 *                           -> concat the 4 predicate registers together
 *     HEX_REG_PC            actual value stored in DisasContext
 *                           -> assign from ctx->base.pc_next
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> add current TB changes to existing reg value
 */
static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
                                     TCGv dest)
{
    if (reg_num == HEX_REG_P3_0) {
        gen_read_p3_0(dest);
    } else if (reg_num == HEX_REG_PC) {
        tcg_gen_movi_tl(dest, ctx->base.pc_next);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
    } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
    } else {
        tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
    }
}

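/*
 * Example (illustrative): a read of the PC control register ("R2 = PC")
 * cannot come from hex_gpr[], because the PC is only tracked in the
 * DisasContext, so gen_read_ctrl_reg() materializes ctx->base.pc_next
 * as an immediate instead.
 */
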
static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
                                          TCGv_i64 dest)
{
    if (reg_num == HEX_REG_P3_0) {
        TCGv p3_0 = tcg_temp_new();
        gen_read_p3_0(p3_0);
        tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
        tcg_temp_free(p3_0);
    } else if (reg_num == HEX_REG_PC - 1) {
        TCGv pc = tcg_constant_tl(ctx->base.pc_next);
        tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        TCGv pkt_cnt = tcg_temp_new();
        TCGv insn_cnt = tcg_temp_new();
        tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
        tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
        tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
        tcg_temp_free(pkt_cnt);
        tcg_temp_free(insn_cnt);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        TCGv hvx_cnt = tcg_temp_new();
        tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
        tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
        tcg_temp_free(hvx_cnt);
    } else {
        tcg_gen_concat_i32_i64(dest,
            hex_gpr[reg_num],
            hex_gpr[reg_num + 1]);
    }
}

static void gen_write_p3_0(DisasContext *ctx, TCGv control_reg)
{
    TCGv hex_p8 = tcg_temp_new();
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_extract_tl(hex_p8, control_reg, i * 8, 8);
        gen_log_pred_write(ctx, i, hex_p8);
        ctx_log_pred_write(ctx, i);
    }
    tcg_temp_free(hex_p8);
}

/*
 * Certain control registers require special handling on write
 *     HEX_REG_P3_0          aliased to the predicate registers
 *                           -> break the value across 4 predicate registers
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> clear the changes
 */
static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
                                      TCGv val)
{
    if (reg_num == HEX_REG_P3_0) {
        gen_write_p3_0(ctx, val);
    } else {
        gen_log_reg_write(reg_num, val);
        ctx_log_reg_write(ctx, reg_num);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
        }
        if (reg_num == HEX_REG_QEMU_INSN_CNT) {
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
                                           TCGv_i64 val)
{
    if (reg_num == HEX_REG_P3_0) {
        TCGv val32 = tcg_temp_new();
        tcg_gen_extrl_i64_i32(val32, val);
        gen_write_p3_0(ctx, val32);
        tcg_gen_extrh_i64_i32(val32, val);
        gen_log_reg_write(reg_num + 1, val32);
        tcg_temp_free(val32);
        ctx_log_reg_write(ctx, reg_num + 1);
    } else {
        gen_log_reg_write_pair(reg_num, val);
        ctx_log_reg_write_pair(ctx, reg_num);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

static TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 8, 8);
    } else {
        tcg_gen_extract_tl(result, src, N * 8, 8);
    }
    return result;
}

static TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
{
    TCGv_i64 res64 = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_sextract_i64(res64, src, N * 8, 8);
    } else {
        tcg_gen_extract_i64(res64, src, N * 8, 8);
    }
    tcg_gen_extrl_i64_i32(result, res64);
    tcg_temp_free_i64(res64);

    return result;
}

static inline TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 16, 16);
    } else {
        tcg_gen_extract_tl(result, src, N * 16, 16);
    }
    return result;
}

static inline void gen_set_half(int N, TCGv result, TCGv src)
{
    tcg_gen_deposit_tl(result, result, src, N * 16, 16);
}

static inline void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
    tcg_temp_free_i64(src64);
}

static void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
    tcg_temp_free_i64(src64);
}

static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_tl(hex_llsc_val, dest);
}

static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_i64(hex_llsc_val_i64, dest);
}

static inline void gen_store_conditional4(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_tl(0xff);
    zero = tcg_constant_tl(0);
    tmp = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
                              ctx->mem_idx, MO_32);
    tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
                       one, zero);
    tcg_temp_free(tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

static inline void gen_store_conditional8(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv_i64 src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv_i64 one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_i64(0xff);
    zero = tcg_constant_i64(0);
    tmp = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
                               ctx->mem_idx, MO_64);
    tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
                        one, zero);
    tcg_gen_extrl_i64_i32(pred, tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

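/*
 * The two store-conditional sequences above emulate Hexagon's LL/SC pairs:
 * gen_load_locked{4,8}u records the address and loaded value in
 * hex_llsc_addr/hex_llsc_val*, and the conditional store is implemented
 * with an atomic cmpxchg against that recorded value.  pred is set to
 * 0xff on success and 0 on failure, and hex_llsc_addr is invalidated
 * (set to ~0) on either path.
 */
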
static inline void gen_store32(TCGv vaddr, TCGv src, int width, int slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], width);
    tcg_gen_mov_tl(hex_store_val32[slot], src);
}

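/*
 * gen_store32() does not touch memory; it stages the address, width, and
 * value in the per-slot hex_store_* buffers.  The actual memory write is
 * performed when the packet commits, which preserves the all-or-nothing
 * store semantics of Hexagon packets.
 */
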
static inline void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src, int slot)
{
    gen_store32(vaddr, src, 1, slot);
}

static inline void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, int slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store1(cpu_env, vaddr, tmp, slot);
}

static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, int slot)
{
    gen_store32(vaddr, src, 2, slot);
}

static inline void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, int slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store2(cpu_env, vaddr, tmp, slot);
}

static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, int slot)
{
    gen_store32(vaddr, src, 4, slot);
}

static inline void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, int slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store4(cpu_env, vaddr, tmp, slot);
}

static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, int slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], 8);
    tcg_gen_mov_i64(hex_store_val64[slot], src);
}

static inline void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, int slot)
{
    TCGv_i64 tmp = tcg_constant_i64(src);
    gen_store8(cpu_env, vaddr, tmp, slot);
}

static TCGv gen_8bitsof(TCGv result, TCGv value)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv ones = tcg_constant_tl(0xff);
    tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);

    return result;
}

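/*
 * gen_8bitsof() converts a zero/non-zero scalar into the 0x00/0xff
 * encoding used for predicate bytes, e.g. to turn a compare result into
 * a value suitable for gen_log_pred_write().
 */
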
static intptr_t vreg_src_off(DisasContext *ctx, int num)
{
    intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);

    if (test_bit(num, ctx->vregs_select)) {
        offset = ctx_future_vreg_off(ctx, num, 1, false);
    }
    if (test_bit(num, ctx->vregs_updated_tmp)) {
        offset = ctx_tmp_vreg_off(ctx, num, 1, false);
    }
    return offset;
}

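/*
 * vreg_src_off() decides where a vector source operand is read from:
 * the committed VRegs[] by default, the future copy when the operand is
 * forwarded from a producer in the same packet (vregs_select), or the
 * temporary copy when the register was written with .tmp semantics
 * (vregs_updated_tmp).
 */
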
static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
                               VRegWriteType type, int slot_num,
                               bool is_predicated)
{
    TCGLabel *label_end = NULL;
    intptr_t dstoff;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_local_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
        tcg_temp_free(cancelled);
    }

    if (type != EXT_TMP) {
        dstoff = ctx_future_vreg_off(ctx, num, 1, true);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
        tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num);
    } else {
        dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    }

    if (is_predicated) {
        gen_set_label(label_end);
    }
}

static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
                                    VRegWriteType type, int slot_num,
                                    bool is_predicated)
{
    gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated);
    srcoff += sizeof(MMVector);
    gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated);
}

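/*
 * Vector register pairs start on an even register number, so num ^ 0 and
 * num ^ 1 address the even and odd halves of the pair.
 */
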
static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew,
                               int slot_num, bool is_predicated)
{
    TCGLabel *label_end = NULL;
    intptr_t dstoff;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_local_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
        tcg_temp_free(cancelled);
    }

    dstoff = offsetof(CPUHexagonState, future_QRegs[num]);
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg));

    if (is_predicated) {
        tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num);
        gen_set_label(label_end);
    }
}

static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
                          bool aligned)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    if (aligned) {
        tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
    }
    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
        tcg_gen_addi_tl(src, src, 8);
        tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
    }
    tcg_temp_free_i64(tmp);
}

static void gen_vreg_store(DisasContext *ctx, Insn *insn, Packet *pkt,
                           TCGv EA, intptr_t srcoff, int slot, bool aligned)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    if (is_gather_store_insn(insn, pkt)) {
        TCGv sl = tcg_constant_tl(slot);
        gen_helper_gather_store(cpu_env, EA, sl);
        return;
    }

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    if (aligned) {
        tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                        ~((int32_t)sizeof(MMVector) - 1));
    } else {
        tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
    }
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Set the mask to all 1's */
    tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
}

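/*
 * Like scalar stores, HVX vector stores are deferred: the data and byte
 * mask are staged in vstore[slot] and the hex_vstore_* variables, and the
 * memory write happens when the packet commits.  Gather stores bypass
 * this path and go through the gather_store helper instead.
 */
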
static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                                  intptr_t bitsoff, int slot, bool invert)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                    ~((int32_t)sizeof(MMVector) - 1));
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Copy the mask */
    tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
    if (invert) {
        tcg_gen_gvec_not(MO_64, maskoff, maskoff,
                         sizeof(MMQReg), sizeof(MMQReg));
    }
}

static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 word = tcg_temp_new_i64();
    TCGv_i64 bits = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_i64 ones = tcg_constant_i64(~0);

    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
        tcg_gen_movi_i64(mask, 0);

        for (int j = 0; j < 8; j += size) {
            tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
            tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
            tcg_gen_deposit_i64(mask, mask, bits, j, size);
        }

        tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(word);
    tcg_temp_free_i64(bits);
    tcg_temp_free_i64(mask);
}

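/*
 * Worked example for vec_to_qvec(): with size == 4, each 4-byte element
 * of the source vector yields 4 identical predicate bits -- all ones if
 * the element is non-zero, all zeros otherwise.  Each iteration of the
 * outer loop consumes 8 source bytes and stores one mask byte.
 */
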
static void probe_noshuf_load(TCGv va, int s, int mi)
{
    TCGv size = tcg_constant_tl(s);
    TCGv mem_idx = tcg_constant_tl(mi);
    gen_helper_probe_noshuf_load(cpu_env, va, size, mem_idx);
}

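/*
 * The per-opcode translators and the opcode-indexed function table are
 * generated at build time from the instruction definitions and pulled in
 * below.
 */
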
#include "tcg_funcs_generated.c.inc"
#include "tcg_func_table_generated.c.inc"