/*
 *  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "insn.h"
#include "opcodes.h"
#include "translate.h"
#define QEMU_GENERATE /* Used internally by macros.h */
#include "macros.h"
#include "mmvec/macros.h"
#undef QEMU_GENERATE
#include "gen_tcg.h"
#include "gen_tcg_hvx.h"

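/*
 * gen_log_predicated_reg_write:
 * Log a write to GPR rnum from a predicated instruction.  The movcond only
 * latches val into hex_new_value[rnum] when the slot has not been cancelled;
 * otherwise the old value is kept.
 */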
static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv slot_mask = tcg_temp_new();

    tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum], slot_mask, zero,
                       val, hex_new_value[rnum]);
    if (HEX_DEBUG) {
        /*
         * Do this so HELPER(debug_commit_end) will know
         *
         * Note that slot_mask indicates the value is not written
         * (i.e., slot was cancelled), so we create a true/false value before
         * or'ing with hex_reg_written[rnum].
         */
        tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
        tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
    }

    tcg_temp_free(slot_mask);
}

static inline void gen_log_reg_write(int rnum, TCGv val)
{
    tcg_gen_mov_tl(hex_new_value[rnum], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }
}

static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot)
{
    TCGv val32 = tcg_temp_new();
    TCGv zero = tcg_constant_tl(0);
    TCGv slot_mask = tcg_temp_new();

    tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot);
    /* Low word */
    tcg_gen_extrl_i64_i32(val32, val);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum],
                       slot_mask, zero,
                       val32, hex_new_value[rnum]);
    /* High word */
    tcg_gen_extrh_i64_i32(val32, val);
    tcg_gen_movcond_tl(TCG_COND_EQ, hex_new_value[rnum + 1],
                       slot_mask, zero,
                       val32, hex_new_value[rnum + 1]);
    if (HEX_DEBUG) {
        /*
         * Do this so HELPER(debug_commit_end) will know
         *
         * Note that slot_mask indicates the value is not written
         * (i.e., slot was cancelled), so we create a true/false value before
         * or'ing with hex_reg_written[rnum].
         */
        tcg_gen_setcond_tl(TCG_COND_EQ, slot_mask, slot_mask, zero);
        tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask);
        tcg_gen_or_tl(hex_reg_written[rnum + 1], hex_reg_written[rnum + 1],
                      slot_mask);
    }

    tcg_temp_free(val32);
    tcg_temp_free(slot_mask);
}

static void gen_log_reg_write_pair(int rnum, TCGv_i64 val)
{
    /* Low word */
    tcg_gen_extrl_i64_i32(hex_new_value[rnum], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum], 1);
    }

    /* High word */
    tcg_gen_extrh_i64_i32(hex_new_value[rnum + 1], val);
    if (HEX_DEBUG) {
        /* Do this so HELPER(debug_commit_end) will know */
        tcg_gen_movi_tl(hex_reg_written[rnum + 1], 1);
    }
}

static inline void gen_log_pred_write(DisasContext *ctx, int pnum, TCGv val)
{
    TCGv base_val = tcg_temp_new();

    tcg_gen_andi_tl(base_val, val, 0xff);

    /*
     * Section 6.1.3 of the Hexagon V67 Programmer's Reference Manual
     *
     * Multiple writes to the same preg are and'ed together
     * If this is the first predicate write in the packet, do a
     * straight assignment.  Otherwise, do an and.
     */
    if (!test_bit(pnum, ctx->pregs_written)) {
        tcg_gen_mov_tl(hex_new_pred_value[pnum], base_val);
    } else {
        tcg_gen_and_tl(hex_new_pred_value[pnum],
                       hex_new_pred_value[pnum], base_val);
    }
    tcg_gen_ori_tl(hex_pred_written, hex_pred_written, 1 << pnum);

    tcg_temp_free(base_val);
}

static inline void gen_read_p3_0(TCGv control_reg)
{
    tcg_gen_movi_tl(control_reg, 0);
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_deposit_tl(control_reg, control_reg, hex_pred[i], i * 8, 8);
    }
}

/*
 * Certain control registers require special handling on read
 *     HEX_REG_P3_0          aliased to the predicate registers
 *                           -> concat the 4 predicate registers together
 *     HEX_REG_PC            actual value stored in DisasContext
 *                           -> assign from ctx->base.pc_next
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> add current TB changes to existing reg value
 */
static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num,
                                     TCGv dest)
{
    if (reg_num == HEX_REG_P3_0) {
        gen_read_p3_0(dest);
    } else if (reg_num == HEX_REG_PC) {
        tcg_gen_movi_tl(dest, ctx->base.pc_next);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
    } else if (reg_num == HEX_REG_QEMU_INSN_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
    } else {
        tcg_gen_mov_tl(dest, hex_gpr[reg_num]);
    }
}

static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num,
                                          TCGv_i64 dest)
{
    if (reg_num == HEX_REG_P3_0) {
        TCGv p3_0 = tcg_temp_new();
        gen_read_p3_0(p3_0);
        tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]);
        tcg_temp_free(p3_0);
    } else if (reg_num == HEX_REG_PC - 1) {
        TCGv pc = tcg_constant_tl(ctx->base.pc_next);
        tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc);
    } else if (reg_num == HEX_REG_QEMU_PKT_CNT) {
        TCGv pkt_cnt = tcg_temp_new();
        TCGv insn_cnt = tcg_temp_new();
        tcg_gen_addi_tl(pkt_cnt, hex_gpr[HEX_REG_QEMU_PKT_CNT],
                        ctx->num_packets);
        tcg_gen_addi_tl(insn_cnt, hex_gpr[HEX_REG_QEMU_INSN_CNT],
                        ctx->num_insns);
        tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt);
        tcg_temp_free(pkt_cnt);
        tcg_temp_free(insn_cnt);
    } else if (reg_num == HEX_REG_QEMU_HVX_CNT) {
        TCGv hvx_cnt = tcg_temp_new();
        tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT],
                        ctx->num_hvx_insns);
        tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]);
        tcg_temp_free(hvx_cnt);
    } else {
        tcg_gen_concat_i32_i64(dest,
                               hex_gpr[reg_num],
                               hex_gpr[reg_num + 1]);
    }
}

static inline void gen_write_p3_0(TCGv control_reg)
{
    for (int i = 0; i < NUM_PREGS; i++) {
        tcg_gen_extract_tl(hex_pred[i], control_reg, i * 8, 8);
    }
}

/*
 * Certain control registers require special handling on write
 *     HEX_REG_P3_0          aliased to the predicate registers
 *                           -> break the value across 4 predicate registers
 *     HEX_REG_QEMU_*_CNT    changes in current TB in DisasContext
 *                           -> clear the changes
 */
static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num,
                                      TCGv val)
{
    if (reg_num == HEX_REG_P3_0) {
        gen_write_p3_0(val);
    } else {
        gen_log_reg_write(reg_num, val);
        ctx_log_reg_write(ctx, reg_num);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
        }
        if (reg_num == HEX_REG_QEMU_INSN_CNT) {
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num,
                                           TCGv_i64 val)
{
    if (reg_num == HEX_REG_P3_0) {
        TCGv val32 = tcg_temp_new();
        tcg_gen_extrl_i64_i32(val32, val);
        gen_write_p3_0(val32);
        tcg_gen_extrh_i64_i32(val32, val);
        gen_log_reg_write(reg_num + 1, val32);
        tcg_temp_free(val32);
        ctx_log_reg_write(ctx, reg_num + 1);
    } else {
        gen_log_reg_write_pair(reg_num, val);
        ctx_log_reg_write_pair(ctx, reg_num);
        if (reg_num == HEX_REG_QEMU_PKT_CNT) {
            ctx->num_packets = 0;
            ctx->num_insns = 0;
        }
        if (reg_num == HEX_REG_QEMU_HVX_CNT) {
            ctx->num_hvx_insns = 0;
        }
    }
}

static TCGv gen_get_byte(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 8, 8);
    } else {
        tcg_gen_extract_tl(result, src, N * 8, 8);
    }

    return result;
}

static TCGv gen_get_byte_i64(TCGv result, int N, TCGv_i64 src, bool sign)
{
    TCGv_i64 res64 = tcg_temp_new_i64();
    if (sign) {
        tcg_gen_sextract_i64(res64, src, N * 8, 8);
    } else {
        tcg_gen_extract_i64(res64, src, N * 8, 8);
    }
    tcg_gen_extrl_i64_i32(result, res64);
    tcg_temp_free_i64(res64);

    return result;
}

static inline TCGv gen_get_half(TCGv result, int N, TCGv src, bool sign)
{
    if (sign) {
        tcg_gen_sextract_tl(result, src, N * 16, 16);
    } else {
        tcg_gen_extract_tl(result, src, N * 16, 16);
    }

    return result;
}

static inline void gen_set_half(int N, TCGv result, TCGv src)
{
    tcg_gen_deposit_tl(result, result, src, N * 16, 16);
}

static inline void gen_set_half_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 16, 16);
    tcg_temp_free_i64(src64);
}

static void gen_set_byte_i64(int N, TCGv_i64 result, TCGv src)
{
    TCGv_i64 src64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(src64, src);
    tcg_gen_deposit_i64(result, result, src64, N * 8, 8);
    tcg_temp_free_i64(src64);
}

static inline void gen_load_locked4u(TCGv dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld32u(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_tl(hex_llsc_val, dest);
}

static inline void gen_load_locked8u(TCGv_i64 dest, TCGv vaddr, int mem_index)
{
    tcg_gen_qemu_ld64(dest, vaddr, mem_index);
    tcg_gen_mov_tl(hex_llsc_addr, vaddr);
    tcg_gen_mov_i64(hex_llsc_val_i64, dest);
}

static inline void gen_store_conditional4(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_tl(0xff);
    zero = tcg_constant_tl(0);
    tmp = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src,
                              ctx->mem_idx, MO_32);
    tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val,
                       one, zero);
    tcg_temp_free(tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

static inline void gen_store_conditional8(DisasContext *ctx,
                                          TCGv pred, TCGv vaddr, TCGv_i64 src)
{
    TCGLabel *fail = gen_new_label();
    TCGLabel *done = gen_new_label();
    TCGv_i64 one, zero, tmp;

    tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail);

    one = tcg_constant_i64(0xff);
    zero = tcg_constant_i64(0);
    tmp = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src,
                               ctx->mem_idx, MO_64);
    tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64,
                        one, zero);
    tcg_gen_extrl_i64_i32(pred, tmp);
    tcg_temp_free_i64(tmp);
    tcg_gen_br(done);

    gen_set_label(fail);
    tcg_gen_movi_tl(pred, 0);

    gen_set_label(done);
    tcg_gen_movi_tl(hex_llsc_addr, ~0);
}

static inline void gen_store32(TCGv vaddr, TCGv src, int width, int slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], width);
    tcg_gen_mov_tl(hex_store_val32[slot], src);
}

static inline void gen_store1(TCGv_env cpu_env, TCGv vaddr, TCGv src,
                              DisasContext *ctx, int slot)
{
    gen_store32(vaddr, src, 1, slot);
    ctx->store_width[slot] = 1;
}

static inline void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src,
                               DisasContext *ctx, int slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store1(cpu_env, vaddr, tmp, ctx, slot);
}

static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src,
                              DisasContext *ctx, int slot)
{
    gen_store32(vaddr, src, 2, slot);
    ctx->store_width[slot] = 2;
}

static inline void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src,
                               DisasContext *ctx, int slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store2(cpu_env, vaddr, tmp, ctx, slot);
}

static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src,
                              DisasContext *ctx, int slot)
{
    gen_store32(vaddr, src, 4, slot);
    ctx->store_width[slot] = 4;
}

static inline void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src,
                               DisasContext *ctx, int slot)
{
    TCGv tmp = tcg_constant_tl(src);
    gen_store4(cpu_env, vaddr, tmp, ctx, slot);
}

static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src,
                              DisasContext *ctx, int slot)
{
    tcg_gen_mov_tl(hex_store_addr[slot], vaddr);
    tcg_gen_movi_tl(hex_store_width[slot], 8);
    tcg_gen_mov_i64(hex_store_val64[slot], src);
    ctx->store_width[slot] = 8;
}

static inline void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src,
                               DisasContext *ctx, int slot)
{
    TCGv_i64 tmp = tcg_constant_i64(src);
    gen_store8(cpu_env, vaddr, tmp, ctx, slot);
}

static TCGv gen_8bitsof(TCGv result, TCGv value)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv ones = tcg_constant_tl(0xff);
    tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero);

    return result;
}

static intptr_t vreg_src_off(DisasContext *ctx, int num)
{
    intptr_t offset = offsetof(CPUHexagonState, VRegs[num]);

    if (test_bit(num, ctx->vregs_select)) {
        offset = ctx_future_vreg_off(ctx, num, 1, false);
    }
    if (test_bit(num, ctx->vregs_updated_tmp)) {
        offset = ctx_tmp_vreg_off(ctx, num, 1, false);
    }
    return offset;
}

static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num,
                               VRegWriteType type, int slot_num,
                               bool is_predicated)
{
    TCGLabel *label_end = NULL;
    intptr_t dstoff;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_local_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
        tcg_temp_free(cancelled);
    }

    if (type != EXT_TMP) {
        dstoff = ctx_future_vreg_off(ctx, num, 1, true);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
        tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num);
    } else {
        dstoff = ctx_tmp_vreg_off(ctx, num, 1, false);
        tcg_gen_gvec_mov(MO_64, dstoff, srcoff,
                         sizeof(MMVector), sizeof(MMVector));
    }

    if (is_predicated) {
        gen_set_label(label_end);
    }
}

static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num,
                                    VRegWriteType type, int slot_num,
                                    bool is_predicated)
{
    gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated);
    srcoff += sizeof(MMVector);
    gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated);
}

static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew,
                               int slot_num, bool is_predicated)
{
    TCGLabel *label_end = NULL;
    intptr_t dstoff;

    if (is_predicated) {
        TCGv cancelled = tcg_temp_local_new();
        label_end = gen_new_label();

        /* Don't do anything if the slot was cancelled */
        tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1);
        tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end);
        tcg_temp_free(cancelled);
    }

    dstoff = offsetof(CPUHexagonState, future_QRegs[num]);
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg));

    if (is_predicated) {
        tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num);
        gen_set_label(label_end);
    }
}

static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src,
                          bool aligned)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    if (aligned) {
        tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1));
    }
    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx);
        tcg_gen_addi_tl(src, src, 8);
        tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8);
    }
    tcg_temp_free_i64(tmp);
}

static void gen_vreg_store(DisasContext *ctx, Insn *insn, Packet *pkt,
                           TCGv EA, intptr_t srcoff, int slot, bool aligned)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    if (is_gather_store_insn(insn, pkt)) {
        TCGv sl = tcg_constant_tl(slot);
        gen_helper_gather_store(cpu_env, EA, sl);
        return;
    }

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    if (aligned) {
        tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                        ~((int32_t)sizeof(MMVector) - 1));
    } else {
        tcg_gen_mov_tl(hex_vstore_addr[slot], EA);
    }
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Set the mask to all 1's */
    tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL);
}

static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff,
                                  intptr_t bitsoff, int slot, bool invert)
{
    intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data);
    intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask);

    tcg_gen_movi_tl(hex_vstore_pending[slot], 1);
    tcg_gen_andi_tl(hex_vstore_addr[slot], EA,
                    ~((int32_t)sizeof(MMVector) - 1));
    tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector));

    /* Copy the data to the vstore buffer */
    tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector));
    /* Copy the mask */
    tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg));
    if (invert) {
        tcg_gen_gvec_not(MO_64, maskoff, maskoff,
                         sizeof(MMQReg), sizeof(MMQReg));
    }
}

static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    TCGv_i64 word = tcg_temp_new_i64();
    TCGv_i64 bits = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_constant_i64(0);
    TCGv_i64 ones = tcg_constant_i64(~0);

    for (int i = 0; i < sizeof(MMVector) / 8; i++) {
        tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8);
        tcg_gen_movi_i64(mask, 0);

        for (int j = 0; j < 8; j += size) {
            tcg_gen_extract_i64(word, tmp, j * 8, size * 8);
            tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero);
            tcg_gen_deposit_i64(mask, mask, bits, j, size);
        }

        tcg_gen_st8_i64(mask, cpu_env, dstoff + i);
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i64(word);
    tcg_temp_free_i64(bits);
    tcg_temp_free_i64(mask);
}

#include "tcg_funcs_generated.c.inc"
#include "tcg_func_table_generated.c.inc"