target-ppc: add lxsi[bw]zx instruction
target-ppc/translate/vsx-impl.inc.c
/*** VSX extension ***/

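/* Each 128-bit VSR is modelled as two TCG i64 globals.  For VSR0-31 the
 * high doubleword aliases the corresponding FPR (cpu_fpr) and the low
 * doubleword lives in cpu_vsr; VSR32-63 map onto the Altivec registers
 * via cpu_avrh/cpu_avrl. */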
static inline TCGv_i64 cpu_vsrh(int n)
{
    if (n < 32) {
        return cpu_fpr[n];
    } else {
        return cpu_avrh[n-32];
    }
}

static inline TCGv_i64 cpu_vsrl(int n)
{
    if (n < 32) {
        return cpu_vsr[n];
    } else {
        return cpu_avrl[n-32];
    }
}

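/* Scalar loads fill only the high doubleword of VSR[XT]; the low doubleword
 * is left undefined (see the NOTE in the macro).  The new lxsibzx/lxsihzx
 * handlers reuse this template with 8-bit and 16-bit zero-extending loads. */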
#define VSX_LOAD_SCALAR(name, operation)                      \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
    /* NOTE: cpu_vsrl is undefined */                         \
    tcg_temp_free(EA);                                        \
}

VSX_LOAD_SCALAR(lxsdx, ld64_i64)
VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
VSX_LOAD_SCALAR(lxsspx, ld32fs)

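/* Vector loads: lxvd2x fills both doublewords of VSR[XT], lxvdsx loads one
 * doubleword and replicates it into both halves, lxvw4x loads four words. */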
static void gen_lxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

static void gen_lxvdsx(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
    tcg_temp_free(EA);
}

static void gen_lxvw4x(DisasContext *ctx)
{
    TCGv EA;
    TCGv_i64 tmp;
    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    tmp = tcg_temp_new_i64();

    gen_addr_reg_index(ctx, EA);
    gen_qemu_ld32u_i64(ctx, tmp, EA);
    tcg_gen_addi_tl(EA, EA, 4);
    gen_qemu_ld32u_i64(ctx, xth, EA);
    tcg_gen_deposit_i64(xth, xth, tmp, 32, 32);

    tcg_gen_addi_tl(EA, EA, 4);
    gen_qemu_ld32u_i64(ctx, tmp, EA);
    tcg_gen_addi_tl(EA, EA, 4);
    gen_qemu_ld32u_i64(ctx, xtl, EA);
    tcg_gen_deposit_i64(xtl, xtl, tmp, 32, 32);

    tcg_temp_free(EA);
    tcg_temp_free_i64(tmp);
}

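/* Stores mirror the loads above: the scalar forms take their data from the
 * high doubleword of VSR[XS]; stxvd2x/stxvw4x store the full register. */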
#define VSX_STORE_SCALAR(name, operation)                     \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    TCGv EA;                                                  \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_set_access_type(ctx, ACCESS_INT);                     \
    EA = tcg_temp_new();                                      \
    gen_addr_reg_index(ctx, EA);                              \
    gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
    tcg_temp_free(EA);                                        \
}

VSX_STORE_SCALAR(stxsdx, st64_i64)
VSX_STORE_SCALAR(stxsiwx, st32_i64)
VSX_STORE_SCALAR(stxsspx, st32fs)

static void gen_stxvd2x(DisasContext *ctx)
{
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
    tcg_gen_addi_tl(EA, EA, 8);
    gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
    tcg_temp_free(EA);
}

static void gen_stxvw4x(DisasContext *ctx)
{
    TCGv_i64 tmp;
    TCGv EA;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    gen_set_access_type(ctx, ACCESS_INT);
    EA = tcg_temp_new();
    gen_addr_reg_index(ctx, EA);
    tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, cpu_vsrh(xS(ctx->opcode)), 32);
    gen_qemu_st32_i64(ctx, tmp, EA);
    tcg_gen_addi_tl(EA, EA, 4);
    gen_qemu_st32_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);

    tcg_gen_shri_i64(tmp, cpu_vsrl(xS(ctx->opcode)), 32);
    tcg_gen_addi_tl(EA, EA, 4);
    gen_qemu_st32_i64(ctx, tmp, EA);
    tcg_gen_addi_tl(EA, EA, 4);
    gen_qemu_st32_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);

    tcg_temp_free(EA);
    tcg_temp_free_i64(tmp);
}

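/* Moves between GPRs and VSRs.  The availability check depends on which half
 * of the VSR file is addressed: FPU for VSR0-31, Altivec for VSR32-63. */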
#define MV_VSRW(name, tcgop1, tcgop2, target, source)         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    TCGv_i64 tmp = tcg_temp_new_i64();                        \
    tcg_gen_##tcgop1(tmp, source);                            \
    tcg_gen_##tcgop2(target, tmp);                            \
    tcg_temp_free_i64(tmp);                                   \
}

MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
        cpu_vsrh(xS(ctx->opcode)))
MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])
MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
        cpu_gpr[rA(ctx->opcode)])

#if defined(TARGET_PPC64)
#define MV_VSRD(name, target, source)                         \
static void gen_##name(DisasContext *ctx)                     \
{                                                             \
    if (xS(ctx->opcode) < 32) {                               \
        if (unlikely(!ctx->fpu_enabled)) {                    \
            gen_exception(ctx, POWERPC_EXCP_FPU);             \
            return;                                           \
        }                                                     \
    } else {                                                  \
        if (unlikely(!ctx->altivec_enabled)) {                \
            gen_exception(ctx, POWERPC_EXCP_VPU);             \
            return;                                           \
        }                                                     \
    }                                                         \
    tcg_gen_mov_i64(target, source);                          \
}

MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])

#endif

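/* xxpermdi: select one doubleword from xA and one from xB according to the
 * DM field.  Temporary copies are needed when xT overlaps xA or xB. */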
static void gen_xxpermdi(DisasContext *ctx)
{
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
                 (xT(ctx->opcode) == xB(ctx->opcode)))) {
        TCGv_i64 xh, xl;

        xh = tcg_temp_new_i64();
        xl = tcg_temp_new_i64();

        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
        }

        tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
        tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);

        tcg_temp_free_i64(xh);
        tcg_temp_free_i64(xl);
    } else {
        if ((DM(ctx->opcode) & 2) == 0) {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
        }
        if ((DM(ctx->opcode) & 1) == 0) {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
        } else {
            tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
        }
    }
}

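/* Scalar and vector sign manipulation (abs, nabs, neg, cpsgn) done as
 * bitwise operations on the sign mask; SGN_MASK_SP carries one sign bit per
 * 32-bit word. */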
#define OP_ABS 1
#define OP_NABS 2
#define OP_NEG 3
#define OP_CPSGN 4
#define SGN_MASK_DP 0x8000000000000000ull
#define SGN_MASK_SP 0x8000000080000000ull

#define VSX_SCALAR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext * ctx)              \
{                                                             \
    TCGv_i64 xb, sgm;                                         \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xb = tcg_temp_new_i64();                                  \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode)));           \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xb, xb, sgm);                          \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xb, xb, sgm);                         \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xa = tcg_temp_new_i64();                     \
        tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode)));       \
        tcg_gen_and_i64(xa, xa, sgm);                         \
        tcg_gen_andc_i64(xb, xb, sgm);                        \
        tcg_gen_or_i64(xb, xb, xa);                           \
        tcg_temp_free_i64(xa);                                \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb);           \
    tcg_temp_free_i64(xb);                                    \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)

#define VSX_VECTOR_MOVE(name, op, sgn_mask)                   \
static void glue(gen_, name)(DisasContext * ctx)              \
{                                                             \
    TCGv_i64 xbh, xbl, sgm;                                   \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    xbh = tcg_temp_new_i64();                                 \
    xbl = tcg_temp_new_i64();                                 \
    sgm = tcg_temp_new_i64();                                 \
    tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode)));          \
    tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode)));          \
    tcg_gen_movi_i64(sgm, sgn_mask);                          \
    switch (op) {                                             \
    case OP_ABS: {                                            \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        break;                                                \
    }                                                         \
    case OP_NABS: {                                           \
        tcg_gen_or_i64(xbh, xbh, sgm);                        \
        tcg_gen_or_i64(xbl, xbl, sgm);                        \
        break;                                                \
    }                                                         \
    case OP_NEG: {                                            \
        tcg_gen_xor_i64(xbh, xbh, sgm);                       \
        tcg_gen_xor_i64(xbl, xbl, sgm);                       \
        break;                                                \
    }                                                         \
    case OP_CPSGN: {                                          \
        TCGv_i64 xah = tcg_temp_new_i64();                    \
        TCGv_i64 xal = tcg_temp_new_i64();                    \
        tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode)));      \
        tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode)));      \
        tcg_gen_and_i64(xah, xah, sgm);                       \
        tcg_gen_and_i64(xal, xal, sgm);                       \
        tcg_gen_andc_i64(xbh, xbh, sgm);                      \
        tcg_gen_andc_i64(xbl, xbl, sgm);                      \
        tcg_gen_or_i64(xbh, xbh, xah);                        \
        tcg_gen_or_i64(xbl, xbl, xal);                        \
        tcg_temp_free_i64(xah);                               \
        tcg_temp_free_i64(xal);                               \
        break;                                                \
    }                                                         \
    }                                                         \
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh);          \
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl);          \
    tcg_temp_free_i64(xbh);                                   \
    tcg_temp_free_i64(xbl);                                   \
    tcg_temp_free_i64(sgm);                                   \
}

VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)

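/* Floating-point arithmetic, compare, convert and round instructions are
 * emulated in out-of-line helpers: GEN_VSX_HELPER_2 passes the raw opcode to
 * the helper of the same name, GEN_VSX_HELPER_XT_XB_ENV passes the high
 * doublewords of XT and XB directly. */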
#define GEN_VSX_HELPER_2(name, op1, op2, inval, type)         \
static void gen_##name(DisasContext * ctx)                    \
{                                                             \
    TCGv_i32 opc;                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    opc = tcg_const_i32(ctx->opcode);                         \
    gen_helper_##name(cpu_env, opc);                          \
    tcg_temp_free_i32(opc);                                   \
}

#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
static void gen_##name(DisasContext * ctx)                    \
{                                                             \
    if (unlikely(!ctx->vsx_enabled)) {                        \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                \
        return;                                               \
    }                                                         \
    gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env,     \
                      cpu_vsrh(xB(ctx->opcode)));             \
}

GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)

GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)

GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)

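/* 128-bit logical operations, done as two independent 64-bit TCG ops on the
 * register halves. */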
#define VSX_LOGICAL(name, tcg_op)                                 \
static void glue(gen_, name)(DisasContext * ctx)                  \
{                                                                 \
    if (unlikely(!ctx->vsx_enabled)) {                            \
        gen_exception(ctx, POWERPC_EXCP_VSXU);                    \
        return;                                                   \
    }                                                             \
    tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)),  \
           cpu_vsrh(xB(ctx->opcode)));                            \
    tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)),  \
           cpu_vsrl(xB(ctx->opcode)));                            \
}

VSX_LOGICAL(xxland, tcg_gen_and_i64)
VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
VSX_LOGICAL(xxlor, tcg_gen_or_i64)
VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)

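/* xxmrghw/xxmrglw: interleave the word elements of the high (or low)
 * doublewords of xA and xB into xT. */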
#define VSX_XXMRG(name, high)                               \
static void glue(gen_, name)(DisasContext * ctx)            \
{                                                           \
    TCGv_i64 a0, a1, b0, b1;                                \
    if (unlikely(!ctx->vsx_enabled)) {                      \
        gen_exception(ctx, POWERPC_EXCP_VSXU);              \
        return;                                             \
    }                                                       \
    a0 = tcg_temp_new_i64();                                \
    a1 = tcg_temp_new_i64();                                \
    b0 = tcg_temp_new_i64();                                \
    b1 = tcg_temp_new_i64();                                \
    if (high) {                                             \
        tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode)));     \
        tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode)));     \
        tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode)));     \
        tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode)));     \
    } else {                                                \
        tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode)));     \
        tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode)));     \
        tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode)));     \
        tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode)));     \
    }                                                       \
    tcg_gen_shri_i64(a0, a0, 32);                           \
    tcg_gen_shri_i64(b0, b0, 32);                           \
    tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)),          \
                        b0, a0, 32, 32);                    \
    tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)),          \
                        b1, a1, 32, 32);                    \
    tcg_temp_free_i64(a0);                                  \
    tcg_temp_free_i64(a1);                                  \
    tcg_temp_free_i64(b0);                                  \
    tcg_temp_free_i64(b1);                                  \
}

VSX_XXMRG(xxmrghw, 1)
VSX_XXMRG(xxmrglw, 0)

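/* xxsel: bitwise select, xT = (xA & ~xC) | (xB & xC). */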
static void gen_xxsel(DisasContext * ctx)
{
    TCGv_i64 a, b, c;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    a = tcg_temp_new_i64();
    b = tcg_temp_new_i64();
    c = tcg_temp_new_i64();

    tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);

    tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
    tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
    tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));

    tcg_gen_and_i64(b, b, c);
    tcg_gen_andc_i64(a, a, c);
    tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);

    tcg_temp_free_i64(a);
    tcg_temp_free_i64(b);
    tcg_temp_free_i64(c);
}

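/* xxspltw: splat word UIM of VSR[XB] into all four word elements of VSR[XT]. */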
static void gen_xxspltw(DisasContext *ctx)
{
    TCGv_i64 b, b2;
    TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
                   cpu_vsrl(xB(ctx->opcode)) :
                   cpu_vsrh(xB(ctx->opcode));

    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }

    b = tcg_temp_new_i64();
    b2 = tcg_temp_new_i64();

    if (UIM(ctx->opcode) & 1) {
        tcg_gen_ext32u_i64(b, vsr);
    } else {
        tcg_gen_shri_i64(b, vsr, 32);
    }

    tcg_gen_shli_i64(b2, b, 32);
    tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));

    tcg_temp_free_i64(b);
    tcg_temp_free_i64(b2);
}

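/* xxspltib (ISA 3.00): splat an 8-bit immediate across all sixteen bytes of
 * the target register; pattern() replicates the byte through a 64-bit value. */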
#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))

static void gen_xxspltib(DisasContext *ctx)
{
    unsigned char uim8 = IMM8(ctx->opcode);
    if (xS(ctx->opcode) < 32) {
        if (unlikely(!ctx->altivec_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VPU);
            return;
        }
    } else {
        if (unlikely(!ctx->vsx_enabled)) {
            gen_exception(ctx, POWERPC_EXCP_VSXU);
            return;
        }
    }
    tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
    tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
}

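/* xxsldwi: shift the 256-bit concatenation xA:xB left by SHW words and keep
 * the high 128 bits. */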
static void gen_xxsldwi(DisasContext *ctx)
{
    TCGv_i64 xth, xtl;
    if (unlikely(!ctx->vsx_enabled)) {
        gen_exception(ctx, POWERPC_EXCP_VSXU);
        return;
    }
    xth = tcg_temp_new_i64();
    xtl = tcg_temp_new_i64();

    switch (SHW(ctx->opcode)) {
    case 0: {
        tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
        tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
        break;
    }
    case 1: {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
        tcg_gen_shli_i64(xth, xth, 32);
        tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xth, xth, t0);
        tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shli_i64(xtl, xtl, 32);
        tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xtl, xtl, t0);
        tcg_temp_free_i64(t0);
        break;
    }
    case 2: {
        tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
        break;
    }
    case 3: {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
        tcg_gen_shli_i64(xth, xth, 32);
        tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xth, xth, t0);
        tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
        tcg_gen_shli_i64(xtl, xtl, 32);
        tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
        tcg_gen_shri_i64(t0, t0, 32);
        tcg_gen_or_i64(xtl, xtl, t0);
        tcg_temp_free_i64(t0);
        break;
    }
    }

    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);

    tcg_temp_free_i64(xth);
    tcg_temp_free_i64(xtl);
}

#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM
#undef GEN_XX3_RC_FORM
#undef GEN_XX3FORM_DM
#undef VSX_LOGICAL