/*
 * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
 *
 * Copyright (c) 2005 Fabrice Bellard
 * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define XMM_ONLY(...) __VA_ARGS__
void glue(helper_psrlw, SUFFIX)(Reg *d, Reg *s)

void glue(helper_psraw, SUFFIX)(Reg *d, Reg *s)
    d->W(0) = (int16_t)d->W(0) >> shift;
    d->W(1) = (int16_t)d->W(1) >> shift;
    d->W(2) = (int16_t)d->W(2) >> shift;
    d->W(3) = (int16_t)d->W(3) >> shift;
    d->W(4) = (int16_t)d->W(4) >> shift;
    d->W(5) = (int16_t)d->W(5) >> shift;
    d->W(6) = (int16_t)d->W(6) >> shift;
    d->W(7) = (int16_t)d->W(7) >> shift;

void glue(helper_psllw, SUFFIX)(Reg *d, Reg *s)

void glue(helper_psrld, SUFFIX)(Reg *d, Reg *s)

void glue(helper_psrad, SUFFIX)(Reg *d, Reg *s)
    d->L(0) = (int32_t)d->L(0) >> shift;
    d->L(1) = (int32_t)d->L(1) >> shift;
    d->L(2) = (int32_t)d->L(2) >> shift;
    d->L(3) = (int32_t)d->L(3) >> shift;

void glue(helper_pslld, SUFFIX)(Reg *d, Reg *s)

void glue(helper_psrlq, SUFFIX)(Reg *d, Reg *s)

void glue(helper_psllq, SUFFIX)(Reg *d, Reg *s)

void glue(helper_psrldq, SUFFIX)(Reg *d, Reg *s)
    for(i = 0; i < 16 - shift; i++)
        d->B(i) = d->B(i + shift);
    for(i = 16 - shift; i < 16; i++)

void glue(helper_pslldq, SUFFIX)(Reg *d, Reg *s)
    for(i = 15; i >= shift; i--)
        d->B(i) = d->B(i - shift);
    for(i = 0; i < shift; i++)
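/* Note: PSRLDQ/PSLLDQ shift the whole 128-bit value by a byte count.  The
 * first loop of each helper moves the surviving bytes; the second loop
 * zero-fills the byte positions vacated by the shift. */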
#define SSE_HELPER_B(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
    d->B(0) = F(d->B(0), s->B(0));\
    d->B(1) = F(d->B(1), s->B(1));\
    d->B(2) = F(d->B(2), s->B(2));\
    d->B(3) = F(d->B(3), s->B(3));\
    d->B(4) = F(d->B(4), s->B(4));\
    d->B(5) = F(d->B(5), s->B(5));\
    d->B(6) = F(d->B(6), s->B(6));\
    d->B(7) = F(d->B(7), s->B(7));\
    d->B(8) = F(d->B(8), s->B(8));\
    d->B(9) = F(d->B(9), s->B(9));\
    d->B(10) = F(d->B(10), s->B(10));\
    d->B(11) = F(d->B(11), s->B(11));\
    d->B(12) = F(d->B(12), s->B(12));\
    d->B(13) = F(d->B(13), s->B(13));\
    d->B(14) = F(d->B(14), s->B(14));\
    d->B(15) = F(d->B(15), s->B(15));\

#define SSE_HELPER_W(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
    d->W(0) = F(d->W(0), s->W(0));\
    d->W(1) = F(d->W(1), s->W(1));\
    d->W(2) = F(d->W(2), s->W(2));\
    d->W(3) = F(d->W(3), s->W(3));\
    d->W(4) = F(d->W(4), s->W(4));\
    d->W(5) = F(d->W(5), s->W(5));\
    d->W(6) = F(d->W(6), s->W(6));\
    d->W(7) = F(d->W(7), s->W(7));\

#define SSE_HELPER_L(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
    d->L(0) = F(d->L(0), s->L(0));\
    d->L(1) = F(d->L(1), s->L(1));\
    d->L(2) = F(d->L(2), s->L(2));\
    d->L(3) = F(d->L(3), s->L(3));\

#define SSE_HELPER_Q(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
    d->Q(0) = F(d->Q(0), s->Q(0));\
    d->Q(1) = F(d->Q(1), s->Q(1));\
static inline int satub(int x)

static inline int satuw(int x)

static inline int satsb(int x)

static inline int satsw(int x)
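/* Saturation helpers: satub/satuw clamp to [0, 255] and [0, 65535],
 * satsb/satsw clamp to [-128, 127] and [-32768, 32767]. */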
#define FADD(a, b) ((a) + (b))
#define FADDUB(a, b) satub((a) + (b))
#define FADDUW(a, b) satuw((a) + (b))
#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))

#define FSUB(a, b) ((a) - (b))
#define FSUBUB(a, b) satub((a) - (b))
#define FSUBUW(a, b) satuw((a) - (b))
#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
#define FMINUB(a, b) ((a) < (b)) ? (a) : (b)
#define FMINSW(a, b) ((int16_t)(a) < (int16_t)(b)) ? (a) : (b)
#define FMAXUB(a, b) ((a) > (b)) ? (a) : (b)
#define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b)

#define FAND(a, b) (a) & (b)
#define FANDN(a, b) ((~(a)) & (b))
#define FOR(a, b) (a) | (b)
#define FXOR(a, b) (a) ^ (b)

#define FCMPGTB(a, b) (int8_t)(a) > (int8_t)(b) ? -1 : 0
#define FCMPGTW(a, b) (int16_t)(a) > (int16_t)(b) ? -1 : 0
#define FCMPGTL(a, b) (int32_t)(a) > (int32_t)(b) ? -1 : 0
#define FCMPEQ(a, b) (a) == (b) ? -1 : 0

#define FMULLW(a, b) (a) * (b)
#define FMULHRW(a, b) ((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16
#define FMULHUW(a, b) (a) * (b) >> 16
#define FMULHW(a, b) (int16_t)(a) * (int16_t)(b) >> 16

#define FAVG(a, b) ((a) + (b) + 1) >> 1
SSE_HELPER_B(helper_paddb, FADD)
SSE_HELPER_W(helper_paddw, FADD)
SSE_HELPER_L(helper_paddl, FADD)
SSE_HELPER_Q(helper_paddq, FADD)

SSE_HELPER_B(helper_psubb, FSUB)
SSE_HELPER_W(helper_psubw, FSUB)
SSE_HELPER_L(helper_psubl, FSUB)
SSE_HELPER_Q(helper_psubq, FSUB)

SSE_HELPER_B(helper_paddusb, FADDUB)
SSE_HELPER_B(helper_paddsb, FADDSB)
SSE_HELPER_B(helper_psubusb, FSUBUB)
SSE_HELPER_B(helper_psubsb, FSUBSB)

SSE_HELPER_W(helper_paddusw, FADDUW)
SSE_HELPER_W(helper_paddsw, FADDSW)
SSE_HELPER_W(helper_psubusw, FSUBUW)
SSE_HELPER_W(helper_psubsw, FSUBSW)

SSE_HELPER_B(helper_pminub, FMINUB)
SSE_HELPER_B(helper_pmaxub, FMAXUB)

SSE_HELPER_W(helper_pminsw, FMINSW)
SSE_HELPER_W(helper_pmaxsw, FMAXSW)

SSE_HELPER_Q(helper_pand, FAND)
SSE_HELPER_Q(helper_pandn, FANDN)
SSE_HELPER_Q(helper_por, FOR)
SSE_HELPER_Q(helper_pxor, FXOR)

SSE_HELPER_B(helper_pcmpgtb, FCMPGTB)
SSE_HELPER_W(helper_pcmpgtw, FCMPGTW)
SSE_HELPER_L(helper_pcmpgtl, FCMPGTL)

SSE_HELPER_B(helper_pcmpeqb, FCMPEQ)
SSE_HELPER_W(helper_pcmpeqw, FCMPEQ)
SSE_HELPER_L(helper_pcmpeql, FCMPEQ)

SSE_HELPER_W(helper_pmullw, FMULLW)
SSE_HELPER_W(helper_pmulhrw, FMULHRW)
SSE_HELPER_W(helper_pmulhuw, FMULHUW)
SSE_HELPER_W(helper_pmulhw, FMULHW)

SSE_HELPER_B(helper_pavgb, FAVG)
SSE_HELPER_W(helper_pavgw, FAVG)
void glue(helper_pmuludq, SUFFIX) (Reg *d, Reg *s)
    d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0);
    d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2);

void glue(helper_pmaddwd, SUFFIX) (Reg *d, Reg *s)
    for(i = 0; i < (2 << SHIFT); i++) {
        d->L(i) = (int16_t)s->W(2*i) * (int16_t)d->W(2*i) +
                  (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1);
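/* PMADDWD multiplies corresponding signed 16-bit elements and adds each
 * pair of adjacent 32-bit products, producing (2 << SHIFT) doubleword
 * results. */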
static inline int abs1(int a)

void glue(helper_psadbw, SUFFIX) (Reg *d, Reg *s)
    val += abs1(d->B(0) - s->B(0));
    val += abs1(d->B(1) - s->B(1));
    val += abs1(d->B(2) - s->B(2));
    val += abs1(d->B(3) - s->B(3));
    val += abs1(d->B(4) - s->B(4));
    val += abs1(d->B(5) - s->B(5));
    val += abs1(d->B(6) - s->B(6));
    val += abs1(d->B(7) - s->B(7));
    val += abs1(d->B(8) - s->B(8));
    val += abs1(d->B(9) - s->B(9));
    val += abs1(d->B(10) - s->B(10));
    val += abs1(d->B(11) - s->B(11));
    val += abs1(d->B(12) - s->B(12));
    val += abs1(d->B(13) - s->B(13));
    val += abs1(d->B(14) - s->B(14));
    val += abs1(d->B(15) - s->B(15));
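/* PSADBW: the absolute byte differences are summed and the total is stored
 * as one 16-bit sum in the low word of each 64-bit half of the destination;
 * the remaining words of each half are zeroed. */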
void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s, target_ulong a0)
    for(i = 0; i < (8 << SHIFT); i++) {
        stb(a0 + i, d->B(i));
void glue(helper_movl_mm_T0, SUFFIX) (Reg *d, uint32_t val)

void glue(helper_movq_mm_T0, SUFFIX) (Reg *d, uint64_t val)
void glue(helper_pshufw, SUFFIX) (Reg *d, Reg *s, int order)
    r.W(0) = s->W(order & 3);
    r.W(1) = s->W((order >> 2) & 3);
    r.W(2) = s->W((order >> 4) & 3);
    r.W(3) = s->W((order >> 6) & 3);

void helper_shufps(Reg *d, Reg *s, int order)
    r.L(0) = d->L(order & 3);
    r.L(1) = d->L((order >> 2) & 3);
    r.L(2) = s->L((order >> 4) & 3);
    r.L(3) = s->L((order >> 6) & 3);

void helper_shufpd(Reg *d, Reg *s, int order)
    r.Q(0) = d->Q(order & 1);
    r.Q(1) = s->Q((order >> 1) & 1);

void glue(helper_pshufd, SUFFIX) (Reg *d, Reg *s, int order)
    r.L(0) = s->L(order & 3);
    r.L(1) = s->L((order >> 2) & 3);
    r.L(2) = s->L((order >> 4) & 3);
    r.L(3) = s->L((order >> 6) & 3);

void glue(helper_pshuflw, SUFFIX) (Reg *d, Reg *s, int order)
    r.W(0) = s->W(order & 3);
    r.W(1) = s->W((order >> 2) & 3);
    r.W(2) = s->W((order >> 4) & 3);
    r.W(3) = s->W((order >> 6) & 3);

void glue(helper_pshufhw, SUFFIX) (Reg *d, Reg *s, int order)
    r.W(4) = s->W(4 + (order & 3));
    r.W(5) = s->W(4 + ((order >> 2) & 3));
    r.W(6) = s->W(4 + ((order >> 4) & 3));
    r.W(7) = s->W(4 + ((order >> 6) & 3));
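/* For all of the shuffles above the 8-bit immediate encodes one source
 * index per destination element, two bits each (a single bit per element
 * for shufpd), consumed from the low bits upward. */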
/* XXX: not accurate */

#define SSE_HELPER_S(name, F)\
void helper_ ## name ## ps (Reg *d, Reg *s)\
    d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
    d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1));\
    d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2));\
    d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3));\
void helper_ ## name ## ss (Reg *d, Reg *s)\
    d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
void helper_ ## name ## pd (Reg *d, Reg *s)\
    d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
    d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1));\
void helper_ ## name ## sd (Reg *d, Reg *s)\
    d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)
#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status)
/* Note that the choice of comparison op here is important to get the
 * special cases right: for min and max Intel specifies that (-0,0),
 * (NaN, anything) and (anything, NaN) return the second argument.
 */
#define FPU_MIN(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b)
#define FPU_MAX(size, a, b) float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b)
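/* Worked example of the rule above: for MINPS with a = -0.0 and b = +0.0,
 * float32_lt(-0.0, +0.0) is false because the two compare equal, so FPU_MIN
 * yields b, the second argument.  Any comparison involving a NaN is
 * unordered and also returns false, so FPU_MIN and FPU_MAX likewise fall
 * through to b. */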
SSE_HELPER_S(add, FPU_ADD)
SSE_HELPER_S(sub, FPU_SUB)
SSE_HELPER_S(mul, FPU_MUL)
SSE_HELPER_S(div, FPU_DIV)
SSE_HELPER_S(min, FPU_MIN)
SSE_HELPER_S(max, FPU_MAX)
SSE_HELPER_S(sqrt, FPU_SQRT)
/* float to float conversions */
void helper_cvtps2pd(Reg *d, Reg *s)
    d->XMM_D(0) = float32_to_float64(s0, &env->sse_status);
    d->XMM_D(1) = float32_to_float64(s1, &env->sse_status);

void helper_cvtpd2ps(Reg *d, Reg *s)
    d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status);
    d->XMM_S(1) = float64_to_float32(s->XMM_D(1), &env->sse_status);

void helper_cvtss2sd(Reg *d, Reg *s)
    d->XMM_D(0) = float32_to_float64(s->XMM_S(0), &env->sse_status);

void helper_cvtsd2ss(Reg *d, Reg *s)
    d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status);

/* integer to float */
void helper_cvtdq2ps(Reg *d, Reg *s)
    d->XMM_S(0) = int32_to_float32(s->XMM_L(0), &env->sse_status);
    d->XMM_S(1) = int32_to_float32(s->XMM_L(1), &env->sse_status);
    d->XMM_S(2) = int32_to_float32(s->XMM_L(2), &env->sse_status);
    d->XMM_S(3) = int32_to_float32(s->XMM_L(3), &env->sse_status);

void helper_cvtdq2pd(Reg *d, Reg *s)
    l0 = (int32_t)s->XMM_L(0);
    l1 = (int32_t)s->XMM_L(1);
    d->XMM_D(0) = int32_to_float64(l0, &env->sse_status);
    d->XMM_D(1) = int32_to_float64(l1, &env->sse_status);

void helper_cvtpi2ps(XMMReg *d, MMXReg *s)
    d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
    d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);

void helper_cvtpi2pd(XMMReg *d, MMXReg *s)
    d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
    d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);

void helper_cvtsi2ss(XMMReg *d, uint32_t val)
    d->XMM_S(0) = int32_to_float32(val, &env->sse_status);

void helper_cvtsi2sd(XMMReg *d, uint32_t val)
    d->XMM_D(0) = int32_to_float64(val, &env->sse_status);

void helper_cvtsq2ss(XMMReg *d, uint64_t val)
    d->XMM_S(0) = int64_to_float32(val, &env->sse_status);

void helper_cvtsq2sd(XMMReg *d, uint64_t val)
    d->XMM_D(0) = int64_to_float64(val, &env->sse_status);
/* float to integer */
void helper_cvtps2dq(XMMReg *d, XMMReg *s)
    d->XMM_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
    d->XMM_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);
    d->XMM_L(2) = float32_to_int32(s->XMM_S(2), &env->sse_status);
    d->XMM_L(3) = float32_to_int32(s->XMM_S(3), &env->sse_status);

void helper_cvtpd2dq(XMMReg *d, XMMReg *s)
    d->XMM_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
    d->XMM_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);

void helper_cvtps2pi(MMXReg *d, XMMReg *s)
    d->MMX_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
    d->MMX_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);

void helper_cvtpd2pi(MMXReg *d, XMMReg *s)
    d->MMX_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
    d->MMX_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);

int32_t helper_cvtss2si(XMMReg *s)
    return float32_to_int32(s->XMM_S(0), &env->sse_status);

int32_t helper_cvtsd2si(XMMReg *s)
    return float64_to_int32(s->XMM_D(0), &env->sse_status);

int64_t helper_cvtss2sq(XMMReg *s)
    return float32_to_int64(s->XMM_S(0), &env->sse_status);

int64_t helper_cvtsd2sq(XMMReg *s)
    return float64_to_int64(s->XMM_D(0), &env->sse_status);
/* float to integer truncated */
void helper_cvttps2dq(XMMReg *d, XMMReg *s)
    d->XMM_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
    d->XMM_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);
    d->XMM_L(2) = float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status);
    d->XMM_L(3) = float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status);

void helper_cvttpd2dq(XMMReg *d, XMMReg *s)
    d->XMM_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
    d->XMM_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);

void helper_cvttps2pi(MMXReg *d, XMMReg *s)
    d->MMX_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
    d->MMX_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);

void helper_cvttpd2pi(MMXReg *d, XMMReg *s)
    d->MMX_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
    d->MMX_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);

int32_t helper_cvttss2si(XMMReg *s)
    return float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);

int32_t helper_cvttsd2si(XMMReg *s)
    return float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);

int64_t helper_cvttss2sq(XMMReg *s)
    return float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status);

int64_t helper_cvttsd2sq(XMMReg *s)
    return float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status);
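/* The CVTT* helpers always truncate toward zero, independent of the current
 * rounding mode; the plain CVT* helpers above round according to the mode
 * held in env->sse_status (i.e. MXCSR.RC). */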
void helper_rsqrtps(XMMReg *d, XMMReg *s)
    d->XMM_S(0) = float32_div(float32_one,
                              float32_sqrt(s->XMM_S(0), &env->sse_status),
                              &env->sse_status);
    d->XMM_S(1) = float32_div(float32_one,
                              float32_sqrt(s->XMM_S(1), &env->sse_status),
                              &env->sse_status);
    d->XMM_S(2) = float32_div(float32_one,
                              float32_sqrt(s->XMM_S(2), &env->sse_status),
                              &env->sse_status);
    d->XMM_S(3) = float32_div(float32_one,
                              float32_sqrt(s->XMM_S(3), &env->sse_status),
                              &env->sse_status);

void helper_rsqrtss(XMMReg *d, XMMReg *s)
    d->XMM_S(0) = float32_div(float32_one,
                              float32_sqrt(s->XMM_S(0), &env->sse_status),
                              &env->sse_status);

void helper_rcpps(XMMReg *d, XMMReg *s)
    d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status);
    d->XMM_S(1) = float32_div(float32_one, s->XMM_S(1), &env->sse_status);
    d->XMM_S(2) = float32_div(float32_one, s->XMM_S(2), &env->sse_status);
    d->XMM_S(3) = float32_div(float32_one, s->XMM_S(3), &env->sse_status);

void helper_rcpss(XMMReg *d, XMMReg *s)
    d->XMM_S(0) = float32_div(float32_one, s->XMM_S(0), &env->sse_status);
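/* Hardware RSQRTPS/RSQRTSS/RCPPS/RCPSS return approximations accurate to
 * roughly 12 bits; these helpers compute 1/sqrt(x) and 1/x exactly with
 * soft-float, which is why the section is marked "not accurate" above. */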
static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
    mask = (1ULL << len) - 1;
    return (src >> shift) & mask;

void helper_extrq_r(XMMReg *d, XMMReg *s)
    d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), s->XMM_B(1), s->XMM_B(0));

void helper_extrq_i(XMMReg *d, int index, int length)
    d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), index, length);

static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
    mask = (1ULL << len) - 1;
    return (src & ~(mask << shift)) | ((src & mask) << shift);

void helper_insertq_r(XMMReg *d, XMMReg *s)
    d->XMM_Q(0) = helper_insertq(s->XMM_Q(0), s->XMM_B(9), s->XMM_B(8));

void helper_insertq_i(XMMReg *d, int index, int length)
    d->XMM_Q(0) = helper_insertq(d->XMM_Q(0), index, length);
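/* SSE4a EXTRQ/INSERTQ operate on a bit field of 'length' bits starting at
 * bit position 'index' within the low 64 bits of the XMM register.  The
 * register forms (extrq_r/insertq_r) take the field descriptor from bytes
 * 0-1, respectively 8-9, of the source operand. */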
void helper_haddps(XMMReg *d, XMMReg *s)
    r.XMM_S(0) = float32_add(d->XMM_S(0), d->XMM_S(1), &env->sse_status);
    r.XMM_S(1) = float32_add(d->XMM_S(2), d->XMM_S(3), &env->sse_status);
    r.XMM_S(2) = float32_add(s->XMM_S(0), s->XMM_S(1), &env->sse_status);
    r.XMM_S(3) = float32_add(s->XMM_S(2), s->XMM_S(3), &env->sse_status);

void helper_haddpd(XMMReg *d, XMMReg *s)
    r.XMM_D(0) = float64_add(d->XMM_D(0), d->XMM_D(1), &env->sse_status);
    r.XMM_D(1) = float64_add(s->XMM_D(0), s->XMM_D(1), &env->sse_status);

void helper_hsubps(XMMReg *d, XMMReg *s)
    r.XMM_S(0) = float32_sub(d->XMM_S(0), d->XMM_S(1), &env->sse_status);
    r.XMM_S(1) = float32_sub(d->XMM_S(2), d->XMM_S(3), &env->sse_status);
    r.XMM_S(2) = float32_sub(s->XMM_S(0), s->XMM_S(1), &env->sse_status);
    r.XMM_S(3) = float32_sub(s->XMM_S(2), s->XMM_S(3), &env->sse_status);

void helper_hsubpd(XMMReg *d, XMMReg *s)
    r.XMM_D(0) = float64_sub(d->XMM_D(0), d->XMM_D(1), &env->sse_status);
    r.XMM_D(1) = float64_sub(s->XMM_D(0), s->XMM_D(1), &env->sse_status);

void helper_addsubps(XMMReg *d, XMMReg *s)
    d->XMM_S(0) = float32_sub(d->XMM_S(0), s->XMM_S(0), &env->sse_status);
    d->XMM_S(1) = float32_add(d->XMM_S(1), s->XMM_S(1), &env->sse_status);
    d->XMM_S(2) = float32_sub(d->XMM_S(2), s->XMM_S(2), &env->sse_status);
    d->XMM_S(3) = float32_add(d->XMM_S(3), s->XMM_S(3), &env->sse_status);

void helper_addsubpd(XMMReg *d, XMMReg *s)
    d->XMM_D(0) = float64_sub(d->XMM_D(0), s->XMM_D(0), &env->sse_status);
    d->XMM_D(1) = float64_add(d->XMM_D(1), s->XMM_D(1), &env->sse_status);
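/* SSE3 horizontal/asymmetric ops: HADDP*/HSUBP* combine adjacent pairs
 * within each source operand, while ADDSUBP* subtract in the even lanes and
 * add in the odd lanes. */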
#define SSE_HELPER_CMP(name, F)\
void helper_ ## name ## ps (Reg *d, Reg *s)\
    d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
    d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1));\
    d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2));\
    d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3));\
void helper_ ## name ## ss (Reg *d, Reg *s)\
    d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
void helper_ ## name ## pd (Reg *d, Reg *s)\
    d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
    d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1));\
void helper_ ## name ## sd (Reg *d, Reg *s)\
    d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\

#define FPU_CMPEQ(size, a, b) float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0
#define FPU_CMPLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0
#define FPU_CMPLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? -1 : 0
#define FPU_CMPUNORD(size, a, b) float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0
#define FPU_CMPNEQ(size, a, b) float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1
#define FPU_CMPNLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1
#define FPU_CMPNLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? 0 : -1
#define FPU_CMPORD(size, a, b) float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1
SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
SSE_HELPER_CMP(cmplt, FPU_CMPLT)
SSE_HELPER_CMP(cmple, FPU_CMPLE)
SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
SSE_HELPER_CMP(cmpord, FPU_CMPORD)
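/* Note: cmpeq/cmpunord/cmpneq/cmpord use the quiet soft-float predicates
 * and do not signal on quiet NaN operands, whereas cmplt/cmple/cmpnlt/cmpnle
 * use float*_lt/_le, which raise Invalid for any NaN, matching the
 * signaling/non-signaling split of the CMPPS predicates. */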
static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_ucomiss(Reg *d, Reg *s)
    ret = float32_compare_quiet(s0, s1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];

void helper_comiss(Reg *d, Reg *s)
    ret = float32_compare(s0, s1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];

void helper_ucomisd(Reg *d, Reg *s)
    ret = float64_compare_quiet(d0, d1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];

void helper_comisd(Reg *d, Reg *s)
    ret = float64_compare(d0, d1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];
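/* Soft-float compare results are -1 (less), 0 (equal), 1 (greater) and
 * 2 (unordered); indexing comis_eflags with ret + 1 therefore yields CF,
 * ZF, no flags, or ZF|PF|CF, which is exactly how (U)COMISS/(U)COMISD
 * report the relation in EFLAGS. */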
uint32_t helper_movmskps(Reg *s)
    b0 = s->XMM_L(0) >> 31;
    b1 = s->XMM_L(1) >> 31;
    b2 = s->XMM_L(2) >> 31;
    b3 = s->XMM_L(3) >> 31;
    return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3);

uint32_t helper_movmskpd(Reg *s)
    b0 = s->XMM_L(1) >> 31;
    b1 = s->XMM_L(3) >> 31;
    return b0 | (b1 << 1);
uint32_t glue(helper_pmovmskb, SUFFIX)(Reg *s)
    val |= (s->B(0) >> 7);
    val |= (s->B(1) >> 6) & 0x02;
    val |= (s->B(2) >> 5) & 0x04;
    val |= (s->B(3) >> 4) & 0x08;
    val |= (s->B(4) >> 3) & 0x10;
    val |= (s->B(5) >> 2) & 0x20;
    val |= (s->B(6) >> 1) & 0x40;
    val |= (s->B(7)) & 0x80;
    val |= (s->B(8) << 1) & 0x0100;
    val |= (s->B(9) << 2) & 0x0200;
    val |= (s->B(10) << 3) & 0x0400;
    val |= (s->B(11) << 4) & 0x0800;
    val |= (s->B(12) << 5) & 0x1000;
    val |= (s->B(13) << 6) & 0x2000;
    val |= (s->B(14) << 7) & 0x4000;
    val |= (s->B(15) << 8) & 0x8000;
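/* PMOVMSKB gathers bit 7 of byte i into bit i of the result; MOVMSKPS and
 * MOVMSKPD above do the same with the sign bits of the 32-bit and 64-bit
 * lanes. */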
void glue(helper_packsswb, SUFFIX) (Reg *d, Reg *s)
    r.B(0) = satsb((int16_t)d->W(0));
    r.B(1) = satsb((int16_t)d->W(1));
    r.B(2) = satsb((int16_t)d->W(2));
    r.B(3) = satsb((int16_t)d->W(3));
    r.B(4) = satsb((int16_t)d->W(4));
    r.B(5) = satsb((int16_t)d->W(5));
    r.B(6) = satsb((int16_t)d->W(6));
    r.B(7) = satsb((int16_t)d->W(7));
    r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0));
    r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1));
    r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2));
    r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3));
    r.B(12) = satsb((int16_t)s->W(4));
    r.B(13) = satsb((int16_t)s->W(5));
    r.B(14) = satsb((int16_t)s->W(6));
    r.B(15) = satsb((int16_t)s->W(7));

void glue(helper_packuswb, SUFFIX) (Reg *d, Reg *s)
    r.B(0) = satub((int16_t)d->W(0));
    r.B(1) = satub((int16_t)d->W(1));
    r.B(2) = satub((int16_t)d->W(2));
    r.B(3) = satub((int16_t)d->W(3));
    r.B(4) = satub((int16_t)d->W(4));
    r.B(5) = satub((int16_t)d->W(5));
    r.B(6) = satub((int16_t)d->W(6));
    r.B(7) = satub((int16_t)d->W(7));
    r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0));
    r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1));
    r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2));
    r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3));
    r.B(12) = satub((int16_t)s->W(4));
    r.B(13) = satub((int16_t)s->W(5));
    r.B(14) = satub((int16_t)s->W(6));
    r.B(15) = satub((int16_t)s->W(7));

void glue(helper_packssdw, SUFFIX) (Reg *d, Reg *s)
    r.W(0) = satsw(d->L(0));
    r.W(1) = satsw(d->L(1));
    r.W(2) = satsw(d->L(2));
    r.W(3) = satsw(d->L(3));
    r.W((2 << SHIFT) + 0) = satsw(s->L(0));
    r.W((2 << SHIFT) + 1) = satsw(s->L(1));
    r.W(6) = satsw(s->L(2));
    r.W(7) = satsw(s->L(3));
#define UNPCK_OP(base_name, base) \
void glue(helper_punpck ## base_name ## bw, SUFFIX) (Reg *d, Reg *s) \
    r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
    r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
    r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
    r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
    r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
    r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
    r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
    r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
    r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
    r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
    r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
    r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
    r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
    r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
    r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
    r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
void glue(helper_punpck ## base_name ## wd, SUFFIX) (Reg *d, Reg *s) \
    r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
    r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
    r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
    r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
    r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
    r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
    r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
    r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
void glue(helper_punpck ## base_name ## dq, SUFFIX) (Reg *d, Reg *s) \
    r.L(0) = d->L((base << SHIFT) + 0); \
    r.L(1) = s->L((base << SHIFT) + 0); \
    r.L(2) = d->L((base << SHIFT) + 1); \
    r.L(3) = s->L((base << SHIFT) + 1); \
void glue(helper_punpck ## base_name ## qdq, SUFFIX) (Reg *d, Reg *s) \
    r.Q(0) = d->Q(base); \
    r.Q(1) = s->Q(base); \
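/* UNPCK_OP generates the punpckl* helpers with base = 0 and the punpckh*
 * helpers with base = 1: the base offset selects the low or high half of
 * each operand, and elements of d and s are interleaved into the result. */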
/* 3DNow! float ops */
void helper_pi2fd(MMXReg *d, MMXReg *s)
    d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status);
    d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status);

void helper_pi2fw(MMXReg *d, MMXReg *s)
    d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status);
    d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status);

void helper_pf2id(MMXReg *d, MMXReg *s)
    d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status);
    d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status);

void helper_pf2iw(MMXReg *d, MMXReg *s)
    d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status));
    d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status));

void helper_pfacc(MMXReg *d, MMXReg *s)
    r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
    r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);

void helper_pfadd(MMXReg *d, MMXReg *s)
    d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);

void helper_pfcmpeq(MMXReg *d, MMXReg *s)
    d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0), &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1), &env->mmx_status) ? -1 : 0;

void helper_pfcmpge(MMXReg *d, MMXReg *s)
    d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;

void helper_pfcmpgt(MMXReg *d, MMXReg *s)
    d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;

void helper_pfmax(MMXReg *d, MMXReg *s)
    if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status))
        d->MMX_S(0) = s->MMX_S(0);
    if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status))
        d->MMX_S(1) = s->MMX_S(1);

void helper_pfmin(MMXReg *d, MMXReg *s)
    if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status))
        d->MMX_S(0) = s->MMX_S(0);
    if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status))
        d->MMX_S(1) = s->MMX_S(1);

void helper_pfmul(MMXReg *d, MMXReg *s)
    d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);

void helper_pfnacc(MMXReg *d, MMXReg *s)
    r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
    r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);

void helper_pfpnacc(MMXReg *d, MMXReg *s)
    r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
    r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);

void helper_pfrcp(MMXReg *d, MMXReg *s)
    d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = d->MMX_S(0);
void helper_pfrsqrt(MMXReg *d, MMXReg *s)
    d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff;
    d->MMX_S(1) = float32_div(float32_one,
                              float32_sqrt(d->MMX_S(1), &env->mmx_status),
                              &env->mmx_status);
    d->MMX_L(1) |= s->MMX_L(0) & 0x80000000;
    d->MMX_L(0) = d->MMX_L(1);
void helper_pfsub(MMXReg *d, MMXReg *s)
    d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);

void helper_pfsubr(MMXReg *d, MMXReg *s)
    d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status);

void helper_pswapd(MMXReg *d, MMXReg *s)
    r.MMX_L(0) = s->MMX_L(1);
    r.MMX_L(1) = s->MMX_L(0);
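/* The 3DNow! helpers use env->mmx_status rather than sse_status.  Real
 * hardware returns low-precision approximations for PFRCP/PFRSQRT (meant to
 * be refined with PFRCPIT1/PFRCPIT2); here the exact reciprocal or
 * reciprocal square root is computed and broadcast to both lanes. */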
/* SSSE3 op helpers */
void glue(helper_pshufb, SUFFIX) (Reg *d, Reg *s)
    for (i = 0; i < (8 << SHIFT); i++)
        r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1)));
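/* PSHUFB: every byte of s selects a byte of d by its low 3 (MMX) or 4 (SSE)
 * bits -- the (8 << SHIFT) - 1 mask -- and a set top bit forces the result
 * byte to zero. */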
void glue(helper_phaddw, SUFFIX) (Reg *d, Reg *s)
    d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
    d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
    XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
    XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
    d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
    d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
    XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
    XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));

void glue(helper_phaddd, SUFFIX) (Reg *d, Reg *s)
    d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
    XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
    d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
    XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));

void glue(helper_phaddsw, SUFFIX) (Reg *d, Reg *s)
    d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
    d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
    XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
    XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
    d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
    d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
    XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
    XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
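/* Note: these horizontal adds update d in place.  When d and s are the same
 * register (e.g. phaddw %xmm0,%xmm0) the later s->W() reads observe low
 * words that have already been overwritten by the earlier assignments. */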
void glue(helper_pmaddubsw, SUFFIX) (Reg *d, Reg *s)
    d->W(0) = satsw((int8_t)s->B( 0) * (uint8_t)d->B( 0) +
                    (int8_t)s->B( 1) * (uint8_t)d->B( 1));
    d->W(1) = satsw((int8_t)s->B( 2) * (uint8_t)d->B( 2) +
                    (int8_t)s->B( 3) * (uint8_t)d->B( 3));
    d->W(2) = satsw((int8_t)s->B( 4) * (uint8_t)d->B( 4) +
                    (int8_t)s->B( 5) * (uint8_t)d->B( 5));
    d->W(3) = satsw((int8_t)s->B( 6) * (uint8_t)d->B( 6) +
                    (int8_t)s->B( 7) * (uint8_t)d->B( 7));
    d->W(4) = satsw((int8_t)s->B( 8) * (uint8_t)d->B( 8) +
                    (int8_t)s->B( 9) * (uint8_t)d->B( 9));
    d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) +
                    (int8_t)s->B(11) * (uint8_t)d->B(11));
    d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) +
                    (int8_t)s->B(13) * (uint8_t)d->B(13));
    d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) +
                    (int8_t)s->B(15) * (uint8_t)d->B(15));
void glue(helper_phsubw, SUFFIX) (Reg *d, Reg *s)
    d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1);
    d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3);
    XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5));
    XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7));
    d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1);
    d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3);
    XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5));
    XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7));

void glue(helper_phsubd, SUFFIX) (Reg *d, Reg *s)
    d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1);
    XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3));
    d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1);
    XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3));

void glue(helper_phsubsw, SUFFIX) (Reg *d, Reg *s)
    d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1));
    d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3));
    XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5)));
    XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7)));
    d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1));
    d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3));
    XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5)));
    XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
#define FABSB(_, x) x > INT8_MAX ? -(int8_t ) x : x
#define FABSW(_, x) x > INT16_MAX ? -(int16_t) x : x
#define FABSL(_, x) x > INT32_MAX ? -(int32_t) x : x
SSE_HELPER_B(helper_pabsb, FABSB)
SSE_HELPER_W(helper_pabsw, FABSW)
SSE_HELPER_L(helper_pabsd, FABSL)

#define FMULHRSW(d, s) ((int16_t) d * (int16_t) s + 0x4000) >> 15
SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)

#define FSIGNB(d, s) s <= INT8_MAX ? s ? d : 0 : -(int8_t ) d
#define FSIGNW(d, s) s <= INT16_MAX ? s ? d : 0 : -(int16_t) d
#define FSIGNL(d, s) s <= INT32_MAX ? s ? d : 0 : -(int32_t) d
SSE_HELPER_B(helper_psignb, FSIGNB)
SSE_HELPER_W(helper_psignw, FSIGNW)
SSE_HELPER_L(helper_psignd, FSIGNL)
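/* The element values arrive as unsigned, so "x > INT8_MAX" and friends test
 * the sign bit: FABS* negate negative elements (PABSB/W/D), while FSIGN*
 * implement PSIGNB/W/D -- copy d when s is positive, zero when s is zero,
 * and negate d when s is negative. */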
void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift)
    /* XXX could be checked during translation */
    if (shift >= (16 << SHIFT)) {
        XMM_ONLY(r.Q(1) = 0);

#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
        r.Q(0) = SHR(s->Q(0), shift - 0) |
                 SHR(d->Q(0), shift - 64);
        r.Q(0) = SHR(s->Q(0), shift - 0) |
                 SHR(s->Q(1), shift - 64) |
                 SHR(d->Q(0), shift - 128) |
                 SHR(d->Q(1), shift - 192);
        r.Q(1) = SHR(s->Q(0), shift + 64) |
                 SHR(s->Q(1), shift - 0) |
                 SHR(d->Q(0), shift - 64) |
                 SHR(d->Q(1), shift - 128);
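/* PALIGNR concatenates the source (low part) and destination (high part)
 * into one double-width value and extracts a register-sized window at the
 * byte offset given by the immediate; SHR() assembles that window from
 * 64-bit pieces and yields 0 for shift amounts outside [-63, 63], so the
 * out-of-range terms simply drop out of the OR. */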
#define XMM0 env->xmm_regs[0]

#define SSE_HELPER_V(name, elem, num, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
    d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0));\
    d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1));\
    d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2));\
    d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3));\
    d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4));\
    d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5));\
    d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6));\
    d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7));\
    d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8));\
    d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9));\
    d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10));\
    d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11));\
    d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12));\
    d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13));\
    d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14));\
    d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15));\

#define SSE_HELPER_I(name, elem, num, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s, uint32_t imm)\
    d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1));\
    d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1));\
    d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1));\
    d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1));\
    d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1));\
    d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1));\
    d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1));\
    d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1));\
    d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1));\
    d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1));\
    d->elem(10) = F(d->elem(10), s->elem(10), ((imm >> 10) & 1));\
    d->elem(11) = F(d->elem(11), s->elem(11), ((imm >> 11) & 1));\
    d->elem(12) = F(d->elem(12), s->elem(12), ((imm >> 12) & 1));\
    d->elem(13) = F(d->elem(13), s->elem(13), ((imm >> 13) & 1));\
    d->elem(14) = F(d->elem(14), s->elem(14), ((imm >> 14) & 1));\
    d->elem(15) = F(d->elem(15), s->elem(15), ((imm >> 15) & 1));\
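/* SSE_HELPER_V combines each element of d and s with the corresponding
 * element of XMM0, the implicit mask register of the SSE4.1 variable blends;
 * SSE_HELPER_I instead feeds one immediate bit per element.  The 'num'
 * parameter is the element count actually used by a given instantiation. */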
/* SSE4.1 op helpers */
#define FBLENDVB(d, s, m) (m & 0x80) ? s : d
#define FBLENDVPS(d, s, m) (m & 0x80000000) ? s : d
#define FBLENDVPD(d, s, m) (m & 0x8000000000000000LL) ? s : d
SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB)
SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS)
SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD)
void glue(helper_ptest, SUFFIX) (Reg *d, Reg *s)
    uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1));
    uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1));

    CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
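/* PTEST sets ZF when (s AND d) is all zero and CF when (s AND NOT d) is all
 * zero; the remaining arithmetic flags are cleared by the CC_SRC update. */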
#define SSE_HELPER_F(name, elem, num, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\

SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B)
SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B)
SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B)
SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W)
SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W)
SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L)
SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B)
SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B)
SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B)
SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W)
SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W)
SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L)
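/* These instantiations implement PMOVSX* / PMOVZX*: the macro body applies
 * the expression fragment passed as F at each source index, so the cast in
 * the fourth argument supplies the sign- or zero-extension of the narrower
 * source element into destination element type 'elem'. */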
void glue(helper_pmuldq, SUFFIX) (Reg *d, Reg *s)
    d->Q(0) = (int64_t) (int32_t) d->L(0) * (int32_t) s->L(0);
    d->Q(1) = (int64_t) (int32_t) d->L(2) * (int32_t) s->L(2);

#define FCMPEQQ(d, s) d == s ? -1 : 0
SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ)
void glue(helper_packusdw, SUFFIX) (Reg *d, Reg *s)
    d->W(0) = satuw((int32_t) d->L(0));
    d->W(1) = satuw((int32_t) d->L(1));
    d->W(2) = satuw((int32_t) d->L(2));
    d->W(3) = satuw((int32_t) d->L(3));
    d->W(4) = satuw((int32_t) s->L(0));
    d->W(5) = satuw((int32_t) s->L(1));
    d->W(6) = satuw((int32_t) s->L(2));
    d->W(7) = satuw((int32_t) s->L(3));
#define FMINSB(d, s) MIN((int8_t) d, (int8_t) s)
#define FMINSD(d, s) MIN((int32_t) d, (int32_t) s)
#define FMAXSB(d, s) MAX((int8_t) d, (int8_t) s)
#define FMAXSD(d, s) MAX((int32_t) d, (int32_t) s)
SSE_HELPER_B(helper_pminsb, FMINSB)
SSE_HELPER_L(helper_pminsd, FMINSD)
SSE_HELPER_W(helper_pminuw, MIN)
SSE_HELPER_L(helper_pminud, MIN)
SSE_HELPER_B(helper_pmaxsb, FMAXSB)
SSE_HELPER_L(helper_pmaxsd, FMAXSD)
SSE_HELPER_W(helper_pmaxuw, MAX)
SSE_HELPER_L(helper_pmaxud, MAX)

#define FMULLD(d, s) (int32_t) d * (int32_t) s
SSE_HELPER_L(helper_pmulld, FMULLD)
void glue(helper_phminposuw, SUFFIX) (Reg *d, Reg *s)
    if (s->W(1) < s->W(idx))
    if (s->W(2) < s->W(idx))
    if (s->W(3) < s->W(idx))
    if (s->W(4) < s->W(idx))
    if (s->W(5) < s->W(idx))
    if (s->W(6) < s->W(idx))
    if (s->W(7) < s->W(idx))

    d->W(0) = s->W(idx);
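/* PHMINPOSUW scans the eight unsigned words of the source for the minimum;
 * each comparison above moves idx to the position of the new minimum.  The
 * instruction stores the minimum value in word 0, its index in word 1, and
 * zeroes the remaining words of the destination. */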
void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            set_float_rounding_mode(float_round_down, &env->sse_status);
            set_float_rounding_mode(float_round_up, &env->sse_status);
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);

    d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);
    d->XMM_S(1) = float32_round_to_int(s->XMM_S(1), &env->sse_status);
    d->XMM_S(2) = float32_round_to_int(s->XMM_S(2), &env->sse_status);
    d->XMM_S(3) = float32_round_to_int(s->XMM_S(3), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;

void glue(helper_roundpd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            set_float_rounding_mode(float_round_down, &env->sse_status);
            set_float_rounding_mode(float_round_up, &env->sse_status);
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);

    d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);
    d->XMM_D(1) = float64_round_to_int(s->XMM_D(1), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;

void glue(helper_roundss, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            set_float_rounding_mode(float_round_down, &env->sse_status);
            set_float_rounding_mode(float_round_up, &env->sse_status);
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);

    d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;

void glue(helper_roundsd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
    signed char prev_rounding_mode;

    prev_rounding_mode = env->sse_status.float_rounding_mode;
    if (!(mode & (1 << 2)))
            set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
            set_float_rounding_mode(float_round_down, &env->sse_status);
            set_float_rounding_mode(float_round_up, &env->sse_status);
            set_float_rounding_mode(float_round_to_zero, &env->sse_status);

    d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);

    if (mode & (1 << 3))
        set_float_exception_flags(
                        get_float_exception_flags(&env->sse_status) &
                        ~float_flag_inexact,
                        &env->sse_status);
    env->sse_status.float_rounding_mode = prev_rounding_mode;
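/* ROUNDPS/PD/SS/SD immediate: bit 2 set means "use the current rounding
 * mode" (so the explicit mode in bits 1:0 is skipped), bits 1:0 select
 * nearest/down/up/truncate, and bit 3 suppresses the precision (inexact)
 * exception, which is why the helpers clear float_flag_inexact afterwards. */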
#define FBLENDP(d, s, m) m ? s : d
SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
void glue(helper_dpps, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
    float32 iresult = float32_zero;

    if (mask & (1 << 4))
        iresult = float32_add(iresult,
                        float32_mul(d->XMM_S(0), s->XMM_S(0), &env->sse_status),
                        &env->sse_status);
    if (mask & (1 << 5))
        iresult = float32_add(iresult,
                        float32_mul(d->XMM_S(1), s->XMM_S(1), &env->sse_status),
                        &env->sse_status);
    if (mask & (1 << 6))
        iresult = float32_add(iresult,
                        float32_mul(d->XMM_S(2), s->XMM_S(2), &env->sse_status),
                        &env->sse_status);
    if (mask & (1 << 7))
        iresult = float32_add(iresult,
                        float32_mul(d->XMM_S(3), s->XMM_S(3), &env->sse_status),
                        &env->sse_status);
    d->XMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero;
    d->XMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero;
    d->XMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero;
    d->XMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero;

void glue(helper_dppd, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
    float64 iresult = float64_zero;

    if (mask & (1 << 4))
        iresult = float64_add(iresult,
                        float64_mul(d->XMM_D(0), s->XMM_D(0), &env->sse_status),
                        &env->sse_status);
    if (mask & (1 << 5))
        iresult = float64_add(iresult,
                        float64_mul(d->XMM_D(1), s->XMM_D(1), &env->sse_status),
                        &env->sse_status);
    d->XMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero;
    d->XMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero;
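/* DPPS/DPPD immediate: the high nibble selects which element products are
 * accumulated into the dot product, and the low nibble selects which result
 * lanes receive it -- the other lanes are set to zero. */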
void glue(helper_mpsadbw, SUFFIX) (Reg *d, Reg *s, uint32_t offset)
    int s0 = (offset & 3) << 2;
    int d0 = (offset & 4) << 0;

    for (i = 0; i < 8; i++, d0++) {
        r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0));
        r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1));
        r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2));
        r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3));
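/* MPSADBW: immediate bits 1:0 select the 4-byte block of s (byte offset s0)
 * and bit 2 the starting offset within d; each of the eight result words is
 * the sum of absolute differences between that block and a 4-byte window of
 * d that slides by one byte per result. */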
/* SSE4.2 op helpers */
/* it's unclear whether signed or unsigned */
#define FCMPGTQ(d, s) d > s ? -1 : 0
SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
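/* Note: the architected PCMPGTQ is a signed 64-bit comparison.  As written,
 * FCMPGTQ compares the unsigned Q() values, so operands whose sign bits
 * differ produce the opposite result. */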
static inline int pcmp_elen(int reg, uint32_t ctrl)
    /* Presence of REX.W is indicated by a bit higher than 7 set */
        val = abs1((int64_t) env->regs[reg]);
        val = abs1((int32_t) env->regs[reg]);
static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
        while (val < 8 && r->W(val))
        while (val < 16 && r->B(val))

static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
    switch ((ctrl >> 0) & 3) {
        return (int8_t) r->B(i);
        return (int16_t) r->W(i);
static inline unsigned pcmpxstrx(Reg *d, Reg *s,
                                 int8_t ctrl, int valids, int validd)
    unsigned int res = 0;
    int upper = (ctrl & 1) ? 7 : 15;

    CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0);

    switch ((ctrl >> 2) & 3) {
        for (j = valids; j >= 0; j--) {
            v = pcmp_val(s, ctrl, j);
            for (i = validd; i >= 0; i--)
                res |= (v == pcmp_val(d, ctrl, i));
        for (j = valids; j >= 0; j--) {
            v = pcmp_val(s, ctrl, j);
            for (i = ((validd - 1) | 1); i >= 0; i -= 2)
                res |= (pcmp_val(d, ctrl, i - 0) <= v &&
                        pcmp_val(d, ctrl, i - 1) >= v);
        res = (2 << (upper - MAX(valids, validd))) - 1;
        res <<= MAX(valids, validd) - MIN(valids, validd);
        for (i = MIN(valids, validd); i >= 0; i--) {
            v = pcmp_val(s, ctrl, i);
            res |= (v == pcmp_val(d, ctrl, i));
        for (j = valids - validd; j >= 0; j--) {
            for (i = MIN(upper - j, validd); i >= 0; i--)
                res &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));

    switch ((ctrl >> 4) & 3) {
        res ^= (2 << upper) - 1;
        res ^= (2 << valids) - 1;
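/* PCMPxSTRx control byte: bits 1:0 select the element format (unsigned or
 * signed bytes or words), bits 3:2 the aggregation operation -- "equal any",
 * "ranges", "equal each" and "equal ordered", in the order of the switch
 * cases above -- and bits 5:4 the polarity, i.e. whether the whole result or
 * only its valid part is inverted. */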
static inline int rffs1(unsigned int val)
    for (hi = sizeof(val) * 4; hi; hi /= 2)

static inline int ffs1(unsigned int val)
    for (hi = sizeof(val) * 4; hi; hi /= 2)
, SUFFIX
) (Reg
*d
, Reg
*s
, uint32_t ctrl
)
1985 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
1986 pcmp_elen(R_EDX
, ctrl
),
1987 pcmp_elen(R_EAX
, ctrl
));
1990 env
->regs
[R_ECX
] = ((ctrl
& (1 << 6)) ? rffs1
: ffs1
)(res
) - 1;
1992 env
->regs
[R_ECX
] = 16 >> (ctrl
& (1 << 0));
1995 void glue(helper_pcmpestrm
, SUFFIX
) (Reg
*d
, Reg
*s
, uint32_t ctrl
)
1998 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
1999 pcmp_elen(R_EDX
, ctrl
),
2000 pcmp_elen(R_EAX
, ctrl
));
2002 if ((ctrl
>> 6) & 1) {
2004 for (i
= 0; i
< 8; i
++, res
>>= 1) {
2005 d
->W(i
) = (res
& 1) ? ~0 : 0;
2008 for (i
= 0; i
< 16; i
++, res
>>= 1) {
2009 d
->B(i
) = (res
& 1) ? ~0 : 0;
2017 void glue(helper_pcmpistri
, SUFFIX
) (Reg
*d
, Reg
*s
, uint32_t ctrl
)
2019 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
2021 pcmp_ilen(d
, ctrl
));
2024 env
->regs
[R_ECX
] = ((ctrl
& (1 << 6)) ? rffs1
: ffs1
)(res
) - 1;
2026 env
->regs
[R_ECX
] = 16 >> (ctrl
& (1 << 0));
2029 void glue(helper_pcmpistrm
, SUFFIX
) (Reg
*d
, Reg
*s
, uint32_t ctrl
)
2032 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
2034 pcmp_ilen(d
, ctrl
));
2036 if ((ctrl
>> 6) & 1) {
2038 for (i
= 0; i
< 8; i
++, res
>>= 1) {
2039 d
->W(i
) = (res
& 1) ? ~0 : 0;
2042 for (i
= 0; i
< 16; i
++, res
>>= 1) {
2043 d
->B(i
) = (res
& 1) ? ~0 : 0;
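/* The pcmpestr* helpers take explicit string lengths from EAX/EDX via
 * pcmp_elen, while the pcmpistr* helpers stop at the first zero element via
 * pcmp_ilen.  The *i forms return an element index in ECX (immediate bit 6
 * picks the most- rather than least-significant set bit of the result), and
 * the *m forms expand the result mask into one byte or word per element. */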
#define CRCPOLY 0x1edc6f41
#define CRCPOLY_BITREV 0x82f63b78
target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
    target_ulong crc = (msg & ((target_ulong) -1 >>
                        (TARGET_LONG_BITS - len))) ^ crc1;

        crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0);
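/* 0x82f63b78 is the bit-reversed form of the CRC-32C (Castagnoli) polynomial
 * 0x1edc6f41 used by the SSE4.2 CRC32 instruction; shifting right and
 * conditionally XOR-ing the reversed polynomial processes the message bits
 * LSB first. */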
#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
#define POPCOUNT(n, i) (n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i))
target_ulong helper_popcnt(target_ulong n, uint32_t type)
    CC_SRC = n ? 0 : CC_Z;

#ifndef TARGET_X86_64
    return POPCOUNT(n, 5);
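/* POPMASK(i) expands to an alternating mask of 2^i-bit fields (0x5555...,
 * 0x3333..., 0x0f0f..., and so on), so POPCOUNT(n, i) adds each pair of
 * adjacent 2^i-bit fields in parallel; applying it for i = 0..5 sums all the
 * bits of a 64-bit value. */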