2 * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
4 * Copyright (c) 2005 Fabrice Bellard
5 * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com>
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* XMM_ONLY(x): keeps x in this (SSE/XMM, 128-bit) instantiation of the
 * template, so statements touching lanes above the MMX width survive.
 * NOTE(review): the MMX instantiation presumably defines XMM_ONLY() as
 * empty — that definition is not visible in this chunk; confirm. */
30 #define XMM_ONLY(...) __VA_ARGS__
38 void glue(helper_psrlw
, SUFFIX
)(Reg
*d
, Reg
*s
)
62 void glue(helper_psraw
, SUFFIX
)(Reg
*d
, Reg
*s
)
71 d
->W(0) = (int16_t)d
->W(0) >> shift
;
72 d
->W(1) = (int16_t)d
->W(1) >> shift
;
73 d
->W(2) = (int16_t)d
->W(2) >> shift
;
74 d
->W(3) = (int16_t)d
->W(3) >> shift
;
76 d
->W(4) = (int16_t)d
->W(4) >> shift
;
77 d
->W(5) = (int16_t)d
->W(5) >> shift
;
78 d
->W(6) = (int16_t)d
->W(6) >> shift
;
79 d
->W(7) = (int16_t)d
->W(7) >> shift
;
83 void glue(helper_psllw
, SUFFIX
)(Reg
*d
, Reg
*s
)
107 void glue(helper_psrld
, SUFFIX
)(Reg
*d
, Reg
*s
)
127 void glue(helper_psrad
, SUFFIX
)(Reg
*d
, Reg
*s
)
136 d
->L(0) = (int32_t)d
->L(0) >> shift
;
137 d
->L(1) = (int32_t)d
->L(1) >> shift
;
139 d
->L(2) = (int32_t)d
->L(2) >> shift
;
140 d
->L(3) = (int32_t)d
->L(3) >> shift
;
144 void glue(helper_pslld
, SUFFIX
)(Reg
*d
, Reg
*s
)
164 void glue(helper_psrlq
, SUFFIX
)(Reg
*d
, Reg
*s
)
182 void glue(helper_psllq
, SUFFIX
)(Reg
*d
, Reg
*s
)
201 void glue(helper_psrldq
, SUFFIX
)(Reg
*d
, Reg
*s
)
209 for (i
= 0; i
< 16 - shift
; i
++) {
210 d
->B(i
) = d
->B(i
+ shift
);
212 for (i
= 16 - shift
; i
< 16; i
++) {
217 void glue(helper_pslldq
, SUFFIX
)(Reg
*d
, Reg
*s
)
225 for (i
= 15; i
>= shift
; i
--) {
226 d
->B(i
) = d
->B(i
- shift
);
228 for (i
= 0; i
< shift
; i
++) {
/* Element-wise binary-op generators.  SSE_HELPER_{B,W,L,Q}(name, F)
 * emit a helper that applies F to every byte/word/dword/qword lane of
 * d and s, storing the result back into d.  The upper-half statements
 * are wrapped in XMM_ONLY(...) in the full source (those guard lines
 * are elided from this chunk).  Do not insert text between the
 * backslash-continued lines below: it would break the macro bodies. */
234 #define SSE_HELPER_B(name, F) \
235 void glue(name, SUFFIX)(Reg *d, Reg *s) \
237 d->B(0) = F(d->B(0), s->B(0)); \
238 d->B(1) = F(d->B(1), s->B(1)); \
239 d->B(2) = F(d->B(2), s->B(2)); \
240 d->B(3) = F(d->B(3), s->B(3)); \
241 d->B(4) = F(d->B(4), s->B(4)); \
242 d->B(5) = F(d->B(5), s->B(5)); \
243 d->B(6) = F(d->B(6), s->B(6)); \
244 d->B(7) = F(d->B(7), s->B(7)); \
246 d->B(8) = F(d->B(8), s->B(8)); \
247 d->B(9) = F(d->B(9), s->B(9)); \
248 d->B(10) = F(d->B(10), s->B(10)); \
249 d->B(11) = F(d->B(11), s->B(11)); \
250 d->B(12) = F(d->B(12), s->B(12)); \
251 d->B(13) = F(d->B(13), s->B(13)); \
252 d->B(14) = F(d->B(14), s->B(14)); \
253 d->B(15) = F(d->B(15), s->B(15)); \
257 #define SSE_HELPER_W(name, F) \
258 void glue(name, SUFFIX)(Reg *d, Reg *s) \
260 d->W(0) = F(d->W(0), s->W(0)); \
261 d->W(1) = F(d->W(1), s->W(1)); \
262 d->W(2) = F(d->W(2), s->W(2)); \
263 d->W(3) = F(d->W(3), s->W(3)); \
265 d->W(4) = F(d->W(4), s->W(4)); \
266 d->W(5) = F(d->W(5), s->W(5)); \
267 d->W(6) = F(d->W(6), s->W(6)); \
268 d->W(7) = F(d->W(7), s->W(7)); \
272 #define SSE_HELPER_L(name, F) \
273 void glue(name, SUFFIX)(Reg *d, Reg *s) \
275 d->L(0) = F(d->L(0), s->L(0)); \
276 d->L(1) = F(d->L(1), s->L(1)); \
278 d->L(2) = F(d->L(2), s->L(2)); \
279 d->L(3) = F(d->L(3), s->L(3)); \
283 #define SSE_HELPER_Q(name, F) \
284 void glue(name, SUFFIX)(Reg *d, Reg *s) \
286 d->Q(0) = F(d->Q(0), s->Q(0)); \
288 d->Q(1) = F(d->Q(1), s->Q(1)); \
293 static inline int satub(int x
)
297 } else if (x
> 255) {
304 static inline int satuw(int x
)
308 } else if (x
> 65535) {
315 static inline int satsb(int x
)
319 } else if (x
> 127) {
326 static inline int satsw(int x
)
330 } else if (x
> 32767) {
/* Per-lane add/subtract kernels plugged into SSE_HELPER_*.
 * Plain forms wrap modulo the lane width (storage into the narrow
 * d->B/W/L lane truncates); the U*/S* forms saturate via the satub/
 * satuw/satsb/satsw helpers (defined earlier in this file, bodies
 * elided from this chunk). */
337 #define FADD(a, b) ((a) + (b))
338 #define FADDUB(a, b) satub((a) + (b))
339 #define FADDUW(a, b) satuw((a) + (b))
/* Signed saturating forms sign-extend the lanes before adding. */
340 #define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
341 #define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))
343 #define FSUB(a, b) ((a) - (b))
344 #define FSUBUB(a, b) satub((a) - (b))
345 #define FSUBUW(a, b) satuw((a) - (b))
346 #define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
347 #define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
/* Per-lane min/max kernels for PMINUB/PMINSW/PMAXUB/PMAXSW.
 * U* forms compare the lanes as unsigned (their natural storage type);
 * S* forms compare as int16_t.
 * Fixed: the ternary expansion is now parenthesized as a whole so the
 * macros expand safely inside a larger expression (CERT PRE02-C); the
 * previous form `((a) < (b)) ? (a) : (b)` mis-parsed in contexts such
 * as `x * FMAXUB(a, b)`.  Arguments are still evaluated twice, so
 * callers must not pass expressions with side effects. */
#define FMINUB(a, b) (((a) < (b)) ? (a) : (b))
#define FMINSW(a, b) (((int16_t)(a) < (int16_t)(b)) ? (a) : (b))
#define FMAXUB(a, b) (((a) > (b)) ? (a) : (b))
#define FMAXSW(a, b) (((int16_t)(a) > (int16_t)(b)) ? (a) : (b))
/* Bitwise kernels.  Note FANDN's operand order: SSE_HELPER_Q expands
 * F(d-lane, s-lane), so FANDN computes ~d & s — the PANDN semantics. */
353 #define FAND(a, b) ((a) & (b))
354 #define FANDN(a, b) ((~(a)) & (b))
355 #define FOR(a, b) ((a) | (b))
356 #define FXOR(a, b) ((a) ^ (b))
/* Compare kernels: produce an all-ones (-1) or all-zeros lane mask.
 * GT compares are signed, per the PCMPGT* instructions. */
358 #define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0)
359 #define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0)
360 #define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0)
361 #define FCMPEQ(a, b) ((a) == (b) ? -1 : 0)
/* Multiply kernels: low half, 3DNow! rounded high half (PMULHRW adds
 * 0x8000 before taking the high 16 bits), unsigned high half, signed
 * high half. */
363 #define FMULLW(a, b) ((a) * (b))
364 #define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
365 #define FMULHUW(a, b) ((a) * (b) >> 16)
366 #define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)
/* Average kernel: rounds the halved sum up, as PAVGB/PAVGW require. */
368 #define FAVG(a, b) (((a) + (b) + 1) >> 1)
/* Instantiate the per-lane helpers: each SSE_HELPER_{B,W,L,Q} line
 * below generates one helper_* function from the matching F* kernel. */
/* Wraparound add, one helper per lane width. */
371 SSE_HELPER_B(helper_paddb
, FADD
)
372 SSE_HELPER_W(helper_paddw
, FADD
)
373 SSE_HELPER_L(helper_paddl
, FADD
)
374 SSE_HELPER_Q(helper_paddq
, FADD
)
/* Wraparound subtract. */
376 SSE_HELPER_B(helper_psubb
, FSUB
)
377 SSE_HELPER_W(helper_psubw
, FSUB
)
378 SSE_HELPER_L(helper_psubl
, FSUB
)
379 SSE_HELPER_Q(helper_psubq
, FSUB
)
/* Saturating byte add/subtract (unsigned and signed). */
381 SSE_HELPER_B(helper_paddusb
, FADDUB
)
382 SSE_HELPER_B(helper_paddsb
, FADDSB
)
383 SSE_HELPER_B(helper_psubusb
, FSUBUB
)
384 SSE_HELPER_B(helper_psubsb
, FSUBSB
)
/* Saturating word add/subtract (unsigned and signed). */
386 SSE_HELPER_W(helper_paddusw
, FADDUW
)
387 SSE_HELPER_W(helper_paddsw
, FADDSW
)
388 SSE_HELPER_W(helper_psubusw
, FSUBUW
)
389 SSE_HELPER_W(helper_psubsw
, FSUBSW
)
/* Min/max: unsigned bytes, signed words. */
391 SSE_HELPER_B(helper_pminub
, FMINUB
)
392 SSE_HELPER_B(helper_pmaxub
, FMAXUB
)
394 SSE_HELPER_W(helper_pminsw
, FMINSW
)
395 SSE_HELPER_W(helper_pmaxsw
, FMAXSW
)
/* Full-width bitwise ops (lane size is irrelevant, so qwords). */
397 SSE_HELPER_Q(helper_pand
, FAND
)
398 SSE_HELPER_Q(helper_pandn
, FANDN
)
399 SSE_HELPER_Q(helper_por
, FOR
)
400 SSE_HELPER_Q(helper_pxor
, FXOR
)
/* Compares: signed greater-than and bitwise equality, mask results. */
402 SSE_HELPER_B(helper_pcmpgtb
, FCMPGTB
)
403 SSE_HELPER_W(helper_pcmpgtw
, FCMPGTW
)
404 SSE_HELPER_L(helper_pcmpgtl
, FCMPGTL
)
406 SSE_HELPER_B(helper_pcmpeqb
, FCMPEQ
)
407 SSE_HELPER_W(helper_pcmpeqw
, FCMPEQ
)
408 SSE_HELPER_L(helper_pcmpeql
, FCMPEQ
)
/* Word multiplies: low half, 3DNow! rounded high, unsigned/signed high. */
410 SSE_HELPER_W(helper_pmullw
, FMULLW
)
412 SSE_HELPER_W(helper_pmulhrw
, FMULHRW
)
414 SSE_HELPER_W(helper_pmulhuw
, FMULHUW
)
415 SSE_HELPER_W(helper_pmulhw
, FMULHW
)
/* Rounded averages (PAVGB/PAVGW). */
417 SSE_HELPER_B(helper_pavgb
, FAVG
)
418 SSE_HELPER_W(helper_pavgw
, FAVG
)
420 void glue(helper_pmuludq
, SUFFIX
)(Reg
*d
, Reg
*s
)
422 d
->Q(0) = (uint64_t)s
->L(0) * (uint64_t)d
->L(0);
424 d
->Q(1) = (uint64_t)s
->L(2) * (uint64_t)d
->L(2);
428 void glue(helper_pmaddwd
, SUFFIX
)(Reg
*d
, Reg
*s
)
432 for (i
= 0; i
< (2 << SHIFT
); i
++) {
433 d
->L(i
) = (int16_t)s
->W(2 * i
) * (int16_t)d
->W(2 * i
) +
434 (int16_t)s
->W(2 * i
+ 1) * (int16_t)d
->W(2 * i
+ 1);
439 static inline int abs1(int a
)
448 void glue(helper_psadbw
, SUFFIX
)(Reg
*d
, Reg
*s
)
453 val
+= abs1(d
->B(0) - s
->B(0));
454 val
+= abs1(d
->B(1) - s
->B(1));
455 val
+= abs1(d
->B(2) - s
->B(2));
456 val
+= abs1(d
->B(3) - s
->B(3));
457 val
+= abs1(d
->B(4) - s
->B(4));
458 val
+= abs1(d
->B(5) - s
->B(5));
459 val
+= abs1(d
->B(6) - s
->B(6));
460 val
+= abs1(d
->B(7) - s
->B(7));
464 val
+= abs1(d
->B(8) - s
->B(8));
465 val
+= abs1(d
->B(9) - s
->B(9));
466 val
+= abs1(d
->B(10) - s
->B(10));
467 val
+= abs1(d
->B(11) - s
->B(11));
468 val
+= abs1(d
->B(12) - s
->B(12));
469 val
+= abs1(d
->B(13) - s
->B(13));
470 val
+= abs1(d
->B(14) - s
->B(14));
471 val
+= abs1(d
->B(15) - s
->B(15));
476 void glue(helper_maskmov
, SUFFIX
)(Reg
*d
, Reg
*s
, target_ulong a0
)
480 for (i
= 0; i
< (8 << SHIFT
); i
++) {
481 if (s
->B(i
) & 0x80) {
482 stb(a0
+ i
, d
->B(i
));
487 void glue(helper_movl_mm_T0
, SUFFIX
)(Reg
*d
, uint32_t val
)
497 void glue(helper_movq_mm_T0
, SUFFIX
)(Reg
*d
, uint64_t val
)
507 void glue(helper_pshufw
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
511 r
.W(0) = s
->W(order
& 3);
512 r
.W(1) = s
->W((order
>> 2) & 3);
513 r
.W(2) = s
->W((order
>> 4) & 3);
514 r
.W(3) = s
->W((order
>> 6) & 3);
518 void helper_shufps(Reg
*d
, Reg
*s
, int order
)
522 r
.L(0) = d
->L(order
& 3);
523 r
.L(1) = d
->L((order
>> 2) & 3);
524 r
.L(2) = s
->L((order
>> 4) & 3);
525 r
.L(3) = s
->L((order
>> 6) & 3);
529 void helper_shufpd(Reg
*d
, Reg
*s
, int order
)
533 r
.Q(0) = d
->Q(order
& 1);
534 r
.Q(1) = s
->Q((order
>> 1) & 1);
538 void glue(helper_pshufd
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
542 r
.L(0) = s
->L(order
& 3);
543 r
.L(1) = s
->L((order
>> 2) & 3);
544 r
.L(2) = s
->L((order
>> 4) & 3);
545 r
.L(3) = s
->L((order
>> 6) & 3);
549 void glue(helper_pshuflw
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
553 r
.W(0) = s
->W(order
& 3);
554 r
.W(1) = s
->W((order
>> 2) & 3);
555 r
.W(2) = s
->W((order
>> 4) & 3);
556 r
.W(3) = s
->W((order
>> 6) & 3);
561 void glue(helper_pshufhw
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
566 r
.W(4) = s
->W(4 + (order
& 3));
567 r
.W(5) = s
->W(4 + ((order
>> 2) & 3));
568 r
.W(6) = s
->W(4 + ((order
>> 4) & 3));
569 r
.W(7) = s
->W(4 + ((order
>> 6) & 3));
576 /* XXX: not accurate */
578 #define SSE_HELPER_S(name, F) \
579 void helper_ ## name ## ps(Reg *d, Reg *s) \
581 d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
582 d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \
583 d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \
584 d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \
587 void helper_ ## name ## ss(Reg *d, Reg *s) \
589 d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
592 void helper_ ## name ## pd(Reg *d, Reg *s) \
594 d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
595 d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \
598 void helper_ ## name ## sd(Reg *d, Reg *s) \
600 d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
/* Scalar float kernels for SSE_HELPER_S: `size` selects float32/float64
 * softfloat routines, all rounding/flags go through env->sse_status. */
603 #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
604 #define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
605 #define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
606 #define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)
/* sqrt is unary: `a` (the destination lane) is accepted only so the
 * macro has the same arity as the binary kernels, and is ignored. */
607 #define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status)
609 /* Note that the choice of comparison op here is important to get the
610 * special cases right: for min and max Intel specifies that (-0,0),
611 * (NaN, anything) and (anything, NaN) return the second argument.
613 #define FPU_MIN(size, a, b) \
614 (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b))
615 #define FPU_MAX(size, a, b) \
616 (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b))
/* Each line generates the four scalar/packed single/double variants
 * helper_<name>ps / ss / pd / sd from the FPU_* kernel above. */
618 SSE_HELPER_S(add
, FPU_ADD
)
619 SSE_HELPER_S(sub
, FPU_SUB
)
620 SSE_HELPER_S(mul
, FPU_MUL
)
621 SSE_HELPER_S(div
, FPU_DIV
)
622 SSE_HELPER_S(min
, FPU_MIN
)
623 SSE_HELPER_S(max
, FPU_MAX
)
/* sqrt reuses the binary template; FPU_SQRT ignores its `a` operand. */
624 SSE_HELPER_S(sqrt
, FPU_SQRT
)
627 /* float to float conversions */
628 void helper_cvtps2pd(Reg
*d
, Reg
*s
)
634 d
->XMM_D(0) = float32_to_float64(s0
, &env
->sse_status
);
635 d
->XMM_D(1) = float32_to_float64(s1
, &env
->sse_status
);
638 void helper_cvtpd2ps(Reg
*d
, Reg
*s
)
640 d
->XMM_S(0) = float64_to_float32(s
->XMM_D(0), &env
->sse_status
);
641 d
->XMM_S(1) = float64_to_float32(s
->XMM_D(1), &env
->sse_status
);
645 void helper_cvtss2sd(Reg
*d
, Reg
*s
)
647 d
->XMM_D(0) = float32_to_float64(s
->XMM_S(0), &env
->sse_status
);
650 void helper_cvtsd2ss(Reg
*d
, Reg
*s
)
652 d
->XMM_S(0) = float64_to_float32(s
->XMM_D(0), &env
->sse_status
);
655 /* integer to float */
656 void helper_cvtdq2ps(Reg
*d
, Reg
*s
)
658 d
->XMM_S(0) = int32_to_float32(s
->XMM_L(0), &env
->sse_status
);
659 d
->XMM_S(1) = int32_to_float32(s
->XMM_L(1), &env
->sse_status
);
660 d
->XMM_S(2) = int32_to_float32(s
->XMM_L(2), &env
->sse_status
);
661 d
->XMM_S(3) = int32_to_float32(s
->XMM_L(3), &env
->sse_status
);
664 void helper_cvtdq2pd(Reg
*d
, Reg
*s
)
668 l0
= (int32_t)s
->XMM_L(0);
669 l1
= (int32_t)s
->XMM_L(1);
670 d
->XMM_D(0) = int32_to_float64(l0
, &env
->sse_status
);
671 d
->XMM_D(1) = int32_to_float64(l1
, &env
->sse_status
);
674 void helper_cvtpi2ps(XMMReg
*d
, MMXReg
*s
)
676 d
->XMM_S(0) = int32_to_float32(s
->MMX_L(0), &env
->sse_status
);
677 d
->XMM_S(1) = int32_to_float32(s
->MMX_L(1), &env
->sse_status
);
680 void helper_cvtpi2pd(XMMReg
*d
, MMXReg
*s
)
682 d
->XMM_D(0) = int32_to_float64(s
->MMX_L(0), &env
->sse_status
);
683 d
->XMM_D(1) = int32_to_float64(s
->MMX_L(1), &env
->sse_status
);
686 void helper_cvtsi2ss(XMMReg
*d
, uint32_t val
)
688 d
->XMM_S(0) = int32_to_float32(val
, &env
->sse_status
);
691 void helper_cvtsi2sd(XMMReg
*d
, uint32_t val
)
693 d
->XMM_D(0) = int32_to_float64(val
, &env
->sse_status
);
697 void helper_cvtsq2ss(XMMReg
*d
, uint64_t val
)
699 d
->XMM_S(0) = int64_to_float32(val
, &env
->sse_status
);
702 void helper_cvtsq2sd(XMMReg
*d
, uint64_t val
)
704 d
->XMM_D(0) = int64_to_float64(val
, &env
->sse_status
);
708 /* float to integer */
709 void helper_cvtps2dq(XMMReg
*d
, XMMReg
*s
)
711 d
->XMM_L(0) = float32_to_int32(s
->XMM_S(0), &env
->sse_status
);
712 d
->XMM_L(1) = float32_to_int32(s
->XMM_S(1), &env
->sse_status
);
713 d
->XMM_L(2) = float32_to_int32(s
->XMM_S(2), &env
->sse_status
);
714 d
->XMM_L(3) = float32_to_int32(s
->XMM_S(3), &env
->sse_status
);
717 void helper_cvtpd2dq(XMMReg
*d
, XMMReg
*s
)
719 d
->XMM_L(0) = float64_to_int32(s
->XMM_D(0), &env
->sse_status
);
720 d
->XMM_L(1) = float64_to_int32(s
->XMM_D(1), &env
->sse_status
);
724 void helper_cvtps2pi(MMXReg
*d
, XMMReg
*s
)
726 d
->MMX_L(0) = float32_to_int32(s
->XMM_S(0), &env
->sse_status
);
727 d
->MMX_L(1) = float32_to_int32(s
->XMM_S(1), &env
->sse_status
);
730 void helper_cvtpd2pi(MMXReg
*d
, XMMReg
*s
)
732 d
->MMX_L(0) = float64_to_int32(s
->XMM_D(0), &env
->sse_status
);
733 d
->MMX_L(1) = float64_to_int32(s
->XMM_D(1), &env
->sse_status
);
736 int32_t helper_cvtss2si(XMMReg
*s
)
738 return float32_to_int32(s
->XMM_S(0), &env
->sse_status
);
741 int32_t helper_cvtsd2si(XMMReg
*s
)
743 return float64_to_int32(s
->XMM_D(0), &env
->sse_status
);
747 int64_t helper_cvtss2sq(XMMReg
*s
)
749 return float32_to_int64(s
->XMM_S(0), &env
->sse_status
);
752 int64_t helper_cvtsd2sq(XMMReg
*s
)
754 return float64_to_int64(s
->XMM_D(0), &env
->sse_status
);
758 /* float to integer truncated */
759 void helper_cvttps2dq(XMMReg
*d
, XMMReg
*s
)
761 d
->XMM_L(0) = float32_to_int32_round_to_zero(s
->XMM_S(0), &env
->sse_status
);
762 d
->XMM_L(1) = float32_to_int32_round_to_zero(s
->XMM_S(1), &env
->sse_status
);
763 d
->XMM_L(2) = float32_to_int32_round_to_zero(s
->XMM_S(2), &env
->sse_status
);
764 d
->XMM_L(3) = float32_to_int32_round_to_zero(s
->XMM_S(3), &env
->sse_status
);
767 void helper_cvttpd2dq(XMMReg
*d
, XMMReg
*s
)
769 d
->XMM_L(0) = float64_to_int32_round_to_zero(s
->XMM_D(0), &env
->sse_status
);
770 d
->XMM_L(1) = float64_to_int32_round_to_zero(s
->XMM_D(1), &env
->sse_status
);
774 void helper_cvttps2pi(MMXReg
*d
, XMMReg
*s
)
776 d
->MMX_L(0) = float32_to_int32_round_to_zero(s
->XMM_S(0), &env
->sse_status
);
777 d
->MMX_L(1) = float32_to_int32_round_to_zero(s
->XMM_S(1), &env
->sse_status
);
780 void helper_cvttpd2pi(MMXReg
*d
, XMMReg
*s
)
782 d
->MMX_L(0) = float64_to_int32_round_to_zero(s
->XMM_D(0), &env
->sse_status
);
783 d
->MMX_L(1) = float64_to_int32_round_to_zero(s
->XMM_D(1), &env
->sse_status
);
786 int32_t helper_cvttss2si(XMMReg
*s
)
788 return float32_to_int32_round_to_zero(s
->XMM_S(0), &env
->sse_status
);
791 int32_t helper_cvttsd2si(XMMReg
*s
)
793 return float64_to_int32_round_to_zero(s
->XMM_D(0), &env
->sse_status
);
797 int64_t helper_cvttss2sq(XMMReg
*s
)
799 return float32_to_int64_round_to_zero(s
->XMM_S(0), &env
->sse_status
);
802 int64_t helper_cvttsd2sq(XMMReg
*s
)
804 return float64_to_int64_round_to_zero(s
->XMM_D(0), &env
->sse_status
);
808 void helper_rsqrtps(XMMReg
*d
, XMMReg
*s
)
810 d
->XMM_S(0) = float32_div(float32_one
,
811 float32_sqrt(s
->XMM_S(0), &env
->sse_status
),
813 d
->XMM_S(1) = float32_div(float32_one
,
814 float32_sqrt(s
->XMM_S(1), &env
->sse_status
),
816 d
->XMM_S(2) = float32_div(float32_one
,
817 float32_sqrt(s
->XMM_S(2), &env
->sse_status
),
819 d
->XMM_S(3) = float32_div(float32_one
,
820 float32_sqrt(s
->XMM_S(3), &env
->sse_status
),
824 void helper_rsqrtss(XMMReg
*d
, XMMReg
*s
)
826 d
->XMM_S(0) = float32_div(float32_one
,
827 float32_sqrt(s
->XMM_S(0), &env
->sse_status
),
831 void helper_rcpps(XMMReg
*d
, XMMReg
*s
)
833 d
->XMM_S(0) = float32_div(float32_one
, s
->XMM_S(0), &env
->sse_status
);
834 d
->XMM_S(1) = float32_div(float32_one
, s
->XMM_S(1), &env
->sse_status
);
835 d
->XMM_S(2) = float32_div(float32_one
, s
->XMM_S(2), &env
->sse_status
);
836 d
->XMM_S(3) = float32_div(float32_one
, s
->XMM_S(3), &env
->sse_status
);
839 void helper_rcpss(XMMReg
*d
, XMMReg
*s
)
841 d
->XMM_S(0) = float32_div(float32_one
, s
->XMM_S(0), &env
->sse_status
);
844 static inline uint64_t helper_extrq(uint64_t src
, int shift
, int len
)
851 mask
= (1ULL << len
) - 1;
853 return (src
>> shift
) & mask
;
856 void helper_extrq_r(XMMReg
*d
, XMMReg
*s
)
858 d
->XMM_Q(0) = helper_extrq(d
->XMM_Q(0), s
->XMM_B(1), s
->XMM_B(0));
861 void helper_extrq_i(XMMReg
*d
, int index
, int length
)
863 d
->XMM_Q(0) = helper_extrq(d
->XMM_Q(0), index
, length
);
866 static inline uint64_t helper_insertq(uint64_t src
, int shift
, int len
)
873 mask
= (1ULL << len
) - 1;
875 return (src
& ~(mask
<< shift
)) | ((src
& mask
) << shift
);
878 void helper_insertq_r(XMMReg
*d
, XMMReg
*s
)
880 d
->XMM_Q(0) = helper_insertq(s
->XMM_Q(0), s
->XMM_B(9), s
->XMM_B(8));
883 void helper_insertq_i(XMMReg
*d
, int index
, int length
)
885 d
->XMM_Q(0) = helper_insertq(d
->XMM_Q(0), index
, length
);
888 void helper_haddps(XMMReg
*d
, XMMReg
*s
)
892 r
.XMM_S(0) = float32_add(d
->XMM_S(0), d
->XMM_S(1), &env
->sse_status
);
893 r
.XMM_S(1) = float32_add(d
->XMM_S(2), d
->XMM_S(3), &env
->sse_status
);
894 r
.XMM_S(2) = float32_add(s
->XMM_S(0), s
->XMM_S(1), &env
->sse_status
);
895 r
.XMM_S(3) = float32_add(s
->XMM_S(2), s
->XMM_S(3), &env
->sse_status
);
899 void helper_haddpd(XMMReg
*d
, XMMReg
*s
)
903 r
.XMM_D(0) = float64_add(d
->XMM_D(0), d
->XMM_D(1), &env
->sse_status
);
904 r
.XMM_D(1) = float64_add(s
->XMM_D(0), s
->XMM_D(1), &env
->sse_status
);
908 void helper_hsubps(XMMReg
*d
, XMMReg
*s
)
912 r
.XMM_S(0) = float32_sub(d
->XMM_S(0), d
->XMM_S(1), &env
->sse_status
);
913 r
.XMM_S(1) = float32_sub(d
->XMM_S(2), d
->XMM_S(3), &env
->sse_status
);
914 r
.XMM_S(2) = float32_sub(s
->XMM_S(0), s
->XMM_S(1), &env
->sse_status
);
915 r
.XMM_S(3) = float32_sub(s
->XMM_S(2), s
->XMM_S(3), &env
->sse_status
);
919 void helper_hsubpd(XMMReg
*d
, XMMReg
*s
)
923 r
.XMM_D(0) = float64_sub(d
->XMM_D(0), d
->XMM_D(1), &env
->sse_status
);
924 r
.XMM_D(1) = float64_sub(s
->XMM_D(0), s
->XMM_D(1), &env
->sse_status
);
928 void helper_addsubps(XMMReg
*d
, XMMReg
*s
)
930 d
->XMM_S(0) = float32_sub(d
->XMM_S(0), s
->XMM_S(0), &env
->sse_status
);
931 d
->XMM_S(1) = float32_add(d
->XMM_S(1), s
->XMM_S(1), &env
->sse_status
);
932 d
->XMM_S(2) = float32_sub(d
->XMM_S(2), s
->XMM_S(2), &env
->sse_status
);
933 d
->XMM_S(3) = float32_add(d
->XMM_S(3), s
->XMM_S(3), &env
->sse_status
);
936 void helper_addsubpd(XMMReg
*d
, XMMReg
*s
)
938 d
->XMM_D(0) = float64_sub(d
->XMM_D(0), s
->XMM_D(0), &env
->sse_status
);
939 d
->XMM_D(1) = float64_add(d
->XMM_D(1), s
->XMM_D(1), &env
->sse_status
);
943 #define SSE_HELPER_CMP(name, F) \
944 void helper_ ## name ## ps(Reg *d, Reg *s) \
946 d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
947 d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \
948 d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \
949 d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \
952 void helper_ ## name ## ss(Reg *d, Reg *s) \
954 d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
957 void helper_ ## name ## pd(Reg *d, Reg *s) \
959 d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
960 d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \
963 void helper_ ## name ## sd(Reg *d, Reg *s) \
965 d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
/* CMPPS/CMPPD predicate kernels: each yields an all-ones (-1) or
 * all-zeros lane mask.  The *_quiet softfloat variants (eq/unordered)
 * do not signal on quiet NaNs; lt/le are the signaling comparisons —
 * matching the SSE compare-predicate definitions. */
968 #define FPU_CMPEQ(size, a, b) \
969 (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0)
970 #define FPU_CMPLT(size, a, b) \
971 (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0)
972 #define FPU_CMPLE(size, a, b) \
973 (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0)
974 #define FPU_CMPUNORD(size, a, b) \
975 (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0)
/* Negated predicates: same comparisons with the mask inverted. */
976 #define FPU_CMPNEQ(size, a, b) \
977 (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1)
978 #define FPU_CMPNLT(size, a, b) \
979 (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1)
980 #define FPU_CMPNLE(size, a, b) \
981 (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1)
982 #define FPU_CMPORD(size, a, b) \
983 (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1)
/* Generate helper_cmp<pred>{ps,ss,pd,sd} for the eight CMPPS/CMPPD
 * predicates from the FPU_CMP* kernels above. */
985 SSE_HELPER_CMP(cmpeq
, FPU_CMPEQ
)
986 SSE_HELPER_CMP(cmplt
, FPU_CMPLT
)
987 SSE_HELPER_CMP(cmple
, FPU_CMPLE
)
988 SSE_HELPER_CMP(cmpunord
, FPU_CMPUNORD
)
989 SSE_HELPER_CMP(cmpneq
, FPU_CMPNEQ
)
990 SSE_HELPER_CMP(cmpnlt
, FPU_CMPNLT
)
991 SSE_HELPER_CMP(cmpnle
, FPU_CMPNLE
)
992 SSE_HELPER_CMP(cmpord
, FPU_CMPORD
)
/* EFLAGS patterns for (U)COMISS/(U)COMISD, indexed by the softfloat
 * compare result + 1 (see the CC_SRC = comis_eflags[ret + 1] uses
 * below): less -> CF, equal -> ZF, greater -> 0, unordered -> ZF|PF|CF,
 * matching Intel's documented COMISS flag settings.
 * NOTE(review): assumes softfloat's float_relation_* constants are
 * -1/0/1/2 in that order — confirm against softfloat.h. */
994 static const int comis_eflags
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
996 void helper_ucomiss(Reg
*d
, Reg
*s
)
1003 ret
= float32_compare_quiet(s0
, s1
, &env
->sse_status
);
1004 CC_SRC
= comis_eflags
[ret
+ 1];
1007 void helper_comiss(Reg
*d
, Reg
*s
)
1014 ret
= float32_compare(s0
, s1
, &env
->sse_status
);
1015 CC_SRC
= comis_eflags
[ret
+ 1];
1018 void helper_ucomisd(Reg
*d
, Reg
*s
)
1025 ret
= float64_compare_quiet(d0
, d1
, &env
->sse_status
);
1026 CC_SRC
= comis_eflags
[ret
+ 1];
1029 void helper_comisd(Reg
*d
, Reg
*s
)
1036 ret
= float64_compare(d0
, d1
, &env
->sse_status
);
1037 CC_SRC
= comis_eflags
[ret
+ 1];
1040 uint32_t helper_movmskps(Reg
*s
)
1044 b0
= s
->XMM_L(0) >> 31;
1045 b1
= s
->XMM_L(1) >> 31;
1046 b2
= s
->XMM_L(2) >> 31;
1047 b3
= s
->XMM_L(3) >> 31;
1048 return b0
| (b1
<< 1) | (b2
<< 2) | (b3
<< 3);
1051 uint32_t helper_movmskpd(Reg
*s
)
1055 b0
= s
->XMM_L(1) >> 31;
1056 b1
= s
->XMM_L(3) >> 31;
1057 return b0
| (b1
<< 1);
1062 uint32_t glue(helper_pmovmskb
, SUFFIX
)(Reg
*s
)
1067 val
|= (s
->B(0) >> 7);
1068 val
|= (s
->B(1) >> 6) & 0x02;
1069 val
|= (s
->B(2) >> 5) & 0x04;
1070 val
|= (s
->B(3) >> 4) & 0x08;
1071 val
|= (s
->B(4) >> 3) & 0x10;
1072 val
|= (s
->B(5) >> 2) & 0x20;
1073 val
|= (s
->B(6) >> 1) & 0x40;
1074 val
|= (s
->B(7)) & 0x80;
1076 val
|= (s
->B(8) << 1) & 0x0100;
1077 val
|= (s
->B(9) << 2) & 0x0200;
1078 val
|= (s
->B(10) << 3) & 0x0400;
1079 val
|= (s
->B(11) << 4) & 0x0800;
1080 val
|= (s
->B(12) << 5) & 0x1000;
1081 val
|= (s
->B(13) << 6) & 0x2000;
1082 val
|= (s
->B(14) << 7) & 0x4000;
1083 val
|= (s
->B(15) << 8) & 0x8000;
1088 void glue(helper_packsswb
, SUFFIX
)(Reg
*d
, Reg
*s
)
1092 r
.B(0) = satsb((int16_t)d
->W(0));
1093 r
.B(1) = satsb((int16_t)d
->W(1));
1094 r
.B(2) = satsb((int16_t)d
->W(2));
1095 r
.B(3) = satsb((int16_t)d
->W(3));
1097 r
.B(4) = satsb((int16_t)d
->W(4));
1098 r
.B(5) = satsb((int16_t)d
->W(5));
1099 r
.B(6) = satsb((int16_t)d
->W(6));
1100 r
.B(7) = satsb((int16_t)d
->W(7));
1102 r
.B((4 << SHIFT
) + 0) = satsb((int16_t)s
->W(0));
1103 r
.B((4 << SHIFT
) + 1) = satsb((int16_t)s
->W(1));
1104 r
.B((4 << SHIFT
) + 2) = satsb((int16_t)s
->W(2));
1105 r
.B((4 << SHIFT
) + 3) = satsb((int16_t)s
->W(3));
1107 r
.B(12) = satsb((int16_t)s
->W(4));
1108 r
.B(13) = satsb((int16_t)s
->W(5));
1109 r
.B(14) = satsb((int16_t)s
->W(6));
1110 r
.B(15) = satsb((int16_t)s
->W(7));
1115 void glue(helper_packuswb
, SUFFIX
)(Reg
*d
, Reg
*s
)
1119 r
.B(0) = satub((int16_t)d
->W(0));
1120 r
.B(1) = satub((int16_t)d
->W(1));
1121 r
.B(2) = satub((int16_t)d
->W(2));
1122 r
.B(3) = satub((int16_t)d
->W(3));
1124 r
.B(4) = satub((int16_t)d
->W(4));
1125 r
.B(5) = satub((int16_t)d
->W(5));
1126 r
.B(6) = satub((int16_t)d
->W(6));
1127 r
.B(7) = satub((int16_t)d
->W(7));
1129 r
.B((4 << SHIFT
) + 0) = satub((int16_t)s
->W(0));
1130 r
.B((4 << SHIFT
) + 1) = satub((int16_t)s
->W(1));
1131 r
.B((4 << SHIFT
) + 2) = satub((int16_t)s
->W(2));
1132 r
.B((4 << SHIFT
) + 3) = satub((int16_t)s
->W(3));
1134 r
.B(12) = satub((int16_t)s
->W(4));
1135 r
.B(13) = satub((int16_t)s
->W(5));
1136 r
.B(14) = satub((int16_t)s
->W(6));
1137 r
.B(15) = satub((int16_t)s
->W(7));
1142 void glue(helper_packssdw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1146 r
.W(0) = satsw(d
->L(0));
1147 r
.W(1) = satsw(d
->L(1));
1149 r
.W(2) = satsw(d
->L(2));
1150 r
.W(3) = satsw(d
->L(3));
1152 r
.W((2 << SHIFT
) + 0) = satsw(s
->L(0));
1153 r
.W((2 << SHIFT
) + 1) = satsw(s
->L(1));
1155 r
.W(6) = satsw(s
->L(2));
1156 r
.W(7) = satsw(s
->L(3));
1161 #define UNPCK_OP(base_name, base) \
1163 void glue(helper_punpck ## base_name ## bw, SUFFIX)(Reg *d, Reg *s) \
1167 r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
1168 r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
1169 r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
1170 r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
1171 r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
1172 r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
1173 r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
1174 r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
1176 r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
1177 r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
1178 r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
1179 r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
1180 r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
1181 r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
1182 r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
1183 r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
1188 void glue(helper_punpck ## base_name ## wd, SUFFIX)(Reg *d, Reg *s) \
1192 r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
1193 r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
1194 r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
1195 r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
1197 r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
1198 r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
1199 r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
1200 r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
1205 void glue(helper_punpck ## base_name ## dq, SUFFIX)(Reg *d, Reg *s) \
1209 r.L(0) = d->L((base << SHIFT) + 0); \
1210 r.L(1) = s->L((base << SHIFT) + 0); \
1212 r.L(2) = d->L((base << SHIFT) + 1); \
1213 r.L(3) = s->L((base << SHIFT) + 1); \
1219 void glue(helper_punpck ## base_name ## qdq, SUFFIX)(Reg *d, \
1224 r.Q(0) = d->Q(base); \
1225 r.Q(1) = s->Q(base); \
1233 /* 3DNow! float ops */
1235 void helper_pi2fd(MMXReg
*d
, MMXReg
*s
)
1237 d
->MMX_S(0) = int32_to_float32(s
->MMX_L(0), &env
->mmx_status
);
1238 d
->MMX_S(1) = int32_to_float32(s
->MMX_L(1), &env
->mmx_status
);
1241 void helper_pi2fw(MMXReg
*d
, MMXReg
*s
)
1243 d
->MMX_S(0) = int32_to_float32((int16_t)s
->MMX_W(0), &env
->mmx_status
);
1244 d
->MMX_S(1) = int32_to_float32((int16_t)s
->MMX_W(2), &env
->mmx_status
);
1247 void helper_pf2id(MMXReg
*d
, MMXReg
*s
)
1249 d
->MMX_L(0) = float32_to_int32_round_to_zero(s
->MMX_S(0), &env
->mmx_status
);
1250 d
->MMX_L(1) = float32_to_int32_round_to_zero(s
->MMX_S(1), &env
->mmx_status
);
1253 void helper_pf2iw(MMXReg
*d
, MMXReg
*s
)
1255 d
->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s
->MMX_S(0),
1257 d
->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s
->MMX_S(1),
1261 void helper_pfacc(MMXReg
*d
, MMXReg
*s
)
1265 r
.MMX_S(0) = float32_add(d
->MMX_S(0), d
->MMX_S(1), &env
->mmx_status
);
1266 r
.MMX_S(1) = float32_add(s
->MMX_S(0), s
->MMX_S(1), &env
->mmx_status
);
1270 void helper_pfadd(MMXReg
*d
, MMXReg
*s
)
1272 d
->MMX_S(0) = float32_add(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
);
1273 d
->MMX_S(1) = float32_add(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
);
1276 void helper_pfcmpeq(MMXReg
*d
, MMXReg
*s
)
1278 d
->MMX_L(0) = float32_eq_quiet(d
->MMX_S(0), s
->MMX_S(0),
1279 &env
->mmx_status
) ? -1 : 0;
1280 d
->MMX_L(1) = float32_eq_quiet(d
->MMX_S(1), s
->MMX_S(1),
1281 &env
->mmx_status
) ? -1 : 0;
1284 void helper_pfcmpge(MMXReg
*d
, MMXReg
*s
)
1286 d
->MMX_L(0) = float32_le(s
->MMX_S(0), d
->MMX_S(0),
1287 &env
->mmx_status
) ? -1 : 0;
1288 d
->MMX_L(1) = float32_le(s
->MMX_S(1), d
->MMX_S(1),
1289 &env
->mmx_status
) ? -1 : 0;
1292 void helper_pfcmpgt(MMXReg
*d
, MMXReg
*s
)
1294 d
->MMX_L(0) = float32_lt(s
->MMX_S(0), d
->MMX_S(0),
1295 &env
->mmx_status
) ? -1 : 0;
1296 d
->MMX_L(1) = float32_lt(s
->MMX_S(1), d
->MMX_S(1),
1297 &env
->mmx_status
) ? -1 : 0;
1300 void helper_pfmax(MMXReg
*d
, MMXReg
*s
)
1302 if (float32_lt(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
)) {
1303 d
->MMX_S(0) = s
->MMX_S(0);
1305 if (float32_lt(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
)) {
1306 d
->MMX_S(1) = s
->MMX_S(1);
1310 void helper_pfmin(MMXReg
*d
, MMXReg
*s
)
1312 if (float32_lt(s
->MMX_S(0), d
->MMX_S(0), &env
->mmx_status
)) {
1313 d
->MMX_S(0) = s
->MMX_S(0);
1315 if (float32_lt(s
->MMX_S(1), d
->MMX_S(1), &env
->mmx_status
)) {
1316 d
->MMX_S(1) = s
->MMX_S(1);
1320 void helper_pfmul(MMXReg
*d
, MMXReg
*s
)
1322 d
->MMX_S(0) = float32_mul(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
);
1323 d
->MMX_S(1) = float32_mul(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
);
1326 void helper_pfnacc(MMXReg
*d
, MMXReg
*s
)
1330 r
.MMX_S(0) = float32_sub(d
->MMX_S(0), d
->MMX_S(1), &env
->mmx_status
);
1331 r
.MMX_S(1) = float32_sub(s
->MMX_S(0), s
->MMX_S(1), &env
->mmx_status
);
1335 void helper_pfpnacc(MMXReg
*d
, MMXReg
*s
)
1339 r
.MMX_S(0) = float32_sub(d
->MMX_S(0), d
->MMX_S(1), &env
->mmx_status
);
1340 r
.MMX_S(1) = float32_add(s
->MMX_S(0), s
->MMX_S(1), &env
->mmx_status
);
1344 void helper_pfrcp(MMXReg
*d
, MMXReg
*s
)
1346 d
->MMX_S(0) = float32_div(float32_one
, s
->MMX_S(0), &env
->mmx_status
);
1347 d
->MMX_S(1) = d
->MMX_S(0);
1350 void helper_pfrsqrt(MMXReg
*d
, MMXReg
*s
)
1352 d
->MMX_L(1) = s
->MMX_L(0) & 0x7fffffff;
1353 d
->MMX_S(1) = float32_div(float32_one
,
1354 float32_sqrt(d
->MMX_S(1), &env
->mmx_status
),
1356 d
->MMX_L(1) |= s
->MMX_L(0) & 0x80000000;
1357 d
->MMX_L(0) = d
->MMX_L(1);
1360 void helper_pfsub(MMXReg
*d
, MMXReg
*s
)
1362 d
->MMX_S(0) = float32_sub(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
);
1363 d
->MMX_S(1) = float32_sub(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
);
1366 void helper_pfsubr(MMXReg
*d
, MMXReg
*s
)
1368 d
->MMX_S(0) = float32_sub(s
->MMX_S(0), d
->MMX_S(0), &env
->mmx_status
);
1369 d
->MMX_S(1) = float32_sub(s
->MMX_S(1), d
->MMX_S(1), &env
->mmx_status
);
1372 void helper_pswapd(MMXReg
*d
, MMXReg
*s
)
1376 r
.MMX_L(0) = s
->MMX_L(1);
1377 r
.MMX_L(1) = s
->MMX_L(0);
1382 /* SSSE3 op helpers */
1383 void glue(helper_pshufb
, SUFFIX
)(Reg
*d
, Reg
*s
)
1388 for (i
= 0; i
< (8 << SHIFT
); i
++) {
1389 r
.B(i
) = (s
->B(i
) & 0x80) ? 0 : (d
->B(s
->B(i
) & ((8 << SHIFT
) - 1)));
1395 void glue(helper_phaddw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1397 d
->W(0) = (int16_t)d
->W(0) + (int16_t)d
->W(1);
1398 d
->W(1) = (int16_t)d
->W(2) + (int16_t)d
->W(3);
1399 XMM_ONLY(d
->W(2) = (int16_t)d
->W(4) + (int16_t)d
->W(5));
1400 XMM_ONLY(d
->W(3) = (int16_t)d
->W(6) + (int16_t)d
->W(7));
1401 d
->W((2 << SHIFT
) + 0) = (int16_t)s
->W(0) + (int16_t)s
->W(1);
1402 d
->W((2 << SHIFT
) + 1) = (int16_t)s
->W(2) + (int16_t)s
->W(3);
1403 XMM_ONLY(d
->W(6) = (int16_t)s
->W(4) + (int16_t)s
->W(5));
1404 XMM_ONLY(d
->W(7) = (int16_t)s
->W(6) + (int16_t)s
->W(7));
1407 void glue(helper_phaddd
, SUFFIX
)(Reg
*d
, Reg
*s
)
1409 d
->L(0) = (int32_t)d
->L(0) + (int32_t)d
->L(1);
1410 XMM_ONLY(d
->L(1) = (int32_t)d
->L(2) + (int32_t)d
->L(3));
1411 d
->L((1 << SHIFT
) + 0) = (int32_t)s
->L(0) + (int32_t)s
->L(1);
1412 XMM_ONLY(d
->L(3) = (int32_t)s
->L(2) + (int32_t)s
->L(3));
1415 void glue(helper_phaddsw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1417 d
->W(0) = satsw((int16_t)d
->W(0) + (int16_t)d
->W(1));
1418 d
->W(1) = satsw((int16_t)d
->W(2) + (int16_t)d
->W(3));
1419 XMM_ONLY(d
->W(2) = satsw((int16_t)d
->W(4) + (int16_t)d
->W(5)));
1420 XMM_ONLY(d
->W(3) = satsw((int16_t)d
->W(6) + (int16_t)d
->W(7)));
1421 d
->W((2 << SHIFT
) + 0) = satsw((int16_t)s
->W(0) + (int16_t)s
->W(1));
1422 d
->W((2 << SHIFT
) + 1) = satsw((int16_t)s
->W(2) + (int16_t)s
->W(3));
1423 XMM_ONLY(d
->W(6) = satsw((int16_t)s
->W(4) + (int16_t)s
->W(5)));
1424 XMM_ONLY(d
->W(7) = satsw((int16_t)s
->W(6) + (int16_t)s
->W(7)));
1427 void glue(helper_pmaddubsw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1429 d
->W(0) = satsw((int8_t)s
->B(0) * (uint8_t)d
->B(0) +
1430 (int8_t)s
->B(1) * (uint8_t)d
->B(1));
1431 d
->W(1) = satsw((int8_t)s
->B(2) * (uint8_t)d
->B(2) +
1432 (int8_t)s
->B(3) * (uint8_t)d
->B(3));
1433 d
->W(2) = satsw((int8_t)s
->B(4) * (uint8_t)d
->B(4) +
1434 (int8_t)s
->B(5) * (uint8_t)d
->B(5));
1435 d
->W(3) = satsw((int8_t)s
->B(6) * (uint8_t)d
->B(6) +
1436 (int8_t)s
->B(7) * (uint8_t)d
->B(7));
1438 d
->W(4) = satsw((int8_t)s
->B(8) * (uint8_t)d
->B(8) +
1439 (int8_t)s
->B(9) * (uint8_t)d
->B(9));
1440 d
->W(5) = satsw((int8_t)s
->B(10) * (uint8_t)d
->B(10) +
1441 (int8_t)s
->B(11) * (uint8_t)d
->B(11));
1442 d
->W(6) = satsw((int8_t)s
->B(12) * (uint8_t)d
->B(12) +
1443 (int8_t)s
->B(13) * (uint8_t)d
->B(13));
1444 d
->W(7) = satsw((int8_t)s
->B(14) * (uint8_t)d
->B(14) +
1445 (int8_t)s
->B(15) * (uint8_t)d
->B(15));
1449 void glue(helper_phsubw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1451 d
->W(0) = (int16_t)d
->W(0) - (int16_t)d
->W(1);
1452 d
->W(1) = (int16_t)d
->W(2) - (int16_t)d
->W(3);
1453 XMM_ONLY(d
->W(2) = (int16_t)d
->W(4) - (int16_t)d
->W(5));
1454 XMM_ONLY(d
->W(3) = (int16_t)d
->W(6) - (int16_t)d
->W(7));
1455 d
->W((2 << SHIFT
) + 0) = (int16_t)s
->W(0) - (int16_t)s
->W(1);
1456 d
->W((2 << SHIFT
) + 1) = (int16_t)s
->W(2) - (int16_t)s
->W(3);
1457 XMM_ONLY(d
->W(6) = (int16_t)s
->W(4) - (int16_t)s
->W(5));
1458 XMM_ONLY(d
->W(7) = (int16_t)s
->W(6) - (int16_t)s
->W(7));
1461 void glue(helper_phsubd
, SUFFIX
)(Reg
*d
, Reg
*s
)
1463 d
->L(0) = (int32_t)d
->L(0) - (int32_t)d
->L(1);
1464 XMM_ONLY(d
->L(1) = (int32_t)d
->L(2) - (int32_t)d
->L(3));
1465 d
->L((1 << SHIFT
) + 0) = (int32_t)s
->L(0) - (int32_t)s
->L(1);
1466 XMM_ONLY(d
->L(3) = (int32_t)s
->L(2) - (int32_t)s
->L(3));
1469 void glue(helper_phsubsw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1471 d
->W(0) = satsw((int16_t)d
->W(0) - (int16_t)d
->W(1));
1472 d
->W(1) = satsw((int16_t)d
->W(2) - (int16_t)d
->W(3));
1473 XMM_ONLY(d
->W(2) = satsw((int16_t)d
->W(4) - (int16_t)d
->W(5)));
1474 XMM_ONLY(d
->W(3) = satsw((int16_t)d
->W(6) - (int16_t)d
->W(7)));
1475 d
->W((2 << SHIFT
) + 0) = satsw((int16_t)s
->W(0) - (int16_t)s
->W(1));
1476 d
->W((2 << SHIFT
) + 1) = satsw((int16_t)s
->W(2) - (int16_t)s
->W(3));
1477 XMM_ONLY(d
->W(6) = satsw((int16_t)s
->W(4) - (int16_t)s
->W(5)));
1478 XMM_ONLY(d
->W(7) = satsw((int16_t)s
->W(6) - (int16_t)s
->W(7)));
1481 #define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x)
1482 #define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x)
1483 #define FABSL(_, x) (x > INT32_MAX ? -(int32_t)x : x)
1484 SSE_HELPER_B(helper_pabsb
, FABSB
)
1485 SSE_HELPER_W(helper_pabsw
, FABSW
)
1486 SSE_HELPER_L(helper_pabsd
, FABSL
)
1488 #define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15)
1489 SSE_HELPER_W(helper_pmulhrsw
, FMULHRSW
)
1491 #define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d)
1492 #define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d)
1493 #define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d)
1494 SSE_HELPER_B(helper_psignb
, FSIGNB
)
1495 SSE_HELPER_W(helper_psignw
, FSIGNW
)
1496 SSE_HELPER_L(helper_psignd
, FSIGNL
)
1498 void glue(helper_palignr
, SUFFIX
)(Reg
*d
, Reg
*s
, int32_t shift
)
1502 /* XXX could be checked during translation */
1503 if (shift
>= (16 << SHIFT
)) {
1505 XMM_ONLY(r
.Q(1) = 0);
1508 #define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
1510 r
.Q(0) = SHR(s
->Q(0), shift
- 0) |
1511 SHR(d
->Q(0), shift
- 64);
1513 r
.Q(0) = SHR(s
->Q(0), shift
- 0) |
1514 SHR(s
->Q(1), shift
- 64) |
1515 SHR(d
->Q(0), shift
- 128) |
1516 SHR(d
->Q(1), shift
- 192);
1517 r
.Q(1) = SHR(s
->Q(0), shift
+ 64) |
1518 SHR(s
->Q(1), shift
- 0) |
1519 SHR(d
->Q(0), shift
- 64) |
1520 SHR(d
->Q(1), shift
- 128);
/* XMM0 is the implicit third operand of the SSE4.1 variable-blend
 * instructions (PBLENDVB/BLENDVPS/BLENDVPD). */
#define XMM0 (env->xmm_regs[0])

/* Expand a per-element ternary helper F(dst, src, mask) over 'num'
 * elements of type accessor 'elem'; the mask element comes from XMM0. */
#define SSE_HELPER_V(name, elem, num, F)                                \
    void glue(name, SUFFIX)(Reg *d, Reg *s)                             \
    {                                                                   \
        d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0));           \
        d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1));           \
        if (num > 2) {                                                  \
            d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2));       \
            d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3));       \
            if (num > 4) {                                              \
                d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4));   \
                d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5));   \
                d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6));   \
                d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7));   \
                if (num > 8) {                                          \
                    d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \
                    d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \
                    d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \
                    d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \
                    d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \
                    d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \
                    d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \
                    d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \
                }                                                       \
            }                                                           \
        }                                                               \
    }
/* Expand a per-element ternary helper F(dst, src, bit) over 'num'
 * elements, where the third operand is bit i of an 8/16-bit immediate
 * (used by the immediate-controlled blend instructions). */
#define SSE_HELPER_I(name, elem, num, F)                                \
    void glue(name, SUFFIX)(Reg *d, Reg *s, uint32_t imm)               \
    {                                                                   \
        d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1));       \
        d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1));       \
        if (num > 2) {                                                  \
            d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1));   \
            d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1));   \
            if (num > 4) {                                              \
                d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \
                d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \
                d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \
                d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \
                if (num > 8) {                                          \
                    d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \
                    d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \
                    d->elem(10) = F(d->elem(10), s->elem(10),           \
                                    ((imm >> 10) & 1));                 \
                    d->elem(11) = F(d->elem(11), s->elem(11),           \
                                    ((imm >> 11) & 1));                 \
                    d->elem(12) = F(d->elem(12), s->elem(12),           \
                                    ((imm >> 12) & 1));                 \
                    d->elem(13) = F(d->elem(13), s->elem(13),           \
                                    ((imm >> 13) & 1));                 \
                    d->elem(14) = F(d->elem(14), s->elem(14),           \
                                    ((imm >> 14) & 1));                 \
                    d->elem(15) = F(d->elem(15), s->elem(15),           \
                                    ((imm >> 15) & 1));                 \
                }                                                       \
            }                                                           \
        }                                                               \
    }
1591 /* SSE4.1 op helpers */
1592 #define FBLENDVB(d, s, m) ((m & 0x80) ? s : d)
1593 #define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d)
1594 #define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d)
1595 SSE_HELPER_V(helper_pblendvb
, B
, 16, FBLENDVB
)
1596 SSE_HELPER_V(helper_blendvps
, L
, 4, FBLENDVPS
)
1597 SSE_HELPER_V(helper_blendvpd
, Q
, 2, FBLENDVPD
)
1599 void glue(helper_ptest
, SUFFIX
)(Reg
*d
, Reg
*s
)
1601 uint64_t zf
= (s
->Q(0) & d
->Q(0)) | (s
->Q(1) & d
->Q(1));
1602 uint64_t cf
= (s
->Q(0) & ~d
->Q(0)) | (s
->Q(1) & ~d
->Q(1));
1604 CC_SRC
= (zf
? 0 : CC_Z
) | (cf
? 0 : CC_C
);
1607 #define SSE_HELPER_F(name, elem, num, F) \
1608 void glue(name, SUFFIX)(Reg *d, Reg *s) \
1610 d->elem(0) = F(0); \
1611 d->elem(1) = F(1); \
1613 d->elem(2) = F(2); \
1614 d->elem(3) = F(3); \
1616 d->elem(4) = F(4); \
1617 d->elem(5) = F(5); \
1618 d->elem(6) = F(6); \
1619 d->elem(7) = F(7); \
1624 SSE_HELPER_F(helper_pmovsxbw
, W
, 8, (int8_t) s
->B
)
1625 SSE_HELPER_F(helper_pmovsxbd
, L
, 4, (int8_t) s
->B
)
1626 SSE_HELPER_F(helper_pmovsxbq
, Q
, 2, (int8_t) s
->B
)
1627 SSE_HELPER_F(helper_pmovsxwd
, L
, 4, (int16_t) s
->W
)
1628 SSE_HELPER_F(helper_pmovsxwq
, Q
, 2, (int16_t) s
->W
)
1629 SSE_HELPER_F(helper_pmovsxdq
, Q
, 2, (int32_t) s
->L
)
1630 SSE_HELPER_F(helper_pmovzxbw
, W
, 8, s
->B
)
1631 SSE_HELPER_F(helper_pmovzxbd
, L
, 4, s
->B
)
1632 SSE_HELPER_F(helper_pmovzxbq
, Q
, 2, s
->B
)
1633 SSE_HELPER_F(helper_pmovzxwd
, L
, 4, s
->W
)
1634 SSE_HELPER_F(helper_pmovzxwq
, Q
, 2, s
->W
)
1635 SSE_HELPER_F(helper_pmovzxdq
, Q
, 2, s
->L
)
1637 void glue(helper_pmuldq
, SUFFIX
)(Reg
*d
, Reg
*s
)
1639 d
->Q(0) = (int64_t)(int32_t) d
->L(0) * (int32_t) s
->L(0);
1640 d
->Q(1) = (int64_t)(int32_t) d
->L(2) * (int32_t) s
->L(2);
1643 #define FCMPEQQ(d, s) (d == s ? -1 : 0)
1644 SSE_HELPER_Q(helper_pcmpeqq
, FCMPEQQ
)
1646 void glue(helper_packusdw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1648 d
->W(0) = satuw((int32_t) d
->L(0));
1649 d
->W(1) = satuw((int32_t) d
->L(1));
1650 d
->W(2) = satuw((int32_t) d
->L(2));
1651 d
->W(3) = satuw((int32_t) d
->L(3));
1652 d
->W(4) = satuw((int32_t) s
->L(0));
1653 d
->W(5) = satuw((int32_t) s
->L(1));
1654 d
->W(6) = satuw((int32_t) s
->L(2));
1655 d
->W(7) = satuw((int32_t) s
->L(3));
1658 #define FMINSB(d, s) MIN((int8_t)d, (int8_t)s)
1659 #define FMINSD(d, s) MIN((int32_t)d, (int32_t)s)
1660 #define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s)
1661 #define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s)
1662 SSE_HELPER_B(helper_pminsb
, FMINSB
)
1663 SSE_HELPER_L(helper_pminsd
, FMINSD
)
1664 SSE_HELPER_W(helper_pminuw
, MIN
)
1665 SSE_HELPER_L(helper_pminud
, MIN
)
1666 SSE_HELPER_B(helper_pmaxsb
, FMAXSB
)
1667 SSE_HELPER_L(helper_pmaxsd
, FMAXSD
)
1668 SSE_HELPER_W(helper_pmaxuw
, MAX
)
1669 SSE_HELPER_L(helper_pmaxud
, MAX
)
1671 #define FMULLD(d, s) ((int32_t)d * (int32_t)s)
1672 SSE_HELPER_L(helper_pmulld
, FMULLD
)
1674 void glue(helper_phminposuw
, SUFFIX
)(Reg
*d
, Reg
*s
)
1678 if (s
->W(1) < s
->W(idx
)) {
1681 if (s
->W(2) < s
->W(idx
)) {
1684 if (s
->W(3) < s
->W(idx
)) {
1687 if (s
->W(4) < s
->W(idx
)) {
1690 if (s
->W(5) < s
->W(idx
)) {
1693 if (s
->W(6) < s
->W(idx
)) {
1696 if (s
->W(7) < s
->W(idx
)) {
1703 d
->W(0) = s
->W(idx
);
1706 void glue(helper_roundps
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t mode
)
1708 signed char prev_rounding_mode
;
1710 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1711 if (!(mode
& (1 << 2))) {
1714 set_float_rounding_mode(float_round_nearest_even
, &env
->sse_status
);
1717 set_float_rounding_mode(float_round_down
, &env
->sse_status
);
1720 set_float_rounding_mode(float_round_up
, &env
->sse_status
);
1723 set_float_rounding_mode(float_round_to_zero
, &env
->sse_status
);
1728 d
->XMM_S(0) = float32_round_to_int(s
->XMM_S(0), &env
->sse_status
);
1729 d
->XMM_S(1) = float32_round_to_int(s
->XMM_S(1), &env
->sse_status
);
1730 d
->XMM_S(2) = float32_round_to_int(s
->XMM_S(2), &env
->sse_status
);
1731 d
->XMM_S(3) = float32_round_to_int(s
->XMM_S(3), &env
->sse_status
);
1734 if (mode
& (1 << 3)) {
1735 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1736 ~float_flag_inexact
,
1740 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1743 void glue(helper_roundpd
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t mode
)
1745 signed char prev_rounding_mode
;
1747 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1748 if (!(mode
& (1 << 2))) {
1751 set_float_rounding_mode(float_round_nearest_even
, &env
->sse_status
);
1754 set_float_rounding_mode(float_round_down
, &env
->sse_status
);
1757 set_float_rounding_mode(float_round_up
, &env
->sse_status
);
1760 set_float_rounding_mode(float_round_to_zero
, &env
->sse_status
);
1765 d
->XMM_D(0) = float64_round_to_int(s
->XMM_D(0), &env
->sse_status
);
1766 d
->XMM_D(1) = float64_round_to_int(s
->XMM_D(1), &env
->sse_status
);
1769 if (mode
& (1 << 3)) {
1770 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1771 ~float_flag_inexact
,
1775 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1778 void glue(helper_roundss
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t mode
)
1780 signed char prev_rounding_mode
;
1782 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1783 if (!(mode
& (1 << 2))) {
1786 set_float_rounding_mode(float_round_nearest_even
, &env
->sse_status
);
1789 set_float_rounding_mode(float_round_down
, &env
->sse_status
);
1792 set_float_rounding_mode(float_round_up
, &env
->sse_status
);
1795 set_float_rounding_mode(float_round_to_zero
, &env
->sse_status
);
1800 d
->XMM_S(0) = float32_round_to_int(s
->XMM_S(0), &env
->sse_status
);
1803 if (mode
& (1 << 3)) {
1804 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1805 ~float_flag_inexact
,
1809 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1812 void glue(helper_roundsd
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t mode
)
1814 signed char prev_rounding_mode
;
1816 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1817 if (!(mode
& (1 << 2))) {
1820 set_float_rounding_mode(float_round_nearest_even
, &env
->sse_status
);
1823 set_float_rounding_mode(float_round_down
, &env
->sse_status
);
1826 set_float_rounding_mode(float_round_up
, &env
->sse_status
);
1829 set_float_rounding_mode(float_round_to_zero
, &env
->sse_status
);
1834 d
->XMM_D(0) = float64_round_to_int(s
->XMM_D(0), &env
->sse_status
);
1837 if (mode
& (1 << 3)) {
1838 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1839 ~float_flag_inexact
,
1843 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1846 #define FBLENDP(d, s, m) (m ? s : d)
1847 SSE_HELPER_I(helper_blendps
, L
, 4, FBLENDP
)
1848 SSE_HELPER_I(helper_blendpd
, Q
, 2, FBLENDP
)
1849 SSE_HELPER_I(helper_pblendw
, W
, 8, FBLENDP
)
1851 void glue(helper_dpps
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t mask
)
1853 float32 iresult
= float32_zero
;
1855 if (mask
& (1 << 4)) {
1856 iresult
= float32_add(iresult
,
1857 float32_mul(d
->XMM_S(0), s
->XMM_S(0),
1861 if (mask
& (1 << 5)) {
1862 iresult
= float32_add(iresult
,
1863 float32_mul(d
->XMM_S(1), s
->XMM_S(1),
1867 if (mask
& (1 << 6)) {
1868 iresult
= float32_add(iresult
,
1869 float32_mul(d
->XMM_S(2), s
->XMM_S(2),
1873 if (mask
& (1 << 7)) {
1874 iresult
= float32_add(iresult
,
1875 float32_mul(d
->XMM_S(3), s
->XMM_S(3),
1879 d
->XMM_S(0) = (mask
& (1 << 0)) ? iresult
: float32_zero
;
1880 d
->XMM_S(1) = (mask
& (1 << 1)) ? iresult
: float32_zero
;
1881 d
->XMM_S(2) = (mask
& (1 << 2)) ? iresult
: float32_zero
;
1882 d
->XMM_S(3) = (mask
& (1 << 3)) ? iresult
: float32_zero
;
1885 void glue(helper_dppd
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t mask
)
1887 float64 iresult
= float64_zero
;
1889 if (mask
& (1 << 4)) {
1890 iresult
= float64_add(iresult
,
1891 float64_mul(d
->XMM_D(0), s
->XMM_D(0),
1895 if (mask
& (1 << 5)) {
1896 iresult
= float64_add(iresult
,
1897 float64_mul(d
->XMM_D(1), s
->XMM_D(1),
1901 d
->XMM_D(0) = (mask
& (1 << 0)) ? iresult
: float64_zero
;
1902 d
->XMM_D(1) = (mask
& (1 << 1)) ? iresult
: float64_zero
;
1905 void glue(helper_mpsadbw
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t offset
)
1907 int s0
= (offset
& 3) << 2;
1908 int d0
= (offset
& 4) << 0;
1912 for (i
= 0; i
< 8; i
++, d0
++) {
1914 r
.W(i
) += abs1(d
->B(d0
+ 0) - s
->B(s0
+ 0));
1915 r
.W(i
) += abs1(d
->B(d0
+ 1) - s
->B(s0
+ 1));
1916 r
.W(i
) += abs1(d
->B(d0
+ 2) - s
->B(s0
+ 2));
1917 r
.W(i
) += abs1(d
->B(d0
+ 3) - s
->B(s0
+ 3));
1923 /* SSE4.2 op helpers */
1924 /* it's unclear whether signed or unsigned */
1925 #define FCMPGTQ(d, s) (d > s ? -1 : 0)
1926 SSE_HELPER_Q(helper_pcmpgtq
, FCMPGTQ
)
1928 static inline int pcmp_elen(int reg
, uint32_t ctrl
)
1932 /* Presence of REX.W is indicated by a bit higher than 7 set */
1934 val
= abs1((int64_t)env
->regs
[reg
]);
1936 val
= abs1((int32_t)env
->regs
[reg
]);
1951 static inline int pcmp_ilen(Reg
*r
, uint8_t ctrl
)
1956 while (val
< 8 && r
->W(val
)) {
1960 while (val
< 16 && r
->B(val
)) {
1968 static inline int pcmp_val(Reg
*r
, uint8_t ctrl
, int i
)
1970 switch ((ctrl
>> 0) & 3) {
1976 return (int8_t)r
->B(i
);
1979 return (int16_t)r
->W(i
);
1983 static inline unsigned pcmpxstrx(Reg
*d
, Reg
*s
,
1984 int8_t ctrl
, int valids
, int validd
)
1986 unsigned int res
= 0;
1989 int upper
= (ctrl
& 1) ? 7 : 15;
1994 CC_SRC
= (valids
< upper
? CC_Z
: 0) | (validd
< upper
? CC_S
: 0);
1996 switch ((ctrl
>> 2) & 3) {
1998 for (j
= valids
; j
>= 0; j
--) {
2000 v
= pcmp_val(s
, ctrl
, j
);
2001 for (i
= validd
; i
>= 0; i
--) {
2002 res
|= (v
== pcmp_val(d
, ctrl
, i
));
2007 for (j
= valids
; j
>= 0; j
--) {
2009 v
= pcmp_val(s
, ctrl
, j
);
2010 for (i
= ((validd
- 1) | 1); i
>= 0; i
-= 2) {
2011 res
|= (pcmp_val(d
, ctrl
, i
- 0) <= v
&&
2012 pcmp_val(d
, ctrl
, i
- 1) >= v
);
2017 res
= (2 << (upper
- MAX(valids
, validd
))) - 1;
2018 res
<<= MAX(valids
, validd
) - MIN(valids
, validd
);
2019 for (i
= MIN(valids
, validd
); i
>= 0; i
--) {
2021 v
= pcmp_val(s
, ctrl
, i
);
2022 res
|= (v
== pcmp_val(d
, ctrl
, i
));
2026 for (j
= valids
- validd
; j
>= 0; j
--) {
2029 for (i
= MIN(upper
- j
, validd
); i
>= 0; i
--) {
2030 res
&= (pcmp_val(s
, ctrl
, i
+ j
) == pcmp_val(d
, ctrl
, i
));
2036 switch ((ctrl
>> 4) & 3) {
2038 res
^= (2 << upper
) - 1;
2041 res
^= (2 << valids
) - 1;
/* 1-based position of the most-significant set bit of val (reverse
 * find-first-set) via binary search over halves of the word. */
static inline int rffs1(unsigned int val)
{
    int ret = 1, hi;

    for (hi = sizeof(val) * 4; hi; hi /= 2) {
        if (val >> hi) {
            val >>= hi;
            ret += hi;
        }
    }

    return ret;
}
/* 1-based position of the least-significant set bit of val, via binary
 * search over halves of the word. */
static inline int ffs1(unsigned int val)
{
    int ret = 1, hi;

    for (hi = sizeof(val) * 4; hi; hi /= 2) {
        if (val << hi) {
            val <<= hi;
        } else {
            ret += hi;
        }
    }

    return ret;
}
2083 void glue(helper_pcmpestri
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t ctrl
)
2085 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
2086 pcmp_elen(R_EDX
, ctrl
),
2087 pcmp_elen(R_EAX
, ctrl
));
2090 env
->regs
[R_ECX
] = ((ctrl
& (1 << 6)) ? rffs1
: ffs1
)(res
) - 1;
2092 env
->regs
[R_ECX
] = 16 >> (ctrl
& (1 << 0));
2096 void glue(helper_pcmpestrm
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t ctrl
)
2099 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
2100 pcmp_elen(R_EDX
, ctrl
),
2101 pcmp_elen(R_EAX
, ctrl
));
2103 if ((ctrl
>> 6) & 1) {
2105 for (i
= 0; i
< 8; i
++, res
>>= 1) {
2106 d
->W(i
) = (res
& 1) ? ~0 : 0;
2109 for (i
= 0; i
< 16; i
++, res
>>= 1) {
2110 d
->B(i
) = (res
& 1) ? ~0 : 0;
2119 void glue(helper_pcmpistri
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t ctrl
)
2121 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
2123 pcmp_ilen(d
, ctrl
));
2126 env
->regs
[R_ECX
] = ((ctrl
& (1 << 6)) ? rffs1
: ffs1
)(res
) - 1;
2128 env
->regs
[R_ECX
] = 16 >> (ctrl
& (1 << 0));
2132 void glue(helper_pcmpistrm
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t ctrl
)
2135 unsigned int res
= pcmpxstrx(d
, s
, ctrl
,
2137 pcmp_ilen(d
, ctrl
));
2139 if ((ctrl
>> 6) & 1) {
2141 for (i
= 0; i
< 8; i
++, res
>>= 1) {
2142 d
->W(i
) = (res
& 1) ? ~0 : 0;
2145 for (i
= 0; i
< 16; i
++, res
>>= 1) {
2146 d
->B(i
) = (res
& 1) ? ~0 : 0;
2155 #define CRCPOLY 0x1edc6f41
2156 #define CRCPOLY_BITREV 0x82f63b78
2157 target_ulong
helper_crc32(uint32_t crc1
, target_ulong msg
, uint32_t len
)
2159 target_ulong crc
= (msg
& ((target_ulong
) -1 >>
2160 (TARGET_LONG_BITS
- len
))) ^ crc1
;
2163 crc
= (crc
>> 1) ^ ((crc
& 1) ? CRCPOLY_BITREV
: 0);
2169 #define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
2170 #define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i)))
2171 target_ulong
helper_popcnt(target_ulong n
, uint32_t type
)
2173 CC_SRC
= n
? 0 : CC_Z
;
2184 #ifndef TARGET_X86_64
2191 return POPCOUNT(n
, 5);