2 * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
4 * Copyright (c) 2005 Fabrice Bellard
5 * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com>
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "crypto/aes.h"
22 #include "crypto/aes-round.h"
23 #include "crypto/clmul.h"
35 #define XMM_ONLY(...) __VA_ARGS__
/* Bytes per vector lane: 16 when SHIFT != 0 (XMM/YMM paths), 8 otherwise
   (presumably the MMX path — confirm against the template's SHIFT users). */
47 #define LANE_WIDTH (SHIFT ? 16 : 8)
/* Half a lane in bytes; used by the pack/unpack helpers further down. */
48 #define PACK_WIDTH (LANE_WIDTH / 2)
/*
 * Element-wise shift primitives for the psrl/psra/psll helpers below:
 * logical right shift (FPSRL), arithmetic right shift on 16-/32-bit
 * signed lanes (FPSRAW/FPSRAL), and left shift (FPSLL).
 *
 * The count parameter 'c' is now used directly.  The previous expansion
 * ignored 'c' and silently captured a local variable named 'shift' from
 * the expansion site; every visible call site passes its local 'shift'
 * as the count, so this change is behavior-preserving while making the
 * macros self-contained and honest about their inputs.
 */
#define FPSRL(x, c) ((x) >> (c))
#define FPSRAW(x, c) ((int16_t)(x) >> (c))
#define FPSRAL(x, c) ((int32_t)(x) >> (c))
#define FPSLL(x, c) ((x) << (c))
57 void glue(helper_psrlw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
61 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
66 for (int i
= 0; i
< 4 << SHIFT
; i
++) {
67 d
->W(i
) = FPSRL(s
->W(i
), shift
);
72 void glue(helper_psllw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
76 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
81 for (int i
= 0; i
< 4 << SHIFT
; i
++) {
82 d
->W(i
) = FPSLL(s
->W(i
), shift
);
87 void glue(helper_psraw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
95 for (int i
= 0; i
< 4 << SHIFT
; i
++) {
96 d
->W(i
) = FPSRAW(s
->W(i
), shift
);
100 void glue(helper_psrld
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
104 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
109 for (int i
= 0; i
< 2 << SHIFT
; i
++) {
110 d
->L(i
) = FPSRL(s
->L(i
), shift
);
115 void glue(helper_pslld
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
119 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
124 for (int i
= 0; i
< 2 << SHIFT
; i
++) {
125 d
->L(i
) = FPSLL(s
->L(i
), shift
);
130 void glue(helper_psrad
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
138 for (int i
= 0; i
< 2 << SHIFT
; i
++) {
139 d
->L(i
) = FPSRAL(s
->L(i
), shift
);
143 void glue(helper_psrlq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
147 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
152 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
153 d
->Q(i
) = FPSRL(s
->Q(i
), shift
);
158 void glue(helper_psllq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
162 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
167 for (int i
= 0; i
< 1 << SHIFT
; i
++) {
168 d
->Q(i
) = FPSLL(s
->Q(i
), shift
);
174 void glue(helper_psrldq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
182 for (j
= 0; j
< 8 << SHIFT
; j
+= LANE_WIDTH
) {
183 for (i
= 0; i
< 16 - shift
; i
++) {
184 d
->B(j
+ i
) = s
->B(j
+ i
+ shift
);
186 for (i
= 16 - shift
; i
< 16; i
++) {
192 void glue(helper_pslldq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, Reg
*c
)
200 for (j
= 0; j
< 8 << SHIFT
; j
+= LANE_WIDTH
) {
201 for (i
= 15; i
>= shift
; i
--) {
202 d
->B(j
+ i
) = s
->B(j
+ i
- shift
);
204 for (i
= 0; i
< shift
; i
++) {
211 #define SSE_HELPER_1(name, elem, num, F) \
212 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
215 for (int i = 0; i < n; i++) { \
216 d->elem(i) = F(s->elem(i)); \
220 #define SSE_HELPER_2(name, elem, num, F) \
221 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
224 for (int i = 0; i < n; i++) { \
225 d->elem(i) = F(v->elem(i), s->elem(i)); \
229 #define SSE_HELPER_B(name, F) \
230 SSE_HELPER_2(name, B, 8 << SHIFT, F)
232 #define SSE_HELPER_W(name, F) \
233 SSE_HELPER_2(name, W, 4 << SHIFT, F)
235 #define SSE_HELPER_L(name, F) \
236 SSE_HELPER_2(name, L, 2 << SHIFT, F)
238 #define SSE_HELPER_Q(name, F) \
239 SSE_HELPER_2(name, Q, 1 << SHIFT, F)
242 static inline int satub(int x
)
246 } else if (x
> 255) {
253 static inline int satuw(int x
)
257 } else if (x
> 65535) {
264 static inline int satsb(int x
)
268 } else if (x
> 127) {
275 static inline int satsw(int x
)
279 } else if (x
> 32767) {
/* Element-wise add/subtract primitives for the integer SIMD helpers.
   Plain variants wrap modulo the element width; the U*/S* variants clamp
   the result through the saturation helpers above (satub/satuw/satsb/satsw),
   with the signed forms reinterpreting the operands as int8_t/int16_t first. */
286 #define FADD(a, b) ((a) + (b))
287 #define FADDUB(a, b) satub((a) + (b))
288 #define FADDUW(a, b) satuw((a) + (b))
289 #define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
290 #define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))
292 #define FSUB(a, b) ((a) - (b))
293 #define FSUBUB(a, b) satub((a) - (b))
294 #define FSUBUW(a, b) satuw((a) - (b))
295 #define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
296 #define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
/*
 * Element-wise min/max.  The *UB forms compare with the operands'
 * (unsigned byte) types as-is; the *SW forms compare as int16_t.
 *
 * The whole expansion is now parenthesized: the old form left the ?:
 * operator exposed, so an expansion inside a larger expression (e.g.
 * `FMINUB(a, b) + 1` or a comparison) would parse incorrectly.  All
 * current uses assign the result directly, so behavior is unchanged.
 * Note the operands are still evaluated twice — do not pass expressions
 * with side effects.
 */
#define FMINUB(a, b) (((a) < (b)) ? (a) : (b))
#define FMINSW(a, b) (((int16_t)(a) < (int16_t)(b)) ? (a) : (b))
#define FMAXUB(a, b) (((a) > (b)) ? (a) : (b))
#define FMAXSW(a, b) (((int16_t)(a) > (int16_t)(b)) ? (a) : (b))
/* High halves of 16x16-bit products and byte/word averaging:
   FMULHRW: signed product, rounded (+0x8000 before the shift) — 3DNow! pmulhrw.
   FMULHUW: high 16 bits of the unsigned product.
   FMULHW:  high 16 bits of the signed product.
   FAVG:    average rounded half-up ((a + b + 1) >> 1). */
302 #define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
303 #define FMULHUW(a, b) ((a) * (b) >> 16)
304 #define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)
306 #define FAVG(a, b) (((a) + (b) + 1) >> 1)
309 SSE_HELPER_W(helper_pmulhuw
, FMULHUW
)
310 SSE_HELPER_W(helper_pmulhw
, FMULHW
)
313 void glue(helper_pmulhrw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
315 d
->W(0) = FMULHRW(d
->W(0), s
->W(0));
316 d
->W(1) = FMULHRW(d
->W(1), s
->W(1));
317 d
->W(2) = FMULHRW(d
->W(2), s
->W(2));
318 d
->W(3) = FMULHRW(d
->W(3), s
->W(3));
322 SSE_HELPER_B(helper_pavgb
, FAVG
)
323 SSE_HELPER_W(helper_pavgw
, FAVG
)
325 void glue(helper_pmuludq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
329 for (i
= 0; i
< (1 << SHIFT
); i
++) {
330 d
->Q(i
) = (uint64_t)s
->L(i
* 2) * (uint64_t)v
->L(i
* 2);
334 void glue(helper_pmaddwd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
338 for (i
= 0; i
< (2 << SHIFT
); i
++) {
339 d
->L(i
) = (int16_t)s
->W(2 * i
) * (int16_t)v
->W(2 * i
) +
340 (int16_t)s
->W(2 * i
+ 1) * (int16_t)v
->W(2 * i
+ 1);
345 static inline int abs1(int a
)
354 void glue(helper_psadbw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
358 for (i
= 0; i
< (1 << SHIFT
); i
++) {
359 unsigned int val
= 0;
360 val
+= abs1(v
->B(8 * i
+ 0) - s
->B(8 * i
+ 0));
361 val
+= abs1(v
->B(8 * i
+ 1) - s
->B(8 * i
+ 1));
362 val
+= abs1(v
->B(8 * i
+ 2) - s
->B(8 * i
+ 2));
363 val
+= abs1(v
->B(8 * i
+ 3) - s
->B(8 * i
+ 3));
364 val
+= abs1(v
->B(8 * i
+ 4) - s
->B(8 * i
+ 4));
365 val
+= abs1(v
->B(8 * i
+ 5) - s
->B(8 * i
+ 5));
366 val
+= abs1(v
->B(8 * i
+ 6) - s
->B(8 * i
+ 6));
367 val
+= abs1(v
->B(8 * i
+ 7) - s
->B(8 * i
+ 7));
373 void glue(helper_maskmov
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
378 for (i
= 0; i
< (8 << SHIFT
); i
++) {
379 if (s
->B(i
) & 0x80) {
380 cpu_stb_data_ra(env
, a0
+ i
, d
->B(i
), GETPC());
386 #define SHUFFLE4(F, a, b, offset) do { \
387 r0 = a->F((order & 3) + offset); \
388 r1 = a->F(((order >> 2) & 3) + offset); \
389 r2 = b->F(((order >> 4) & 3) + offset); \
390 r3 = b->F(((order >> 6) & 3) + offset); \
392 d->F(offset + 1) = r1; \
393 d->F(offset + 2) = r2; \
394 d->F(offset + 3) = r3; \
398 void glue(helper_pshufw
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
400 uint16_t r0
, r1
, r2
, r3
;
402 SHUFFLE4(W
, s
, s
, 0);
405 void glue(helper_shufps
, SUFFIX
)(Reg
*d
, Reg
*v
, Reg
*s
, int order
)
407 uint32_t r0
, r1
, r2
, r3
;
410 for (i
= 0; i
< 2 << SHIFT
; i
+= 4) {
411 SHUFFLE4(L
, v
, s
, i
);
415 void glue(helper_shufpd
, SUFFIX
)(Reg
*d
, Reg
*v
, Reg
*s
, int order
)
420 for (i
= 0; i
< 1 << SHIFT
; i
+= 2) {
421 r0
= v
->Q(((order
& 1) & 1) + i
);
422 r1
= s
->Q(((order
>> 1) & 1) + i
);
429 void glue(helper_pshufd
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
431 uint32_t r0
, r1
, r2
, r3
;
434 for (i
= 0; i
< 2 << SHIFT
; i
+= 4) {
435 SHUFFLE4(L
, s
, s
, i
);
439 void glue(helper_pshuflw
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
441 uint16_t r0
, r1
, r2
, r3
;
444 for (i
= 0, j
= 1; j
< 1 << SHIFT
; i
+= 8, j
+= 2) {
445 SHUFFLE4(W
, s
, s
, i
);
450 void glue(helper_pshufhw
, SUFFIX
)(Reg
*d
, Reg
*s
, int order
)
452 uint16_t r0
, r1
, r2
, r3
;
455 for (i
= 4, j
= 0; j
< 1 << SHIFT
; i
+= 8, j
+= 2) {
457 SHUFFLE4(W
, s
, s
, i
);
464 /* XXX: not accurate */
466 #define SSE_HELPER_P(name, F) \
467 void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env, \
468 Reg *d, Reg *v, Reg *s) \
471 for (i = 0; i < 2 << SHIFT; i++) { \
472 d->ZMM_S(i) = F(32, v->ZMM_S(i), s->ZMM_S(i)); \
476 void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env, \
477 Reg *d, Reg *v, Reg *s) \
480 for (i = 0; i < 1 << SHIFT; i++) { \
481 d->ZMM_D(i) = F(64, v->ZMM_D(i), s->ZMM_D(i)); \
487 #define SSE_HELPER_S(name, F) \
488 SSE_HELPER_P(name, F) \
490 void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s)\
493 d->ZMM_S(0) = F(32, v->ZMM_S(0), s->ZMM_S(0)); \
494 for (i = 1; i < 2 << SHIFT; i++) { \
495 d->ZMM_L(i) = v->ZMM_L(i); \
499 void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s)\
502 d->ZMM_D(0) = F(64, v->ZMM_D(0), s->ZMM_D(0)); \
503 for (i = 1; i < 1 << SHIFT; i++) { \
504 d->ZMM_Q(i) = v->ZMM_Q(i); \
510 #define SSE_HELPER_S(name, F) SSE_HELPER_P(name, F)
/* Token-pasting wrappers mapping the generic SSE_HELPER_P/S templates onto
   the softfloat float32_/float64_ arithmetic, always rounding and raising
   exceptions through env->sse_status (which must be in scope at expansion). */
514 #define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
515 #define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
516 #define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
517 #define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)
519 /* Note that the choice of comparison op here is important to get the
520 * special cases right: for min and max Intel specifies that (-0,0),
521 * (NaN, anything) and (anything, NaN) return the second argument.
/* The strict-less-than test deliberately returns the SECOND operand when the
   relation is false, which implements Intel's rule that (-0,0), (NaN,x) and
   (x,NaN) all yield the second argument (see the comment above).  Do not
   "simplify" the comparison direction. */
523 #define FPU_MIN(size, a, b) \
524 (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b))
525 #define FPU_MAX(size, a, b) \
526 (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b))
528 SSE_HELPER_S(add
, FPU_ADD
)
529 SSE_HELPER_S(sub
, FPU_SUB
)
530 SSE_HELPER_S(mul
, FPU_MUL
)
531 SSE_HELPER_S(div
, FPU_DIV
)
532 SSE_HELPER_S(min
, FPU_MIN
)
533 SSE_HELPER_S(max
, FPU_MAX
)
535 void glue(helper_sqrtps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
538 for (i
= 0; i
< 2 << SHIFT
; i
++) {
539 d
->ZMM_S(i
) = float32_sqrt(s
->ZMM_S(i
), &env
->sse_status
);
543 void glue(helper_sqrtpd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
546 for (i
= 0; i
< 1 << SHIFT
; i
++) {
547 d
->ZMM_D(i
) = float64_sqrt(s
->ZMM_D(i
), &env
->sse_status
);
552 void helper_sqrtss(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
555 d
->ZMM_S(0) = float32_sqrt(s
->ZMM_S(0), &env
->sse_status
);
556 for (i
= 1; i
< 2 << SHIFT
; i
++) {
557 d
->ZMM_L(i
) = v
->ZMM_L(i
);
561 void helper_sqrtsd(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
564 d
->ZMM_D(0) = float64_sqrt(s
->ZMM_D(0), &env
->sse_status
);
565 for (i
= 1; i
< 1 << SHIFT
; i
++) {
566 d
->ZMM_Q(i
) = v
->ZMM_Q(i
);
571 /* float to float conversions */
572 void glue(helper_cvtps2pd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
575 for (i
= 1 << SHIFT
; --i
>= 0; ) {
576 d
->ZMM_D(i
) = float32_to_float64(s
->ZMM_S(i
), &env
->sse_status
);
580 void glue(helper_cvtpd2ps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
583 for (i
= 0; i
< 1 << SHIFT
; i
++) {
584 d
->ZMM_S(i
) = float64_to_float32(s
->ZMM_D(i
), &env
->sse_status
);
586 for (i
>>= 1; i
< 1 << SHIFT
; i
++) {
592 void glue(helper_cvtph2ps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
596 for (i
= 2 << SHIFT
; --i
>= 0; ) {
597 d
->ZMM_S(i
) = float16_to_float32(s
->ZMM_H(i
), true, &env
->sse_status
);
601 void glue(helper_cvtps2ph
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
, int mode
)
604 FloatRoundMode prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
605 if (!(mode
& (1 << 2))) {
606 set_x86_rounding_mode(mode
& 3, &env
->sse_status
);
609 for (i
= 0; i
< 2 << SHIFT
; i
++) {
610 d
->ZMM_H(i
) = float32_to_float16(s
->ZMM_S(i
), true, &env
->sse_status
);
612 for (i
>>= 2; i
< 1 << SHIFT
; i
++) {
616 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
621 void helper_cvtss2sd(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
624 d
->ZMM_D(0) = float32_to_float64(s
->ZMM_S(0), &env
->sse_status
);
625 for (i
= 1; i
< 1 << SHIFT
; i
++) {
626 d
->ZMM_Q(i
) = v
->ZMM_Q(i
);
630 void helper_cvtsd2ss(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
633 d
->ZMM_S(0) = float64_to_float32(s
->ZMM_D(0), &env
->sse_status
);
634 for (i
= 1; i
< 2 << SHIFT
; i
++) {
635 d
->ZMM_L(i
) = v
->ZMM_L(i
);
640 /* integer to float */
641 void glue(helper_cvtdq2ps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
644 for (i
= 0; i
< 2 << SHIFT
; i
++) {
645 d
->ZMM_S(i
) = int32_to_float32(s
->ZMM_L(i
), &env
->sse_status
);
649 void glue(helper_cvtdq2pd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
652 for (i
= 1 << SHIFT
; --i
>= 0; ) {
653 int32_t l
= s
->ZMM_L(i
);
654 d
->ZMM_D(i
) = int32_to_float64(l
, &env
->sse_status
);
659 void helper_cvtpi2ps(CPUX86State
*env
, ZMMReg
*d
, MMXReg
*s
)
661 d
->ZMM_S(0) = int32_to_float32(s
->MMX_L(0), &env
->sse_status
);
662 d
->ZMM_S(1) = int32_to_float32(s
->MMX_L(1), &env
->sse_status
);
665 void helper_cvtpi2pd(CPUX86State
*env
, ZMMReg
*d
, MMXReg
*s
)
667 d
->ZMM_D(0) = int32_to_float64(s
->MMX_L(0), &env
->sse_status
);
668 d
->ZMM_D(1) = int32_to_float64(s
->MMX_L(1), &env
->sse_status
);
671 void helper_cvtsi2ss(CPUX86State
*env
, ZMMReg
*d
, uint32_t val
)
673 d
->ZMM_S(0) = int32_to_float32(val
, &env
->sse_status
);
676 void helper_cvtsi2sd(CPUX86State
*env
, ZMMReg
*d
, uint32_t val
)
678 d
->ZMM_D(0) = int32_to_float64(val
, &env
->sse_status
);
682 void helper_cvtsq2ss(CPUX86State
*env
, ZMMReg
*d
, uint64_t val
)
684 d
->ZMM_S(0) = int64_to_float32(val
, &env
->sse_status
);
687 void helper_cvtsq2sd(CPUX86State
*env
, ZMMReg
*d
, uint64_t val
)
689 d
->ZMM_D(0) = int64_to_float64(val
, &env
->sse_status
);
695 /* float to integer */
699 * x86 mandates that we return the indefinite integer value for the result
700 * of any float-to-integer conversion that raises the 'invalid' exception.
701 * Wrap the softfloat functions to get this behaviour.
703 #define WRAP_FLOATCONV(RETTYPE, FN, FLOATTYPE, INDEFVALUE) \
704 static inline RETTYPE x86_##FN(FLOATTYPE a, float_status *s) \
706 int oldflags, newflags; \
709 oldflags = get_float_exception_flags(s); \
710 set_float_exception_flags(0, s); \
712 newflags = get_float_exception_flags(s); \
713 if (newflags & float_flag_invalid) { \
716 set_float_exception_flags(newflags | oldflags, s); \
720 WRAP_FLOATCONV(int32_t, float32_to_int32
, float32
, INT32_MIN
)
721 WRAP_FLOATCONV(int32_t, float32_to_int32_round_to_zero
, float32
, INT32_MIN
)
722 WRAP_FLOATCONV(int32_t, float64_to_int32
, float64
, INT32_MIN
)
723 WRAP_FLOATCONV(int32_t, float64_to_int32_round_to_zero
, float64
, INT32_MIN
)
724 WRAP_FLOATCONV(int64_t, float32_to_int64
, float32
, INT64_MIN
)
725 WRAP_FLOATCONV(int64_t, float32_to_int64_round_to_zero
, float32
, INT64_MIN
)
726 WRAP_FLOATCONV(int64_t, float64_to_int64
, float64
, INT64_MIN
)
727 WRAP_FLOATCONV(int64_t, float64_to_int64_round_to_zero
, float64
, INT64_MIN
)
730 void glue(helper_cvtps2dq
, SUFFIX
)(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
733 for (i
= 0; i
< 2 << SHIFT
; i
++) {
734 d
->ZMM_L(i
) = x86_float32_to_int32(s
->ZMM_S(i
), &env
->sse_status
);
738 void glue(helper_cvtpd2dq
, SUFFIX
)(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
741 for (i
= 0; i
< 1 << SHIFT
; i
++) {
742 d
->ZMM_L(i
) = x86_float64_to_int32(s
->ZMM_D(i
), &env
->sse_status
);
744 for (i
>>= 1; i
< 1 << SHIFT
; i
++) {
750 void helper_cvtps2pi(CPUX86State
*env
, MMXReg
*d
, ZMMReg
*s
)
752 d
->MMX_L(0) = x86_float32_to_int32(s
->ZMM_S(0), &env
->sse_status
);
753 d
->MMX_L(1) = x86_float32_to_int32(s
->ZMM_S(1), &env
->sse_status
);
756 void helper_cvtpd2pi(CPUX86State
*env
, MMXReg
*d
, ZMMReg
*s
)
758 d
->MMX_L(0) = x86_float64_to_int32(s
->ZMM_D(0), &env
->sse_status
);
759 d
->MMX_L(1) = x86_float64_to_int32(s
->ZMM_D(1), &env
->sse_status
);
762 int32_t helper_cvtss2si(CPUX86State
*env
, ZMMReg
*s
)
764 return x86_float32_to_int32(s
->ZMM_S(0), &env
->sse_status
);
767 int32_t helper_cvtsd2si(CPUX86State
*env
, ZMMReg
*s
)
769 return x86_float64_to_int32(s
->ZMM_D(0), &env
->sse_status
);
773 int64_t helper_cvtss2sq(CPUX86State
*env
, ZMMReg
*s
)
775 return x86_float32_to_int64(s
->ZMM_S(0), &env
->sse_status
);
778 int64_t helper_cvtsd2sq(CPUX86State
*env
, ZMMReg
*s
)
780 return x86_float64_to_int64(s
->ZMM_D(0), &env
->sse_status
);
785 /* float to integer truncated */
786 void glue(helper_cvttps2dq
, SUFFIX
)(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
789 for (i
= 0; i
< 2 << SHIFT
; i
++) {
790 d
->ZMM_L(i
) = x86_float32_to_int32_round_to_zero(s
->ZMM_S(i
),
795 void glue(helper_cvttpd2dq
, SUFFIX
)(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
798 for (i
= 0; i
< 1 << SHIFT
; i
++) {
799 d
->ZMM_L(i
) = x86_float64_to_int32_round_to_zero(s
->ZMM_D(i
),
802 for (i
>>= 1; i
< 1 << SHIFT
; i
++) {
808 void helper_cvttps2pi(CPUX86State
*env
, MMXReg
*d
, ZMMReg
*s
)
810 d
->MMX_L(0) = x86_float32_to_int32_round_to_zero(s
->ZMM_S(0), &env
->sse_status
);
811 d
->MMX_L(1) = x86_float32_to_int32_round_to_zero(s
->ZMM_S(1), &env
->sse_status
);
814 void helper_cvttpd2pi(CPUX86State
*env
, MMXReg
*d
, ZMMReg
*s
)
816 d
->MMX_L(0) = x86_float64_to_int32_round_to_zero(s
->ZMM_D(0), &env
->sse_status
);
817 d
->MMX_L(1) = x86_float64_to_int32_round_to_zero(s
->ZMM_D(1), &env
->sse_status
);
820 int32_t helper_cvttss2si(CPUX86State
*env
, ZMMReg
*s
)
822 return x86_float32_to_int32_round_to_zero(s
->ZMM_S(0), &env
->sse_status
);
825 int32_t helper_cvttsd2si(CPUX86State
*env
, ZMMReg
*s
)
827 return x86_float64_to_int32_round_to_zero(s
->ZMM_D(0), &env
->sse_status
);
831 int64_t helper_cvttss2sq(CPUX86State
*env
, ZMMReg
*s
)
833 return x86_float32_to_int64_round_to_zero(s
->ZMM_S(0), &env
->sse_status
);
836 int64_t helper_cvttsd2sq(CPUX86State
*env
, ZMMReg
*s
)
838 return x86_float64_to_int64_round_to_zero(s
->ZMM_D(0), &env
->sse_status
);
843 void glue(helper_rsqrtps
, SUFFIX
)(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
845 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
847 for (i
= 0; i
< 2 << SHIFT
; i
++) {
848 d
->ZMM_S(i
) = float32_div(float32_one
,
849 float32_sqrt(s
->ZMM_S(i
), &env
->sse_status
),
852 set_float_exception_flags(old_flags
, &env
->sse_status
);
856 void helper_rsqrtss(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*v
, ZMMReg
*s
)
858 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
860 d
->ZMM_S(0) = float32_div(float32_one
,
861 float32_sqrt(s
->ZMM_S(0), &env
->sse_status
),
863 set_float_exception_flags(old_flags
, &env
->sse_status
);
864 for (i
= 1; i
< 2 << SHIFT
; i
++) {
865 d
->ZMM_L(i
) = v
->ZMM_L(i
);
870 void glue(helper_rcpps
, SUFFIX
)(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
872 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
874 for (i
= 0; i
< 2 << SHIFT
; i
++) {
875 d
->ZMM_S(i
) = float32_div(float32_one
, s
->ZMM_S(i
), &env
->sse_status
);
877 set_float_exception_flags(old_flags
, &env
->sse_status
);
881 void helper_rcpss(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*v
, ZMMReg
*s
)
883 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
885 d
->ZMM_S(0) = float32_div(float32_one
, s
->ZMM_S(0), &env
->sse_status
);
886 for (i
= 1; i
< 2 << SHIFT
; i
++) {
887 d
->ZMM_L(i
) = v
->ZMM_L(i
);
889 set_float_exception_flags(old_flags
, &env
->sse_status
);
894 static inline uint64_t helper_extrq(uint64_t src
, int shift
, int len
)
901 mask
= (1ULL << len
) - 1;
903 return (src
>> shift
) & mask
;
906 void helper_extrq_r(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
908 d
->ZMM_Q(0) = helper_extrq(d
->ZMM_Q(0), s
->ZMM_B(1) & 63, s
->ZMM_B(0) & 63);
911 void helper_extrq_i(CPUX86State
*env
, ZMMReg
*d
, int index
, int length
)
913 d
->ZMM_Q(0) = helper_extrq(d
->ZMM_Q(0), index
, length
);
916 static inline uint64_t helper_insertq(uint64_t dest
, uint64_t src
, int shift
, int len
)
923 mask
= (1ULL << len
) - 1;
925 return (dest
& ~(mask
<< shift
)) | ((src
& mask
) << shift
);
928 void helper_insertq_r(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
)
930 d
->ZMM_Q(0) = helper_insertq(d
->ZMM_Q(0), s
->ZMM_Q(0), s
->ZMM_B(9) & 63, s
->ZMM_B(8) & 63);
933 void helper_insertq_i(CPUX86State
*env
, ZMMReg
*d
, ZMMReg
*s
, int index
, int length
)
935 d
->ZMM_Q(0) = helper_insertq(d
->ZMM_Q(0), s
->ZMM_Q(0), index
, length
);
939 #define SSE_HELPER_HPS(name, F) \
940 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
942 float32 r[2 << SHIFT]; \
944 for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) { \
945 for (i = j = 0; j < 4; i++, j += 2) { \
946 r[i + k] = F(v->ZMM_S(j + k), v->ZMM_S(j + k + 1), &env->sse_status); \
948 for (j = 0; j < 4; i++, j += 2) { \
949 r[i + k] = F(s->ZMM_S(j + k), s->ZMM_S(j + k + 1), &env->sse_status); \
952 for (i = 0; i < 2 << SHIFT; i++) { \
953 d->ZMM_S(i) = r[i]; \
957 SSE_HELPER_HPS(haddps
, float32_add
)
958 SSE_HELPER_HPS(hsubps
, float32_sub
)
960 #define SSE_HELPER_HPD(name, F) \
961 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
963 float64 r[1 << SHIFT]; \
965 for (k = 0; k < 1 << SHIFT; k += LANE_WIDTH / 8) { \
966 for (i = j = 0; j < 2; i++, j += 2) { \
967 r[i + k] = F(v->ZMM_D(j + k), v->ZMM_D(j + k + 1), &env->sse_status); \
969 for (j = 0; j < 2; i++, j += 2) { \
970 r[i + k] = F(s->ZMM_D(j + k), s->ZMM_D(j + k + 1), &env->sse_status); \
973 for (i = 0; i < 1 << SHIFT; i++) { \
974 d->ZMM_D(i) = r[i]; \
978 SSE_HELPER_HPD(haddpd
, float64_add
)
979 SSE_HELPER_HPD(hsubpd
, float64_sub
)
981 void glue(helper_addsubps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
984 for (i
= 0; i
< 2 << SHIFT
; i
+= 2) {
985 d
->ZMM_S(i
) = float32_sub(v
->ZMM_S(i
), s
->ZMM_S(i
), &env
->sse_status
);
986 d
->ZMM_S(i
+1) = float32_add(v
->ZMM_S(i
+1), s
->ZMM_S(i
+1), &env
->sse_status
);
990 void glue(helper_addsubpd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
993 for (i
= 0; i
< 1 << SHIFT
; i
+= 2) {
994 d
->ZMM_D(i
) = float64_sub(v
->ZMM_D(i
), s
->ZMM_D(i
), &env
->sse_status
);
995 d
->ZMM_D(i
+1) = float64_add(v
->ZMM_D(i
+1), s
->ZMM_D(i
+1), &env
->sse_status
);
999 #define SSE_HELPER_CMP_P(name, F, C) \
1000 void glue(helper_ ## name ## ps, SUFFIX)(CPUX86State *env, \
1001 Reg *d, Reg *v, Reg *s) \
1004 for (i = 0; i < 2 << SHIFT; i++) { \
1005 d->ZMM_L(i) = C(F(32, v->ZMM_S(i), s->ZMM_S(i))) ? -1 : 0; \
1009 void glue(helper_ ## name ## pd, SUFFIX)(CPUX86State *env, \
1010 Reg *d, Reg *v, Reg *s) \
1013 for (i = 0; i < 1 << SHIFT; i++) { \
1014 d->ZMM_Q(i) = C(F(64, v->ZMM_D(i), s->ZMM_D(i))) ? -1 : 0; \
1019 #define SSE_HELPER_CMP(name, F, C) \
1020 SSE_HELPER_CMP_P(name, F, C) \
1021 void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
1024 d->ZMM_L(0) = C(F(32, v->ZMM_S(0), s->ZMM_S(0))) ? -1 : 0; \
1025 for (i = 1; i < 2 << SHIFT; i++) { \
1026 d->ZMM_L(i) = v->ZMM_L(i); \
1030 void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
1033 d->ZMM_Q(0) = C(F(64, v->ZMM_D(0), s->ZMM_D(0))) ? -1 : 0; \
1034 for (i = 1; i < 1 << SHIFT; i++) { \
1035 d->ZMM_Q(i) = v->ZMM_Q(i); \
1039 static inline bool FPU_EQU(FloatRelation x
)
1041 return (x
== float_relation_equal
|| x
== float_relation_unordered
);
1043 static inline bool FPU_GE(FloatRelation x
)
1045 return (x
== float_relation_equal
|| x
== float_relation_greater
);
/*
 * Predicates mapping a FloatRelation comparison result to a boolean.
 * The relation encoding (less = -1, equal = 0, greater = 1,
 * unordered = 2) is what the comis_eflags[ret + 1] indexing below
 * relies on; FPU_LE exploits it by testing x <= equal.
 *
 * The argument is now parenthesized so uses such as !FPU_EQ(...) or
 * expansion of compound expressions stay well-formed.
 */
#define FPU_EQ(x) ((x) == float_relation_equal)
#define FPU_LT(x) ((x) == float_relation_less)
#define FPU_LE(x) ((x) <= float_relation_equal)
#define FPU_GT(x) ((x) == float_relation_greater)
#define FPU_UNORD(x) ((x) == float_relation_unordered)
/* We must make sure we evaluate the argument in case it is a signalling NAN */
#define FPU_FALSE(x) ((x) == float_relation_equal && 0)
/* Comparison wrappers for the SSE_HELPER_CMP template: CMPQ uses the
   _compare_quiet softfloat entry point, CMPS the signalling _compare one
   (presumably quiet vs. signalling NaN exception behavior — confirm against
   the softfloat API).  Both report through env->sse_status. */
1055 #define FPU_CMPQ(size, a, b) \
1056 float ## size ## _compare_quiet(a, b, &env->sse_status)
1057 #define FPU_CMPS(size, a, b) \
1058 float ## size ## _compare(a, b, &env->sse_status)
1061 #define SSE_HELPER_CMP(name, F, C) SSE_HELPER_CMP_P(name, F, C)
1064 SSE_HELPER_CMP(cmpeq
, FPU_CMPQ
, FPU_EQ
)
1065 SSE_HELPER_CMP(cmplt
, FPU_CMPS
, FPU_LT
)
1066 SSE_HELPER_CMP(cmple
, FPU_CMPS
, FPU_LE
)
1067 SSE_HELPER_CMP(cmpunord
, FPU_CMPQ
, FPU_UNORD
)
1068 SSE_HELPER_CMP(cmpneq
, FPU_CMPQ
, !FPU_EQ
)
1069 SSE_HELPER_CMP(cmpnlt
, FPU_CMPS
, !FPU_LT
)
1070 SSE_HELPER_CMP(cmpnle
, FPU_CMPS
, !FPU_LE
)
1071 SSE_HELPER_CMP(cmpord
, FPU_CMPQ
, !FPU_UNORD
)
1073 SSE_HELPER_CMP(cmpequ
, FPU_CMPQ
, FPU_EQU
)
1074 SSE_HELPER_CMP(cmpnge
, FPU_CMPS
, !FPU_GE
)
1075 SSE_HELPER_CMP(cmpngt
, FPU_CMPS
, !FPU_GT
)
1076 SSE_HELPER_CMP(cmpfalse
, FPU_CMPQ
, FPU_FALSE
)
1077 SSE_HELPER_CMP(cmpnequ
, FPU_CMPQ
, !FPU_EQU
)
1078 SSE_HELPER_CMP(cmpge
, FPU_CMPS
, FPU_GE
)
1079 SSE_HELPER_CMP(cmpgt
, FPU_CMPS
, FPU_GT
)
1080 SSE_HELPER_CMP(cmptrue
, FPU_CMPQ
, !FPU_FALSE
)
1082 SSE_HELPER_CMP(cmpeqs
, FPU_CMPS
, FPU_EQ
)
1083 SSE_HELPER_CMP(cmpltq
, FPU_CMPQ
, FPU_LT
)
1084 SSE_HELPER_CMP(cmpleq
, FPU_CMPQ
, FPU_LE
)
1085 SSE_HELPER_CMP(cmpunords
, FPU_CMPS
, FPU_UNORD
)
1086 SSE_HELPER_CMP(cmpneqq
, FPU_CMPS
, !FPU_EQ
)
1087 SSE_HELPER_CMP(cmpnltq
, FPU_CMPQ
, !FPU_LT
)
1088 SSE_HELPER_CMP(cmpnleq
, FPU_CMPQ
, !FPU_LE
)
1089 SSE_HELPER_CMP(cmpords
, FPU_CMPS
, !FPU_UNORD
)
1091 SSE_HELPER_CMP(cmpequs
, FPU_CMPS
, FPU_EQU
)
1092 SSE_HELPER_CMP(cmpngeq
, FPU_CMPQ
, !FPU_GE
)
1093 SSE_HELPER_CMP(cmpngtq
, FPU_CMPQ
, !FPU_GT
)
1094 SSE_HELPER_CMP(cmpfalses
, FPU_CMPS
, FPU_FALSE
)
1095 SSE_HELPER_CMP(cmpnequs
, FPU_CMPS
, !FPU_EQU
)
1096 SSE_HELPER_CMP(cmpgeq
, FPU_CMPQ
, FPU_GE
)
1097 SSE_HELPER_CMP(cmpgtq
, FPU_CMPQ
, FPU_GT
)
1098 SSE_HELPER_CMP(cmptrues
, FPU_CMPS
, !FPU_FALSE
)
1100 #undef SSE_HELPER_CMP
1103 static const int comis_eflags
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
1105 void helper_ucomiss(CPUX86State
*env
, Reg
*d
, Reg
*s
)
1112 ret
= float32_compare_quiet(s0
, s1
, &env
->sse_status
);
1113 CC_SRC
= comis_eflags
[ret
+ 1];
1114 CC_OP
= CC_OP_EFLAGS
;
1117 void helper_comiss(CPUX86State
*env
, Reg
*d
, Reg
*s
)
1124 ret
= float32_compare(s0
, s1
, &env
->sse_status
);
1125 CC_SRC
= comis_eflags
[ret
+ 1];
1126 CC_OP
= CC_OP_EFLAGS
;
1129 void helper_ucomisd(CPUX86State
*env
, Reg
*d
, Reg
*s
)
1136 ret
= float64_compare_quiet(d0
, d1
, &env
->sse_status
);
1137 CC_SRC
= comis_eflags
[ret
+ 1];
1138 CC_OP
= CC_OP_EFLAGS
;
1141 void helper_comisd(CPUX86State
*env
, Reg
*d
, Reg
*s
)
1148 ret
= float64_compare(d0
, d1
, &env
->sse_status
);
1149 CC_SRC
= comis_eflags
[ret
+ 1];
1150 CC_OP
= CC_OP_EFLAGS
;
1154 uint32_t glue(helper_movmskps
, SUFFIX
)(CPUX86State
*env
, Reg
*s
)
1160 for (i
= 0; i
< 2 << SHIFT
; i
++) {
1161 mask
|= (s
->ZMM_L(i
) >> (31 - i
)) & (1 << i
);
1166 uint32_t glue(helper_movmskpd
, SUFFIX
)(CPUX86State
*env
, Reg
*s
)
1172 for (i
= 0; i
< 1 << SHIFT
; i
++) {
1173 mask
|= (s
->ZMM_Q(i
) >> (63 - i
)) & (1 << i
);
1180 #define PACK_HELPER_B(name, F) \
1181 void glue(helper_pack ## name, SUFFIX)(CPUX86State *env, \
1182 Reg *d, Reg *v, Reg *s) \
1184 uint8_t r[PACK_WIDTH * 2]; \
1186 for (j = 0; j < 4 << SHIFT; j += PACK_WIDTH) { \
1187 for (k = 0; k < PACK_WIDTH; k++) { \
1188 r[k] = F((int16_t)v->W(j + k)); \
1190 for (k = 0; k < PACK_WIDTH; k++) { \
1191 r[PACK_WIDTH + k] = F((int16_t)s->W(j + k)); \
1193 for (k = 0; k < PACK_WIDTH * 2; k++) { \
1194 d->B(2 * j + k) = r[k]; \
1199 PACK_HELPER_B(sswb
, satsb
)
1200 PACK_HELPER_B(uswb
, satub
)
1202 void glue(helper_packssdw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
1204 uint16_t r
[PACK_WIDTH
];
1207 for (j
= 0; j
< 2 << SHIFT
; j
+= PACK_WIDTH
/ 2) {
1208 for (k
= 0; k
< PACK_WIDTH
/ 2; k
++) {
1209 r
[k
] = satsw(v
->L(j
+ k
));
1211 for (k
= 0; k
< PACK_WIDTH
/ 2; k
++) {
1212 r
[PACK_WIDTH
/ 2 + k
] = satsw(s
->L(j
+ k
));
1214 for (k
= 0; k
< PACK_WIDTH
; k
++) {
1215 d
->W(2 * j
+ k
) = r
[k
];
1220 #define UNPCK_OP(base_name, base) \
1222 void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\
1223 Reg *d, Reg *v, Reg *s) \
1225 uint8_t r[PACK_WIDTH * 2]; \
1228 for (j = 0; j < 8 << SHIFT; ) { \
1229 int k = j + base * PACK_WIDTH; \
1230 for (i = 0; i < PACK_WIDTH; i++) { \
1231 r[2 * i] = v->B(k + i); \
1232 r[2 * i + 1] = s->B(k + i); \
1234 for (i = 0; i < PACK_WIDTH * 2; i++, j++) { \
1240 void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\
1241 Reg *d, Reg *v, Reg *s) \
1243 uint16_t r[PACK_WIDTH]; \
1246 for (j = 0; j < 4 << SHIFT; ) { \
1247 int k = j + base * PACK_WIDTH / 2; \
1248 for (i = 0; i < PACK_WIDTH / 2; i++) { \
1249 r[2 * i] = v->W(k + i); \
1250 r[2 * i + 1] = s->W(k + i); \
1252 for (i = 0; i < PACK_WIDTH; i++, j++) { \
1258 void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\
1259 Reg *d, Reg *v, Reg *s) \
1261 uint32_t r[PACK_WIDTH / 2]; \
1264 for (j = 0; j < 2 << SHIFT; ) { \
1265 int k = j + base * PACK_WIDTH / 4; \
1266 for (i = 0; i < PACK_WIDTH / 4; i++) { \
1267 r[2 * i] = v->L(k + i); \
1268 r[2 * i + 1] = s->L(k + i); \
1270 for (i = 0; i < PACK_WIDTH / 2; i++, j++) { \
1277 void glue(helper_punpck ## base_name ## qdq, SUFFIX)( \
1278 CPUX86State *env, Reg *d, Reg *v, Reg *s) \
1283 for (i = 0; i < 1 << SHIFT; i += 2) { \
1284 r[0] = v->Q(base + i); \
1285 r[1] = s->Q(base + i); \
1287 d->Q(i + 1) = r[1]; \
1296 #undef PACK_HELPER_B
1300 /* 3DNow! float ops */
1302 void helper_pi2fd(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1304 d
->MMX_S(0) = int32_to_float32(s
->MMX_L(0), &env
->mmx_status
);
1305 d
->MMX_S(1) = int32_to_float32(s
->MMX_L(1), &env
->mmx_status
);
1308 void helper_pi2fw(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1310 d
->MMX_S(0) = int32_to_float32((int16_t)s
->MMX_W(0), &env
->mmx_status
);
1311 d
->MMX_S(1) = int32_to_float32((int16_t)s
->MMX_W(2), &env
->mmx_status
);
1314 void helper_pf2id(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1316 d
->MMX_L(0) = float32_to_int32_round_to_zero(s
->MMX_S(0), &env
->mmx_status
);
1317 d
->MMX_L(1) = float32_to_int32_round_to_zero(s
->MMX_S(1), &env
->mmx_status
);
1320 void helper_pf2iw(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1322 d
->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s
->MMX_S(0),
1324 d
->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s
->MMX_S(1),
1328 void helper_pfacc(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1332 r
= float32_add(d
->MMX_S(0), d
->MMX_S(1), &env
->mmx_status
);
1333 d
->MMX_S(1) = float32_add(s
->MMX_S(0), s
->MMX_S(1), &env
->mmx_status
);
1337 void helper_pfadd(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1339 d
->MMX_S(0) = float32_add(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
);
1340 d
->MMX_S(1) = float32_add(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
);
1343 void helper_pfcmpeq(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1345 d
->MMX_L(0) = float32_eq_quiet(d
->MMX_S(0), s
->MMX_S(0),
1346 &env
->mmx_status
) ? -1 : 0;
1347 d
->MMX_L(1) = float32_eq_quiet(d
->MMX_S(1), s
->MMX_S(1),
1348 &env
->mmx_status
) ? -1 : 0;
1351 void helper_pfcmpge(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1353 d
->MMX_L(0) = float32_le(s
->MMX_S(0), d
->MMX_S(0),
1354 &env
->mmx_status
) ? -1 : 0;
1355 d
->MMX_L(1) = float32_le(s
->MMX_S(1), d
->MMX_S(1),
1356 &env
->mmx_status
) ? -1 : 0;
1359 void helper_pfcmpgt(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1361 d
->MMX_L(0) = float32_lt(s
->MMX_S(0), d
->MMX_S(0),
1362 &env
->mmx_status
) ? -1 : 0;
1363 d
->MMX_L(1) = float32_lt(s
->MMX_S(1), d
->MMX_S(1),
1364 &env
->mmx_status
) ? -1 : 0;
1367 void helper_pfmax(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1369 if (float32_lt(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
)) {
1370 d
->MMX_S(0) = s
->MMX_S(0);
1372 if (float32_lt(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
)) {
1373 d
->MMX_S(1) = s
->MMX_S(1);
1377 void helper_pfmin(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1379 if (float32_lt(s
->MMX_S(0), d
->MMX_S(0), &env
->mmx_status
)) {
1380 d
->MMX_S(0) = s
->MMX_S(0);
1382 if (float32_lt(s
->MMX_S(1), d
->MMX_S(1), &env
->mmx_status
)) {
1383 d
->MMX_S(1) = s
->MMX_S(1);
1387 void helper_pfmul(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1389 d
->MMX_S(0) = float32_mul(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
);
1390 d
->MMX_S(1) = float32_mul(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
);
1393 void helper_pfnacc(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1397 r
= float32_sub(d
->MMX_S(0), d
->MMX_S(1), &env
->mmx_status
);
1398 d
->MMX_S(1) = float32_sub(s
->MMX_S(0), s
->MMX_S(1), &env
->mmx_status
);
1402 void helper_pfpnacc(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1406 r
= float32_sub(d
->MMX_S(0), d
->MMX_S(1), &env
->mmx_status
);
1407 d
->MMX_S(1) = float32_add(s
->MMX_S(0), s
->MMX_S(1), &env
->mmx_status
);
1411 void helper_pfrcp(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1413 d
->MMX_S(0) = float32_div(float32_one
, s
->MMX_S(0), &env
->mmx_status
);
1414 d
->MMX_S(1) = d
->MMX_S(0);
1417 void helper_pfrsqrt(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1419 d
->MMX_L(1) = s
->MMX_L(0) & 0x7fffffff;
1420 d
->MMX_S(1) = float32_div(float32_one
,
1421 float32_sqrt(d
->MMX_S(1), &env
->mmx_status
),
1423 d
->MMX_L(1) |= s
->MMX_L(0) & 0x80000000;
1424 d
->MMX_L(0) = d
->MMX_L(1);
1427 void helper_pfsub(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1429 d
->MMX_S(0) = float32_sub(d
->MMX_S(0), s
->MMX_S(0), &env
->mmx_status
);
1430 d
->MMX_S(1) = float32_sub(d
->MMX_S(1), s
->MMX_S(1), &env
->mmx_status
);
1433 void helper_pfsubr(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1435 d
->MMX_S(0) = float32_sub(s
->MMX_S(0), d
->MMX_S(0), &env
->mmx_status
);
1436 d
->MMX_S(1) = float32_sub(s
->MMX_S(1), d
->MMX_S(1), &env
->mmx_status
);
1439 void helper_pswapd(CPUX86State
*env
, MMXReg
*d
, MMXReg
*s
)
1444 d
->MMX_L(0) = s
->MMX_L(1);
1449 /* SSSE3 op helpers */
1450 void glue(helper_pshufb
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
1456 for (i
= 0; i
< 8; i
++) {
1457 r
[i
] = (s
->B(i
) & 0x80) ? 0 : (v
->B(s
->B(i
) & 7));
1459 for (i
= 0; i
< 8; i
++) {
1463 uint8_t r
[8 << SHIFT
];
1465 for (i
= 0; i
< 8 << SHIFT
; i
++) {
1467 r
[i
] = (s
->B(i
) & 0x80) ? 0 : v
->B(j
| (s
->B(i
) & 0xf));
1469 for (i
= 0; i
< 8 << SHIFT
; i
++) {
1475 #define SSE_HELPER_HW(name, F) \
1476 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
1478 uint16_t r[4 << SHIFT]; \
1480 for (k = 0; k < 4 << SHIFT; k += LANE_WIDTH / 2) { \
1481 for (i = j = 0; j < LANE_WIDTH / 2; i++, j += 2) { \
1482 r[i + k] = F(v->W(j + k), v->W(j + k + 1)); \
1484 for (j = 0; j < LANE_WIDTH / 2; i++, j += 2) { \
1485 r[i + k] = F(s->W(j + k), s->W(j + k + 1)); \
1488 for (i = 0; i < 4 << SHIFT; i++) { \
1493 #define SSE_HELPER_HL(name, F) \
1494 void glue(helper_ ## name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s) \
1496 uint32_t r[2 << SHIFT]; \
1498 for (k = 0; k < 2 << SHIFT; k += LANE_WIDTH / 4) { \
1499 for (i = j = 0; j < LANE_WIDTH / 4; i++, j += 2) { \
1500 r[i + k] = F(v->L(j + k), v->L(j + k + 1)); \
1502 for (j = 0; j < LANE_WIDTH / 4; i++, j += 2) { \
1503 r[i + k] = F(s->L(j + k), s->L(j + k + 1)); \
1506 for (i = 0; i < 2 << SHIFT; i++) { \
1511 SSE_HELPER_HW(phaddw
, FADD
)
1512 SSE_HELPER_HW(phsubw
, FSUB
)
1513 SSE_HELPER_HW(phaddsw
, FADDSW
)
1514 SSE_HELPER_HW(phsubsw
, FSUBSW
)
1515 SSE_HELPER_HL(phaddd
, FADD
)
1516 SSE_HELPER_HL(phsubd
, FSUB
)
1518 #undef SSE_HELPER_HW
1519 #undef SSE_HELPER_HL
1521 void glue(helper_pmaddubsw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
1524 for (i
= 0; i
< 4 << SHIFT
; i
++) {
1525 d
->W(i
) = satsw((int8_t)s
->B(i
* 2) * (uint8_t)v
->B(i
* 2) +
1526 (int8_t)s
->B(i
* 2 + 1) * (uint8_t)v
->B(i
* 2 + 1));
1530 #define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15)
1531 SSE_HELPER_W(helper_pmulhrsw
, FMULHRSW
)
1533 #define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d)
1534 #define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d)
1535 #define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d)
1536 SSE_HELPER_B(helper_psignb
, FSIGNB
)
1537 SSE_HELPER_W(helper_psignw
, FSIGNW
)
1538 SSE_HELPER_L(helper_psignd
, FSIGNL
)
1540 void glue(helper_palignr
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
,
1545 /* XXX could be checked during translation */
1546 if (imm
>= (SHIFT
? 32 : 16)) {
1547 for (i
= 0; i
< (1 << SHIFT
); i
++) {
1551 int shift
= imm
* 8;
1552 #define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
1554 d
->Q(0) = SHR(s
->Q(0), shift
- 0) |
1555 SHR(v
->Q(0), shift
- 64);
1557 for (i
= 0; i
< (1 << SHIFT
); i
+= 2) {
1560 r0
= SHR(s
->Q(i
), shift
- 0) |
1561 SHR(s
->Q(i
+ 1), shift
- 64) |
1562 SHR(v
->Q(i
), shift
- 128) |
1563 SHR(v
->Q(i
+ 1), shift
- 192);
1564 r1
= SHR(s
->Q(i
), shift
+ 64) |
1565 SHR(s
->Q(i
+ 1), shift
- 0) |
1566 SHR(v
->Q(i
), shift
- 64) |
1567 SHR(v
->Q(i
+ 1), shift
- 128);
1578 #define SSE_HELPER_V(name, elem, num, F) \
1579 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, \
1583 for (i = 0; i < num; i++) { \
1584 d->elem(i) = F(v->elem(i), s->elem(i), m->elem(i)); \
1588 #define SSE_HELPER_I(name, elem, num, F) \
1589 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *v, Reg *s, \
1593 for (i = 0; i < num; i++) { \
1595 d->elem(i) = F(v->elem(i), s->elem(i), (imm >> j) & 1); \
1599 /* SSE4.1 op helpers */
1600 #define FBLENDVB(v, s, m) ((m & 0x80) ? s : v)
1601 #define FBLENDVPS(v, s, m) ((m & 0x80000000) ? s : v)
1602 #define FBLENDVPD(v, s, m) ((m & 0x8000000000000000LL) ? s : v)
1603 SSE_HELPER_V(helper_pblendvb
, B
, 8 << SHIFT
, FBLENDVB
)
1604 SSE_HELPER_V(helper_blendvps
, L
, 2 << SHIFT
, FBLENDVPS
)
1605 SSE_HELPER_V(helper_blendvpd
, Q
, 1 << SHIFT
, FBLENDVPD
)
1607 void glue(helper_ptest
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
1609 uint64_t zf
= 0, cf
= 0;
1612 for (i
= 0; i
< 1 << SHIFT
; i
++) {
1613 zf
|= (s
->Q(i
) & d
->Q(i
));
1614 cf
|= (s
->Q(i
) & ~d
->Q(i
));
1616 CC_SRC
= (zf
? 0 : CC_Z
) | (cf
? 0 : CC_C
);
1617 CC_OP
= CC_OP_EFLAGS
;
1620 #define FMOVSLDUP(i) s->L((i) & ~1)
1621 #define FMOVSHDUP(i) s->L((i) | 1)
1622 #define FMOVDLDUP(i) s->Q((i) & ~1)
1624 #define SSE_HELPER_F(name, elem, num, F) \
1625 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
1628 for (int i = n; --i >= 0; ) { \
1629 d->elem(i) = F(i); \
1634 SSE_HELPER_F(helper_pmovsxbw
, W
, 4 << SHIFT
, (int8_t) s
->B
)
1635 SSE_HELPER_F(helper_pmovsxbd
, L
, 2 << SHIFT
, (int8_t) s
->B
)
1636 SSE_HELPER_F(helper_pmovsxbq
, Q
, 1 << SHIFT
, (int8_t) s
->B
)
1637 SSE_HELPER_F(helper_pmovsxwd
, L
, 2 << SHIFT
, (int16_t) s
->W
)
1638 SSE_HELPER_F(helper_pmovsxwq
, Q
, 1 << SHIFT
, (int16_t) s
->W
)
1639 SSE_HELPER_F(helper_pmovsxdq
, Q
, 1 << SHIFT
, (int32_t) s
->L
)
1640 SSE_HELPER_F(helper_pmovzxbw
, W
, 4 << SHIFT
, s
->B
)
1641 SSE_HELPER_F(helper_pmovzxbd
, L
, 2 << SHIFT
, s
->B
)
1642 SSE_HELPER_F(helper_pmovzxbq
, Q
, 1 << SHIFT
, s
->B
)
1643 SSE_HELPER_F(helper_pmovzxwd
, L
, 2 << SHIFT
, s
->W
)
1644 SSE_HELPER_F(helper_pmovzxwq
, Q
, 1 << SHIFT
, s
->W
)
1645 SSE_HELPER_F(helper_pmovzxdq
, Q
, 1 << SHIFT
, s
->L
)
1646 SSE_HELPER_F(helper_pmovsldup
, L
, 2 << SHIFT
, FMOVSLDUP
)
1647 SSE_HELPER_F(helper_pmovshdup
, L
, 2 << SHIFT
, FMOVSHDUP
)
1648 SSE_HELPER_F(helper_pmovdldup
, Q
, 1 << SHIFT
, FMOVDLDUP
)
1651 void glue(helper_pmuldq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
1655 for (i
= 0; i
< 1 << SHIFT
; i
++) {
1656 d
->Q(i
) = (int64_t)(int32_t) v
->L(2 * i
) * (int32_t) s
->L(2 * i
);
1660 void glue(helper_packusdw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
1665 for (i
= 0, j
= 0; i
<= 2 << SHIFT
; i
+= 8, j
+= 4) {
1666 r
[0] = satuw(v
->L(j
));
1667 r
[1] = satuw(v
->L(j
+ 1));
1668 r
[2] = satuw(v
->L(j
+ 2));
1669 r
[3] = satuw(v
->L(j
+ 3));
1670 r
[4] = satuw(s
->L(j
));
1671 r
[5] = satuw(s
->L(j
+ 1));
1672 r
[6] = satuw(s
->L(j
+ 2));
1673 r
[7] = satuw(s
->L(j
+ 3));
1674 for (k
= 0; k
< 8; k
++) {
1681 void glue(helper_phminposuw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
1685 if (s
->W(1) < s
->W(idx
)) {
1688 if (s
->W(2) < s
->W(idx
)) {
1691 if (s
->W(3) < s
->W(idx
)) {
1694 if (s
->W(4) < s
->W(idx
)) {
1697 if (s
->W(5) < s
->W(idx
)) {
1700 if (s
->W(6) < s
->W(idx
)) {
1703 if (s
->W(7) < s
->W(idx
)) {
1707 d
->W(0) = s
->W(idx
);
1714 void glue(helper_roundps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
1717 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
1718 signed char prev_rounding_mode
;
1721 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1722 if (!(mode
& (1 << 2))) {
1723 set_x86_rounding_mode(mode
& 3, &env
->sse_status
);
1726 for (i
= 0; i
< 2 << SHIFT
; i
++) {
1727 d
->ZMM_S(i
) = float32_round_to_int(s
->ZMM_S(i
), &env
->sse_status
);
1730 if (mode
& (1 << 3) && !(old_flags
& float_flag_inexact
)) {
1731 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1732 ~float_flag_inexact
,
1735 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1738 void glue(helper_roundpd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
1741 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
1742 signed char prev_rounding_mode
;
1745 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1746 if (!(mode
& (1 << 2))) {
1747 set_x86_rounding_mode(mode
& 3, &env
->sse_status
);
1750 for (i
= 0; i
< 1 << SHIFT
; i
++) {
1751 d
->ZMM_D(i
) = float64_round_to_int(s
->ZMM_D(i
), &env
->sse_status
);
1754 if (mode
& (1 << 3) && !(old_flags
& float_flag_inexact
)) {
1755 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1756 ~float_flag_inexact
,
1759 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1763 void glue(helper_roundss
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
,
1766 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
1767 signed char prev_rounding_mode
;
1770 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1771 if (!(mode
& (1 << 2))) {
1772 set_x86_rounding_mode(mode
& 3, &env
->sse_status
);
1775 d
->ZMM_S(0) = float32_round_to_int(s
->ZMM_S(0), &env
->sse_status
);
1776 for (i
= 1; i
< 2 << SHIFT
; i
++) {
1777 d
->ZMM_L(i
) = v
->ZMM_L(i
);
1780 if (mode
& (1 << 3) && !(old_flags
& float_flag_inexact
)) {
1781 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1782 ~float_flag_inexact
,
1785 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1788 void glue(helper_roundsd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
,
1791 uint8_t old_flags
= get_float_exception_flags(&env
->sse_status
);
1792 signed char prev_rounding_mode
;
1795 prev_rounding_mode
= env
->sse_status
.float_rounding_mode
;
1796 if (!(mode
& (1 << 2))) {
1797 set_x86_rounding_mode(mode
& 3, &env
->sse_status
);
1800 d
->ZMM_D(0) = float64_round_to_int(s
->ZMM_D(0), &env
->sse_status
);
1801 for (i
= 1; i
< 1 << SHIFT
; i
++) {
1802 d
->ZMM_Q(i
) = v
->ZMM_Q(i
);
1805 if (mode
& (1 << 3) && !(old_flags
& float_flag_inexact
)) {
1806 set_float_exception_flags(get_float_exception_flags(&env
->sse_status
) &
1807 ~float_flag_inexact
,
1810 env
->sse_status
.float_rounding_mode
= prev_rounding_mode
;
1814 #define FBLENDP(v, s, m) (m ? s : v)
1815 SSE_HELPER_I(helper_blendps
, L
, 2 << SHIFT
, FBLENDP
)
1816 SSE_HELPER_I(helper_blendpd
, Q
, 1 << SHIFT
, FBLENDP
)
1817 SSE_HELPER_I(helper_pblendw
, W
, 4 << SHIFT
, FBLENDP
)
1819 void glue(helper_dpps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
,
1822 float32 prod1
, prod2
, temp2
, temp3
, temp4
;
1825 for (i
= 0; i
< 2 << SHIFT
; i
+= 4) {
1827 * We must evaluate (A+B)+(C+D), not ((A+B)+C)+D
1828 * to correctly round the intermediate results
1830 if (mask
& (1 << 4)) {
1831 prod1
= float32_mul(v
->ZMM_S(i
), s
->ZMM_S(i
), &env
->sse_status
);
1833 prod1
= float32_zero
;
1835 if (mask
& (1 << 5)) {
1836 prod2
= float32_mul(v
->ZMM_S(i
+1), s
->ZMM_S(i
+1), &env
->sse_status
);
1838 prod2
= float32_zero
;
1840 temp2
= float32_add(prod1
, prod2
, &env
->sse_status
);
1841 if (mask
& (1 << 6)) {
1842 prod1
= float32_mul(v
->ZMM_S(i
+2), s
->ZMM_S(i
+2), &env
->sse_status
);
1844 prod1
= float32_zero
;
1846 if (mask
& (1 << 7)) {
1847 prod2
= float32_mul(v
->ZMM_S(i
+3), s
->ZMM_S(i
+3), &env
->sse_status
);
1849 prod2
= float32_zero
;
1851 temp3
= float32_add(prod1
, prod2
, &env
->sse_status
);
1852 temp4
= float32_add(temp2
, temp3
, &env
->sse_status
);
1854 d
->ZMM_S(i
) = (mask
& (1 << 0)) ? temp4
: float32_zero
;
1855 d
->ZMM_S(i
+1) = (mask
& (1 << 1)) ? temp4
: float32_zero
;
1856 d
->ZMM_S(i
+2) = (mask
& (1 << 2)) ? temp4
: float32_zero
;
1857 d
->ZMM_S(i
+3) = (mask
& (1 << 3)) ? temp4
: float32_zero
;
1862 /* Oddly, there is no ymm version of dppd */
1863 void glue(helper_dppd
, SUFFIX
)(CPUX86State
*env
,
1864 Reg
*d
, Reg
*v
, Reg
*s
, uint32_t mask
)
1866 float64 prod1
, prod2
, temp2
;
1868 if (mask
& (1 << 4)) {
1869 prod1
= float64_mul(v
->ZMM_D(0), s
->ZMM_D(0), &env
->sse_status
);
1871 prod1
= float64_zero
;
1873 if (mask
& (1 << 5)) {
1874 prod2
= float64_mul(v
->ZMM_D(1), s
->ZMM_D(1), &env
->sse_status
);
1876 prod2
= float64_zero
;
1878 temp2
= float64_add(prod1
, prod2
, &env
->sse_status
);
1879 d
->ZMM_D(0) = (mask
& (1 << 0)) ? temp2
: float64_zero
;
1880 d
->ZMM_D(1) = (mask
& (1 << 1)) ? temp2
: float64_zero
;
1884 void glue(helper_mpsadbw
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
,
1890 for (j
= 0; j
< 4 << SHIFT
; ) {
1891 int s0
= (j
* 2) + ((offset
& 3) << 2);
1892 int d0
= (j
* 2) + ((offset
& 4) << 0);
1893 for (i
= 0; i
< LANE_WIDTH
/ 2; i
++, d0
++) {
1895 r
[i
] += abs1(v
->B(d0
+ 0) - s
->B(s0
+ 0));
1896 r
[i
] += abs1(v
->B(d0
+ 1) - s
->B(s0
+ 1));
1897 r
[i
] += abs1(v
->B(d0
+ 2) - s
->B(s0
+ 2));
1898 r
[i
] += abs1(v
->B(d0
+ 3) - s
->B(s0
+ 3));
1900 for (i
= 0; i
< LANE_WIDTH
/ 2; i
++, j
++) {
1907 /* SSE4.2 op helpers */
1909 static inline int pcmp_elen(CPUX86State
*env
, int reg
, uint32_t ctrl
)
1911 target_long val
, limit
;
1913 /* Presence of REX.W is indicated by a bit higher than 7 set */
1915 val
= (target_long
)env
->regs
[reg
];
1917 val
= (int32_t)env
->regs
[reg
];
1924 if ((val
> limit
) || (val
< -limit
)) {
1930 static inline int pcmp_ilen(Reg
*r
, uint8_t ctrl
)
1935 while (val
< 8 && r
->W(val
)) {
1939 while (val
< 16 && r
->B(val
)) {
1947 static inline int pcmp_val(Reg
*r
, uint8_t ctrl
, int i
)
1949 switch ((ctrl
>> 0) & 3) {
1955 return (int8_t)r
->B(i
);
1958 return (int16_t)r
->W(i
);
1962 static inline unsigned pcmpxstrx(CPUX86State
*env
, Reg
*d
, Reg
*s
,
1963 uint8_t ctrl
, int valids
, int validd
)
1965 unsigned int res
= 0;
1968 int upper
= (ctrl
& 1) ? 7 : 15;
1973 CC_SRC
= (valids
< upper
? CC_Z
: 0) | (validd
< upper
? CC_S
: 0);
1974 CC_OP
= CC_OP_EFLAGS
;
1976 switch ((ctrl
>> 2) & 3) {
1978 for (j
= valids
; j
>= 0; j
--) {
1980 v
= pcmp_val(s
, ctrl
, j
);
1981 for (i
= validd
; i
>= 0; i
--) {
1982 res
|= (v
== pcmp_val(d
, ctrl
, i
));
1987 for (j
= valids
; j
>= 0; j
--) {
1989 v
= pcmp_val(s
, ctrl
, j
);
1990 for (i
= ((validd
- 1) | 1); i
>= 0; i
-= 2) {
1991 res
|= (pcmp_val(d
, ctrl
, i
- 0) >= v
&&
1992 pcmp_val(d
, ctrl
, i
- 1) <= v
);
1997 res
= (1 << (upper
- MAX(valids
, validd
))) - 1;
1998 res
<<= MAX(valids
, validd
) - MIN(valids
, validd
);
1999 for (i
= MIN(valids
, validd
); i
>= 0; i
--) {
2001 v
= pcmp_val(s
, ctrl
, i
);
2002 res
|= (v
== pcmp_val(d
, ctrl
, i
));
2007 res
= (2 << upper
) - 1;
2010 for (j
= valids
== upper
? valids
: valids
- validd
; j
>= 0; j
--) {
2013 for (i
= MIN(valids
- j
, validd
); i
>= 0; i
--) {
2014 v
&= (pcmp_val(s
, ctrl
, i
+ j
) == pcmp_val(d
, ctrl
, i
));
2021 switch ((ctrl
>> 4) & 3) {
2023 res
^= (2 << upper
) - 1;
2026 res
^= (1 << (valids
+ 1)) - 1;
2040 void glue(helper_pcmpestri
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
2043 unsigned int res
= pcmpxstrx(env
, d
, s
, ctrl
,
2044 pcmp_elen(env
, R_EDX
, ctrl
),
2045 pcmp_elen(env
, R_EAX
, ctrl
));
2048 env
->regs
[R_ECX
] = (ctrl
& (1 << 6)) ? 31 - clz32(res
) : ctz32(res
);
2050 env
->regs
[R_ECX
] = 16 >> (ctrl
& (1 << 0));
2054 void glue(helper_pcmpestrm
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
2058 unsigned int res
= pcmpxstrx(env
, d
, s
, ctrl
,
2059 pcmp_elen(env
, R_EDX
, ctrl
),
2060 pcmp_elen(env
, R_EAX
, ctrl
));
2062 if ((ctrl
>> 6) & 1) {
2064 for (i
= 0; i
< 8; i
++, res
>>= 1) {
2065 env
->xmm_regs
[0].W(i
) = (res
& 1) ? ~0 : 0;
2068 for (i
= 0; i
< 16; i
++, res
>>= 1) {
2069 env
->xmm_regs
[0].B(i
) = (res
& 1) ? ~0 : 0;
2073 env
->xmm_regs
[0].Q(1) = 0;
2074 env
->xmm_regs
[0].Q(0) = res
;
2078 void glue(helper_pcmpistri
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
2081 unsigned int res
= pcmpxstrx(env
, d
, s
, ctrl
,
2083 pcmp_ilen(d
, ctrl
));
2086 env
->regs
[R_ECX
] = (ctrl
& (1 << 6)) ? 31 - clz32(res
) : ctz32(res
);
2088 env
->regs
[R_ECX
] = 16 >> (ctrl
& (1 << 0));
2092 void glue(helper_pcmpistrm
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
2096 unsigned int res
= pcmpxstrx(env
, d
, s
, ctrl
,
2098 pcmp_ilen(d
, ctrl
));
2100 if ((ctrl
>> 6) & 1) {
2102 for (i
= 0; i
< 8; i
++, res
>>= 1) {
2103 env
->xmm_regs
[0].W(i
) = (res
& 1) ? ~0 : 0;
2106 for (i
= 0; i
< 16; i
++, res
>>= 1) {
2107 env
->xmm_regs
[0].B(i
) = (res
& 1) ? ~0 : 0;
2111 env
->xmm_regs
[0].Q(1) = 0;
2112 env
->xmm_regs
[0].Q(0) = res
;
2116 #define CRCPOLY 0x1edc6f41
2117 #define CRCPOLY_BITREV 0x82f63b78
2118 target_ulong
helper_crc32(uint32_t crc1
, target_ulong msg
, uint32_t len
)
2120 target_ulong crc
= (msg
& ((target_ulong
) -1 >>
2121 (TARGET_LONG_BITS
- len
))) ^ crc1
;
2124 crc
= (crc
>> 1) ^ ((crc
& 1) ? CRCPOLY_BITREV
: 0);
2132 void glue(helper_pclmulqdq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
,
2135 int a_idx
= (ctrl
& 1) != 0;
2136 int b_idx
= (ctrl
& 16) != 0;
2138 for (int i
= 0; i
< SHIFT
; i
++) {
2139 uint64_t a
= v
->Q(2 * i
+ a_idx
);
2140 uint64_t b
= s
->Q(2 * i
+ b_idx
);
2141 Int128
*r
= (Int128
*)&d
->ZMM_X(i
);
2143 *r
= clmul_64(a
, b
);
2147 void glue(helper_aesdec
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2149 for (int i
= 0; i
< SHIFT
; i
++) {
2150 AESState
*ad
= (AESState
*)&d
->ZMM_X(i
);
2151 AESState
*st
= (AESState
*)&v
->ZMM_X(i
);
2152 AESState
*rk
= (AESState
*)&s
->ZMM_X(i
);
2154 aesdec_ISB_ISR_IMC_AK(ad
, st
, rk
, false);
2158 void glue(helper_aesdeclast
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2160 for (int i
= 0; i
< SHIFT
; i
++) {
2161 AESState
*ad
= (AESState
*)&d
->ZMM_X(i
);
2162 AESState
*st
= (AESState
*)&v
->ZMM_X(i
);
2163 AESState
*rk
= (AESState
*)&s
->ZMM_X(i
);
2165 aesdec_ISB_ISR_AK(ad
, st
, rk
, false);
2169 void glue(helper_aesenc
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2171 for (int i
= 0; i
< SHIFT
; i
++) {
2172 AESState
*ad
= (AESState
*)&d
->ZMM_X(i
);
2173 AESState
*st
= (AESState
*)&v
->ZMM_X(i
);
2174 AESState
*rk
= (AESState
*)&s
->ZMM_X(i
);
2176 aesenc_SB_SR_MC_AK(ad
, st
, rk
, false);
2180 void glue(helper_aesenclast
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2182 for (int i
= 0; i
< SHIFT
; i
++) {
2183 AESState
*ad
= (AESState
*)&d
->ZMM_X(i
);
2184 AESState
*st
= (AESState
*)&v
->ZMM_X(i
);
2185 AESState
*rk
= (AESState
*)&s
->ZMM_X(i
);
2187 aesenc_SB_SR_AK(ad
, st
, rk
, false);
2192 void glue(helper_aesimc
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
2194 AESState
*ad
= (AESState
*)&d
->ZMM_X(0);
2195 AESState
*st
= (AESState
*)&s
->ZMM_X(0);
2197 aesdec_IMC(ad
, st
, false);
2200 void glue(helper_aeskeygenassist
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
,
2206 for (i
= 0 ; i
< 4 ; i
++) {
2207 d
->B(i
) = AES_sbox
[tmp
.B(i
+ 4)];
2208 d
->B(i
+ 8) = AES_sbox
[tmp
.B(i
+ 12)];
2210 d
->L(1) = (d
->L(0) << 24 | d
->L(0) >> 8) ^ ctrl
;
2211 d
->L(3) = (d
->L(2) << 24 | d
->L(2) >> 8) ^ ctrl
;
2217 void glue(helper_vpermilpd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2222 for (i
= 0; i
< 1 << SHIFT
; i
+= 2) {
2223 r0
= v
->Q(i
+ ((s
->Q(i
) >> 1) & 1));
2224 r1
= v
->Q(i
+ ((s
->Q(i
+1) >> 1) & 1));
2230 void glue(helper_vpermilps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2232 uint32_t r0
, r1
, r2
, r3
;
2235 for (i
= 0; i
< 2 << SHIFT
; i
+= 4) {
2236 r0
= v
->L(i
+ (s
->L(i
) & 3));
2237 r1
= v
->L(i
+ (s
->L(i
+1) & 3));
2238 r2
= v
->L(i
+ (s
->L(i
+2) & 3));
2239 r3
= v
->L(i
+ (s
->L(i
+3) & 3));
2247 void glue(helper_vpermilpd_imm
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t order
)
2252 for (i
= 0; i
< 1 << SHIFT
; i
+= 2) {
2253 r0
= s
->Q(i
+ ((order
>> 0) & 1));
2254 r1
= s
->Q(i
+ ((order
>> 1) & 1));
2262 void glue(helper_vpermilps_imm
, SUFFIX
)(Reg
*d
, Reg
*s
, uint32_t order
)
2264 uint32_t r0
, r1
, r2
, r3
;
2267 for (i
= 0; i
< 2 << SHIFT
; i
+= 4) {
2268 r0
= s
->L(i
+ ((order
>> 0) & 3));
2269 r1
= s
->L(i
+ ((order
>> 2) & 3));
2270 r2
= s
->L(i
+ ((order
>> 4) & 3));
2271 r3
= s
->L(i
+ ((order
>> 6) & 3));
2280 #define FPSRLVD(x, c) (c < 32 ? ((x) >> c) : 0)
2281 #define FPSRLVQ(x, c) (c < 64 ? ((x) >> c) : 0)
2282 #define FPSRAVD(x, c) ((int32_t)(x) >> (c < 32 ? c : 31))
2283 #define FPSRAVQ(x, c) ((int64_t)(x) >> (c < 64 ? c : 63))
2284 #define FPSLLVD(x, c) (c < 32 ? ((x) << c) : 0)
2285 #define FPSLLVQ(x, c) (c < 64 ? ((x) << c) : 0)
2288 SSE_HELPER_L(helper_vpsrlvd
, FPSRLVD
)
2289 SSE_HELPER_L(helper_vpsravd
, FPSRAVD
)
2290 SSE_HELPER_L(helper_vpsllvd
, FPSLLVD
)
2292 SSE_HELPER_Q(helper_vpsrlvq
, FPSRLVQ
)
2293 SSE_HELPER_Q(helper_vpsravq
, FPSRAVQ
)
2294 SSE_HELPER_Q(helper_vpsllvq
, FPSLLVQ
)
2296 void glue(helper_vtestps
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
2298 uint32_t zf
= 0, cf
= 0;
2301 for (i
= 0; i
< 2 << SHIFT
; i
++) {
2302 zf
|= (s
->L(i
) & d
->L(i
));
2303 cf
|= (s
->L(i
) & ~d
->L(i
));
2305 CC_SRC
= ((zf
>> 31) ? 0 : CC_Z
) | ((cf
>> 31) ? 0 : CC_C
);
2306 CC_OP
= CC_OP_EFLAGS
;
2309 void glue(helper_vtestpd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*s
)
2311 uint64_t zf
= 0, cf
= 0;
2314 for (i
= 0; i
< 1 << SHIFT
; i
++) {
2315 zf
|= (s
->Q(i
) & d
->Q(i
));
2316 cf
|= (s
->Q(i
) & ~d
->Q(i
));
2318 CC_SRC
= ((zf
>> 63) ? 0 : CC_Z
) | ((cf
>> 63) ? 0 : CC_C
);
2319 CC_OP
= CC_OP_EFLAGS
;
2322 void glue(helper_vpmaskmovd_st
, SUFFIX
)(CPUX86State
*env
,
2323 Reg
*v
, Reg
*s
, target_ulong a0
)
2327 for (i
= 0; i
< (2 << SHIFT
); i
++) {
2328 if (v
->L(i
) >> 31) {
2329 cpu_stl_data_ra(env
, a0
+ i
* 4, s
->L(i
), GETPC());
2334 void glue(helper_vpmaskmovq_st
, SUFFIX
)(CPUX86State
*env
,
2335 Reg
*v
, Reg
*s
, target_ulong a0
)
2339 for (i
= 0; i
< (1 << SHIFT
); i
++) {
2340 if (v
->Q(i
) >> 63) {
2341 cpu_stq_data_ra(env
, a0
+ i
* 8, s
->Q(i
), GETPC());
2346 void glue(helper_vpmaskmovd
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2350 for (i
= 0; i
< (2 << SHIFT
); i
++) {
2351 d
->L(i
) = (v
->L(i
) >> 31) ? s
->L(i
) : 0;
2355 void glue(helper_vpmaskmovq
, SUFFIX
)(CPUX86State
*env
, Reg
*d
, Reg
*v
, Reg
*s
)
2359 for (i
= 0; i
< (1 << SHIFT
); i
++) {
2360 d
->Q(i
) = (v
->Q(i
) >> 63) ? s
->Q(i
) : 0;
2364 void glue(helper_vpgatherdd
, SUFFIX
)(CPUX86State
*env
,
2365 Reg
*d
, Reg
*v
, Reg
*s
, target_ulong a0
, unsigned scale
)
2368 for (i
= 0; i
< (2 << SHIFT
); i
++) {
2369 if (v
->L(i
) >> 31) {
2370 target_ulong addr
= a0
2371 + ((target_ulong
)(int32_t)s
->L(i
) << scale
);
2372 d
->L(i
) = cpu_ldl_data_ra(env
, addr
, GETPC());
2378 void glue(helper_vpgatherdq
, SUFFIX
)(CPUX86State
*env
,
2379 Reg
*d
, Reg
*v
, Reg
*s
, target_ulong a0
, unsigned scale
)
2382 for (i
= 0; i
< (1 << SHIFT
); i
++) {
2383 if (v
->Q(i
) >> 63) {
2384 target_ulong addr
= a0
2385 + ((target_ulong
)(int32_t)s
->L(i
) << scale
);
2386 d
->Q(i
) = cpu_ldq_data_ra(env
, addr
, GETPC());
2392 void glue(helper_vpgatherqd
, SUFFIX
)(CPUX86State
*env
,
2393 Reg
*d
, Reg
*v
, Reg
*s
, target_ulong a0
, unsigned scale
)
2396 for (i
= 0; i
< (1 << SHIFT
); i
++) {
2397 if (v
->L(i
) >> 31) {
2398 target_ulong addr
= a0
2399 + ((target_ulong
)(int64_t)s
->Q(i
) << scale
);
2400 d
->L(i
) = cpu_ldl_data_ra(env
, addr
, GETPC());
2404 for (i
/= 2; i
< 1 << SHIFT
; i
++) {
2410 void glue(helper_vpgatherqq
, SUFFIX
)(CPUX86State
*env
,
2411 Reg
*d
, Reg
*v
, Reg
*s
, target_ulong a0
, unsigned scale
)
2414 for (i
= 0; i
< (1 << SHIFT
); i
++) {
2415 if (v
->Q(i
) >> 63) {
2416 target_ulong addr
= a0
2417 + ((target_ulong
)(int64_t)s
->Q(i
) << scale
);
2418 d
->Q(i
) = cpu_ldq_data_ra(env
, addr
, GETPC());
2426 void helper_vpermdq_ymm(Reg
*d
, Reg
*v
, Reg
*s
, uint32_t order
)
2428 uint64_t r0
, r1
, r2
, r3
;
2430 switch (order
& 3) {
2447 default: /* default case added to help the compiler to avoid warnings */
2448 g_assert_not_reached();
2450 switch ((order
>> 4) & 3) {
2467 default: /* default case added to help the compiler to avoid warnings */
2468 g_assert_not_reached();
2484 void helper_vpermq_ymm(Reg
*d
, Reg
*s
, uint32_t order
)
2486 uint64_t r0
, r1
, r2
, r3
;
2487 r0
= s
->Q(order
& 3);
2488 r1
= s
->Q((order
>> 2) & 3);
2489 r2
= s
->Q((order
>> 4) & 3);
2490 r3
= s
->Q((order
>> 6) & 3);
2497 void helper_vpermd_ymm(Reg
*d
, Reg
*v
, Reg
*s
)
2502 for (i
= 0; i
< 8; i
++) {
2503 r
[i
] = s
->L(v
->L(i
) & 7);
2505 for (i
= 0; i
< 8; i
++) {
2511 /* FMA3 op helpers */
2513 #define SSE_HELPER_FMAS(name, elem, F) \
2514 void name(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, int flags) \
2516 d->elem(0) = F(a->elem(0), b->elem(0), c->elem(0), flags, &env->sse_status); \
2518 #define SSE_HELPER_FMAP(name, elem, num, F) \
2519 void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *a, Reg *b, Reg *c, \
2520 int flags, int flip) \
2523 for (i = 0; i < num; i++) { \
2524 d->elem(i) = F(a->elem(i), b->elem(i), c->elem(i), flags, &env->sse_status); \
2529 SSE_HELPER_FMAS(helper_fma4ss
, ZMM_S
, float32_muladd
)
2530 SSE_HELPER_FMAS(helper_fma4sd
, ZMM_D
, float64_muladd
)
2534 SSE_HELPER_FMAP(helper_fma4ps
, ZMM_S
, 2 << SHIFT
, float32_muladd
)
2535 SSE_HELPER_FMAP(helper_fma4pd
, ZMM_D
, 1 << SHIFT
, float64_muladd
)
2539 #define SSE_HELPER_SHA1RNDS4(name, F, K) \
2540 void name(Reg *d, Reg *a, Reg *b) \
2542 uint32_t A, B, C, D, E, t, i; \
2550 for (i = 0; i <= 3; i++) { \
2551 t = F(B, C, D) + rol32(A, 5) + b->L(3 - i) + E + K; \
2565 #define SHA1_F0(b, c, d) (((b) & (c)) ^ (~(b) & (d)))
2566 #define SHA1_F1(b, c, d) ((b) ^ (c) ^ (d))
2567 #define SHA1_F2(b, c, d) (((b) & (c)) ^ ((b) & (d)) ^ ((c) & (d)))
2569 SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f0
, SHA1_F0
, 0x5A827999)
2570 SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f1
, SHA1_F1
, 0x6ED9EBA1)
2571 SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f2
, SHA1_F2
, 0x8F1BBCDC)
2572 SSE_HELPER_SHA1RNDS4(helper_sha1rnds4_f3
, SHA1_F1
, 0xCA62C1D6)
2574 void helper_sha1nexte(Reg
*d
, Reg
*a
, Reg
*b
)
2576 d
->L(3) = b
->L(3) + rol32(a
->L(3), 30);
2582 void helper_sha1msg1(Reg
*d
, Reg
*a
, Reg
*b
)
2584 /* These could be overwritten by the first two assignments, save them. */
2585 uint32_t b3
= b
->L(3);
2586 uint32_t b2
= b
->L(2);
2588 d
->L(3) = a
->L(3) ^ a
->L(1);
2589 d
->L(2) = a
->L(2) ^ a
->L(0);
2590 d
->L(1) = a
->L(1) ^ b3
;
2591 d
->L(0) = a
->L(0) ^ b2
;
2594 void helper_sha1msg2(Reg
*d
, Reg
*a
, Reg
*b
)
2596 d
->L(3) = rol32(a
->L(3) ^ b
->L(2), 1);
2597 d
->L(2) = rol32(a
->L(2) ^ b
->L(1), 1);
2598 d
->L(1) = rol32(a
->L(1) ^ b
->L(0), 1);
2599 d
->L(0) = rol32(a
->L(0) ^ d
->L(3), 1);
2602 #define SHA256_CH(e, f, g) (((e) & (f)) ^ (~(e) & (g)))
2603 #define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
2605 #define SHA256_RNDS0(w) (ror32((w), 2) ^ ror32((w), 13) ^ ror32((w), 22))
2606 #define SHA256_RNDS1(w) (ror32((w), 6) ^ ror32((w), 11) ^ ror32((w), 25))
2607 #define SHA256_MSGS0(w) (ror32((w), 7) ^ ror32((w), 18) ^ ((w) >> 3))
2608 #define SHA256_MSGS1(w) (ror32((w), 17) ^ ror32((w), 19) ^ ((w) >> 10))
2610 void helper_sha256rnds2(Reg
*d
, Reg
*a
, Reg
*b
, uint32_t wk0
, uint32_t wk1
)
2614 uint32_t A
= b
->L(3);
2615 uint32_t B
= b
->L(2);
2616 uint32_t C
= a
->L(3);
2617 uint32_t D
= a
->L(2);
2618 uint32_t E
= b
->L(1);
2619 uint32_t F
= b
->L(0);
2620 uint32_t G
= a
->L(1);
2621 uint32_t H
= a
->L(0);
2624 t
= SHA256_CH(E
, F
, G
) + SHA256_RNDS1(E
) + wk0
+ H
;
2625 AA
= t
+ SHA256_MAJ(A
, B
, C
) + SHA256_RNDS0(A
);
2628 /* These will be B and F at the end of the odd round */
2632 D
= C
, C
= B
, B
= A
, A
= AA
;
2633 H
= G
, G
= F
, F
= E
, E
= EE
;
2636 t
= SHA256_CH(E
, F
, G
) + SHA256_RNDS1(E
) + wk1
+ H
;
2637 AA
= t
+ SHA256_MAJ(A
, B
, C
) + SHA256_RNDS0(A
);
2644 void helper_sha256msg1(Reg
*d
, Reg
*a
, Reg
*b
)
2646 /* b->L(0) could be overwritten by the first assignment, save it. */
2647 uint32_t b0
= b
->L(0);
2649 d
->L(0) = a
->L(0) + SHA256_MSGS0(a
->L(1));
2650 d
->L(1) = a
->L(1) + SHA256_MSGS0(a
->L(2));
2651 d
->L(2) = a
->L(2) + SHA256_MSGS0(a
->L(3));
2652 d
->L(3) = a
->L(3) + SHA256_MSGS0(b0
);
2655 void helper_sha256msg2(Reg
*d
, Reg
*a
, Reg
*b
)
2657 /* Earlier assignments cannot overwrite any of the two operands. */
2658 d
->L(0) = a
->L(0) + SHA256_MSGS1(b
->L(2));
2659 d
->L(1) = a
->L(1) + SHA256_MSGS1(b
->L(3));
2660 /* Yes, this reuses the previously computed values. */
2661 d
->L(2) = a
->L(2) + SHA256_MSGS1(d
->L(0));
2662 d
->L(3) = a
->L(3) + SHA256_MSGS1(d
->L(1));