/*
 * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
 *
 * Copyright (c) 2014 Imagination Technologies
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
/* Data format min and max values */
#define DF_BITS(df) (1 << ((df) + 3))

#define DF_MAX_INT(df)  (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
#define M_MAX_INT(m)    (int64_t)((1LL << ((m) - 1)) - 1)

#define DF_MIN_INT(df)  (int64_t)(-(1LL << (DF_BITS(df) - 1)))
#define M_MIN_INT(m)    (int64_t)(-(1LL << ((m) - 1)))

#define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
#define M_MAX_UINT(m)   (uint64_t)(-1ULL >> (64 - (m)))

#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
#define SIGNED(x, df) \
    ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
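
/*
 * Illustrative note, assuming the usual encoding DF_BYTE == 0, DF_HALF == 1,
 * DF_WORD == 2, DF_DOUBLE == 3 and MSA_WRLEN == 128:
 *   DF_BITS gives 8, 16, 32, 64 and DF_ELEMENTS gives 16, 8, 4, 2,
 *   e.g. DF_MAX_INT(DF_BYTE) == 127, DF_MIN_INT(DF_BYTE) == -128,
 *   DF_MAX_UINT(DF_BYTE) == 255.
 */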
static inline void msa_move_v(wr_t *pwd, wr_t *pws)
{
    uint32_t i;

    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        pwd->d[i] = pws->d[i];
    }
}
#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \
void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
        uint32_t i8) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        DEST = OPERATION; \
    } \
}
MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8)
MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8)
MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8))
MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8)

#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \
            UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df)
MSA_FN_IMM8(bmnzi_b, pwd->b[i],
        BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))

#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \
            UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df)
MSA_FN_IMM8(bmzi_b, pwd->b[i],
        BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))

#define BIT_SELECT(dest, arg1, arg2, df) \
            UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df)
MSA_FN_IMM8(bseli_b, pwd->b[i],
        BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE))
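
/*
 * Worked example (illustrative values only): for a byte element with
 * pwd->b[i] = 0xF0, pws->b[i] = 0x0F and i8 = 0x33,
 *   BMNZI: (0xF0 & ~0x33) | (0x0F & 0x33)  = 0xC0 | 0x03 = 0xC3
 *   BMZI:  (0xF0 &  0x33) | (0x0F & ~0x33) = 0x30 | 0x0C = 0x3C
 *   BSELI: (0x0F & ~0xF0) | (0x33 &  0xF0) = 0x0F | 0x30 = 0x3F
 * i.e. the mask operand chooses, bit by bit, which source supplies each bit.
 */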
#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03))
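
/*
 * Worked example (illustrative values only): within each group of four
 * elements, SHF_POS picks the source element selected by a 2-bit field of
 * imm. With imm = 0x1b (0b00011011):
 *   SHF_POS(0, 0x1b) = 0 + ((0x1b >> 0) & 3) = 3
 *   SHF_POS(1, 0x1b) = 0 + ((0x1b >> 2) & 3) = 2
 *   SHF_POS(2, 0x1b) = 0 + ((0x1b >> 4) & 3) = 1
 *   SHF_POS(3, 0x1b) = 0 + ((0x1b >> 6) & 3) = 0
 * so SHF with 0x1b reverses every group of four elements.
 */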
void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t imm)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t wx, *pwx = &wx;
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwx->b[i] = pws->b[SHF_POS(i, imm)];
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwx->h[i] = pws->h[SHF_POS(i, imm)];
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwx->w[i] = pws->w[SHF_POS(i, imm)];
        }
        break;
    default:
        assert(0);
    }
    msa_move_v(pwd, pwx);
}
#define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \
void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
        uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    uint32_t i; \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        DEST = OPERATION; \
    } \
}
MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i])
MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i])
MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i]))
MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i])
MSA_FN_VECTOR(bmnz_v, pwd->d[i],
        BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
MSA_FN_VECTOR(bmz_v, pwd->d[i],
        BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
MSA_FN_VECTOR(bsel_v, pwd->d[i],
        BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
#undef BIT_MOVE_IF_NOT_ZERO
#undef BIT_MOVE_IF_ZERO
static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 + arg2;
}

static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 - arg2;
}

static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 == arg2 ? -1 : 0;
}

static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 <= arg2 ? -1 : 0;
}

static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 <= u_arg2 ? -1 : 0;
}

static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 < arg2 ? -1 : 0;
}

static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? -1 : 0;
}

static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 > arg2 ? arg1 : arg2;
}

static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 > u_arg2 ? arg1 : arg2;
}

static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 < arg2 ? arg1 : arg2;
}

static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? arg1 : arg2;
}
#define MSA_BINOP_IMM_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
                        uint32_t wd, uint32_t ws, int32_t u5) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
 \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_BINOP_IMM_DF(addvi, addv)
MSA_BINOP_IMM_DF(subvi, subv)
MSA_BINOP_IMM_DF(ceqi, ceq)
MSA_BINOP_IMM_DF(clei_s, cle_s)
MSA_BINOP_IMM_DF(clei_u, cle_u)
MSA_BINOP_IMM_DF(clti_s, clt_s)
MSA_BINOP_IMM_DF(clti_u, clt_u)
MSA_BINOP_IMM_DF(maxi_s, max_s)
MSA_BINOP_IMM_DF(maxi_u, max_u)
MSA_BINOP_IMM_DF(mini_s, min_s)
MSA_BINOP_IMM_DF(mini_u, min_u)
#undef MSA_BINOP_IMM_DF
void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       int32_t s10)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwd->b[i] = (int8_t)s10;
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwd->h[i] = (int16_t)s10;
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwd->w[i] = (int32_t)s10;
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            pwd->d[i] = (int64_t)s10;
        }
        break;
    default:
        assert(0);
    }
}
/* Data format bit position and unsigned values */
#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df))
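
/*
 * Illustrative note: shift amounts are taken modulo the element width, e.g.
 * BIT_POSITION(65, DF_BYTE) == 65 % 8 == 1, so shifting a byte element by 65
 * behaves like shifting it by 1 (values here are examples, not from the
 * instruction set manual).
 */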
static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 << b_arg2;
}

static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 >> b_arg2;
}

static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return u_arg1 >> b_arg2;
}

static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 & (~(1LL << b_arg2)), df);
}

static inline int64_t msa_bset_df(uint32_t df, int64_t arg1,
        int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 | (1LL << b_arg2), df);
}

static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 ^ (1LL << b_arg2), df);
}

static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
        return u_arg1;
    } else {
        return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) |
               UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df);
    }
}

static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
        return u_arg1;
    } else {
        return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) |
               UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df);
    }
}

static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m)
{
    return arg < M_MIN_INT(m+1) ? M_MIN_INT(m+1) :
           arg > M_MAX_INT(m+1) ? M_MAX_INT(m+1) :
                                  arg;
}

static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m)
{
    uint64_t u_arg = UNSIGNED(arg, df);
    return u_arg < M_MAX_UINT(m+1) ? u_arg :
                                     M_MAX_UINT(m+1);
}
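
/*
 * Worked example (illustrative values only): with m == 6, msa_sat_s_df clamps
 * to the signed 7-bit range, so msa_sat_s_df(DF_BYTE, 100, 6) == 63 and
 * msa_sat_s_df(DF_BYTE, -100, 6) == -64, while msa_sat_u_df clamps to
 * [0, 127], e.g. msa_sat_u_df(DF_BYTE, 200, 6) == 127.
 */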
static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    if (b_arg2 == 0) {
        return arg1;
    } else {
        int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1;
        return (arg1 >> b_arg2) + r_bit;
    }
}

static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    if (b_arg2 == 0) {
        return u_arg1;
    } else {
        uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1;
        return (u_arg1 >> b_arg2) + r_bit;
    }
}
#define MSA_BINOP_IMMU_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
                        uint32_t ws, uint32_t u5) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
 \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_BINOP_IMMU_DF(slli, sll)
MSA_BINOP_IMMU_DF(srai, sra)
MSA_BINOP_IMMU_DF(srli, srl)
MSA_BINOP_IMMU_DF(bclri, bclr)
MSA_BINOP_IMMU_DF(bseti, bset)
MSA_BINOP_IMMU_DF(bnegi, bneg)
MSA_BINOP_IMMU_DF(sat_s, sat_s)
MSA_BINOP_IMMU_DF(sat_u, sat_u)
MSA_BINOP_IMMU_DF(srari, srar)
MSA_BINOP_IMMU_DF(srlri, srlr)
#undef MSA_BINOP_IMMU_DF
#define MSA_TEROP_IMMU_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
                        uint32_t wd, uint32_t ws, uint32_t u5) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
 \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
                                            u5); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
                                            u5); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
                                            u5); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
                                            u5); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_TEROP_IMMU_DF(binsli, binsl)
MSA_TEROP_IMMU_DF(binsri, binsr)
#undef MSA_TEROP_IMMU_DF
static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 > abs_arg2 ? arg1 : arg2;
}

static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 < abs_arg2 ? arg1 : arg2;
}

static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 + abs_arg2;
}

static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t max_int = (uint64_t)DF_MAX_INT(df);
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    if (abs_arg1 > max_int || abs_arg2 > max_int) {
        return (int64_t)max_int;
    } else {
        return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int;
    }
}
static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (arg1 < 0) {
        return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int;
    } else {
        return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int;
    }
}

static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t max_uint = DF_MAX_UINT(df);
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint;
}
static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1);
}

static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1);
}

static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1);
}

static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1);
}
static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (arg2 > 0) {
        return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int;
    } else {
        return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int;
    }
}

static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0;
}
static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t max_uint = DF_MAX_UINT(df);
    if (arg2 >= 0) {
        uint64_t u_arg2 = (uint64_t)arg2;
        return (u_arg1 > u_arg2) ?
            (int64_t)(u_arg1 - u_arg2) :
            0;
    } else {
        uint64_t u_arg2 = (uint64_t)(-arg2);
        return (u_arg1 < max_uint - u_arg2) ?
            (int64_t)(u_arg1 + u_arg2) :
            (int64_t)max_uint;
    }
}

static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (u_arg1 > u_arg2) {
        return u_arg1 - u_arg2 < (uint64_t)max_int ?
            (int64_t)(u_arg1 - u_arg2) :
            max_int;
    } else {
        return u_arg2 - u_arg1 < (uint64_t)(-min_int) ?
            (int64_t)(u_arg1 - u_arg2) :
            min_int;
    }
}
static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return (arg1 < arg2) ?
        (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2);
}

static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    /* unsigned compare */
    return (u_arg1 < u_arg2) ?
        (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2);
}
static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 * arg2;
}

static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
        return DF_MIN_INT(df);
    }
    return arg2 ? arg1 / arg2 : 0;
}

static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 / u_arg2 : 0;
}

static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
        return 0;
    }
    return arg2 ? arg1 % arg2 : 0;
}

static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 % u_arg2 : 0;
}
#define SIGNED_EVEN(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_EVEN(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

#define SIGNED_ODD(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_ODD(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))

#define SIGNED_EXTRACT(e, o, a, df) \
    do { \
        e = SIGNED_EVEN(a, df); \
        o = SIGNED_ODD(a, df); \
    } while (0);

#define UNSIGNED_EXTRACT(e, o, a, df) \
    do { \
        e = UNSIGNED_EVEN(a, df); \
        o = UNSIGNED_ODD(a, df); \
    } while (0);
static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}
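
/*
 * Worked example (illustrative values only): DOTP treats each destination
 * element as a pair of half-width values. For a halfword source element
 * 0x0302 of arg1 (even half 2, odd half 3) and 0x140a of arg2 (10 and 20),
 * msa_dotp_s_df returns 2 * 10 + 3 * 20 == 80 in the full-width element.
 */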
#define CONCATENATE_AND_SLIDE(s, k) \
    do { \
        for (i = 0; i < s; i++) { \
            v[i] = pws->b[s * k + i]; \
            v[i + s] = pwd->b[s * k + i]; \
        } \
        for (i = 0; i < s; i++) { \
            pwd->b[s * k + i] = v[i + n]; \
        } \
    } while (0)

static inline void msa_sld_df(uint32_t df, wr_t *pwd,
                              wr_t *pws, target_ulong rt)
{
    uint32_t n = rt % DF_ELEMENTS(df);
    uint8_t v[64];
    uint32_t i, k;

    switch (df) {
    case DF_BYTE:
        CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0);
        break;
    case DF_HALF:
        for (k = 0; k < 2; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k);
        }
        break;
    case DF_WORD:
        for (k = 0; k < 4; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k);
        }
        break;
    case DF_DOUBLE:
        for (k = 0; k < 8; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k);
        }
        break;
    default:
        assert(0);
    }
}
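
/*
 * Illustrative note: for each block processed by CONCATENATE_AND_SLIDE, v[]
 * holds the ws bytes of that block followed by the wd bytes, and the
 * destination receives a window of that concatenation starting at offset n,
 * where n = rt % DF_ELEMENTS(df); with n == 0 the destination simply
 * receives ws unchanged.
 */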
static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df);
}

static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);

    if (arg1 == q_min && arg2 == q_min) {
        return q_max;
    }
    return (arg1 * arg2) >> (DF_BITS(df) - 1);
}

static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    if (arg1 == q_min && arg2 == q_min) {
        return q_max;
    }
    return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1);
}
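
/*
 * Worked example (illustrative values only): MUL_Q treats elements as
 * Q(width-1) fixed point. For halfwords (Q15), 0x4000 represents 0.5, and
 * msa_mul_q_df(DF_HALF, 0x4000, 0x4000) == (0x4000 * 0x4000) >> 15 == 0x2000,
 * i.e. 0.25; the q_min * q_min case saturates to q_max instead of wrapping to
 * a negative value, and MULR_Q additionally adds the rounding bit r_bit
 * before shifting.
 */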
#define MSA_BINOP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
                                uint32_t wd, uint32_t ws, uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    uint32_t i; \
 \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_BINOP_DF(subsus_u)
MSA_BINOP_DF(subsuu_s)
void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t rt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]);
}
static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    return dest + arg1 * arg2;
}

static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    return dest - arg1 * arg2;
}

static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
}

static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
}

static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
        int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}
#define MSA_TEROP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
                                uint32_t ws, uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    uint32_t i; \
 \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
                                            pwt->b[i]); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
                                            pwt->h[i]); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
                                            pwt->w[i]); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
                                            pwt->d[i]); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_TEROP_DF(dpadd_s)
MSA_TEROP_DF(dpadd_u)
MSA_TEROP_DF(dpsub_s)
MSA_TEROP_DF(dpsub_u)
MSA_TEROP_DF(madd_q)
MSA_TEROP_DF(msub_q)
MSA_TEROP_DF(maddr_q)
MSA_TEROP_DF(msubr_q)
static inline void msa_splat_df(uint32_t df, wr_t *pwd,
                                wr_t *pws, target_ulong rt)
{
    uint32_t n = rt % DF_ELEMENTS(df);
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwd->b[i] = pws->b[n];
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwd->h[i] = pws->h[n];
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwd->w[i] = pws->w[n];
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            pwd->d[i] = pws->d[n];
        }
        break;
    default:
        assert(0);
    }
}

void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t rt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]);
}
#define MSA_DO_B MSA_DO(b)
#define MSA_DO_H MSA_DO(h)
#define MSA_DO_W MSA_DO(w)
#define MSA_DO_D MSA_DO(d)

#define MSA_LOOP_B MSA_LOOP(B)
#define MSA_LOOP_H MSA_LOOP(H)
#define MSA_LOOP_W MSA_LOOP(W)
#define MSA_LOOP_D MSA_LOOP(D)

#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE)
#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF)
#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD)
#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)

#define MSA_LOOP(DF) \
    for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \
        MSA_DO_ ## DF \
    }

#define MSA_FN_DF(FUNC) \
void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
        uint32_t ws, uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    wr_t wx, *pwx = &wx; \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        MSA_LOOP_B \
        break; \
    case DF_HALF: \
        MSA_LOOP_H \
        break; \
    case DF_WORD: \
        MSA_LOOP_W \
        break; \
    case DF_DOUBLE: \
        MSA_LOOP_D \
        break; \
    default: \
        assert(0); \
    } \
    msa_move_v(pwd, pwx); \
}

#define MSA_LOOP_COND(DF) \
            (DF_ELEMENTS(DF) / 2)

#define Rb(pwr, i) (pwr->b[i])
#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2])
#define Rh(pwr, i) (pwr->h[i])
#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2])
#define Rw(pwr, i) (pwr->w[i])
#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2])
#define Rd(pwr, i) (pwr->d[i])
#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2])

#define MSA_DO(DF) \
    do { \
        R##DF(pwx, i) = pwt->DF[2*i]; \
        L##DF(pwx, i) = pws->DF[2*i]; \
    } while (0);
MSA_FN_DF(pckev_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        R##DF(pwx, i) = pwt->DF[2*i+1]; \
        L##DF(pwx, i) = pws->DF[2*i+1]; \
    } while (0);
MSA_FN_DF(pckod_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = L##DF(pwt, i); \
        pwx->DF[2*i+1] = L##DF(pws, i); \
    } while (0);
MSA_FN_DF(ilvl_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = R##DF(pwt, i); \
        pwx->DF[2*i+1] = R##DF(pws, i); \
    } while (0);
MSA_FN_DF(ilvr_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = pwt->DF[2*i]; \
        pwx->DF[2*i+1] = pws->DF[2*i]; \
    } while (0);
MSA_FN_DF(ilvev_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = pwt->DF[2*i+1]; \
        pwx->DF[2*i+1] = pws->DF[2*i+1]; \
    } while (0);
MSA_FN_DF(ilvod_df)
#undef MSA_DO
#undef MSA_LOOP_COND

#define MSA_LOOP_COND(DF) \
            (DF_ELEMENTS(DF))

#define MSA_DO(DF) \
    do { \
        uint32_t n = DF_ELEMENTS(df); \
        uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \
        pwx->DF[i] = \
            (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \
    } while (0);
MSA_FN_DF(vshf_df)
#undef MSA_DO
#undef MSA_LOOP_COND
void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_sld_df(df, pwd, pws, n);
}

void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_splat_df(df, pwd, pws, n);
}
void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
                          uint32_t ws, uint32_t n)
{
    n %= DF_ELEMENTS(df);

    switch (df) {
    case DF_BYTE:
        env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n];
        break;
    case DF_HALF:
        env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n];
        break;
    case DF_WORD:
        env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n];
        break;
#ifdef TARGET_MIPS64
    case DF_DOUBLE:
        env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n];
        break;
#endif
    default:
        assert(0);
    }
}

void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
                          uint32_t ws, uint32_t n)
{
    n %= DF_ELEMENTS(df);

    switch (df) {
    case DF_BYTE:
        env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n];
        break;
    case DF_HALF:
        env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n];
        break;
    case DF_WORD:
        env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n];
        break;
#ifdef TARGET_MIPS64
    case DF_DOUBLE:
        env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n];
        break;
#endif
    default:
        assert(0);
    }
}
void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t rs_num, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    target_ulong rs = env->active_tc.gpr[rs_num];

    switch (df) {
    case DF_BYTE:
        pwd->b[n] = (int8_t)rs;
        break;
    case DF_HALF:
        pwd->h[n] = (int16_t)rs;
        break;
    case DF_WORD:
        pwd->w[n] = (int32_t)rs;
        break;
    case DF_DOUBLE:
        pwd->d[n] = (int64_t)rs;
        break;
    default:
        assert(0);
    }
}

void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    switch (df) {
    case DF_BYTE:
        pwd->b[n] = (int8_t)pws->b[0];
        break;
    case DF_HALF:
        pwd->h[n] = (int16_t)pws->h[0];
        break;
    case DF_WORD:
        pwd->w[n] = (int32_t)pws->w[0];
        break;
    case DF_DOUBLE:
        pwd->d[n] = (int64_t)pws->d[0];
        break;
    default:
        assert(0);
    }
}
void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd)
{
    env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK;
    restore_msa_fp_status(env);
    /* check exception */
    if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)
        & GET_FP_CAUSE(env->active_tc.msacsr)) {
        helper_raise_exception(env, EXCP_MSAFPE);
    }
}

target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs)
{
    return env->active_tc.msacsr & MSACSR_MASK;
}

void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_move_v(pwd, pws);
}
static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg)
{
    uint64_t x;

    x = UNSIGNED(arg, df);

    x = (x & 0x5555555555555555ULL) + ((x >>  1) & 0x5555555555555555ULL);
    x = (x & 0x3333333333333333ULL) + ((x >>  2) & 0x3333333333333333ULL);
    x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >>  4) & 0x0F0F0F0F0F0F0F0FULL);
    x = (x & 0x00FF00FF00FF00FFULL) + ((x >>  8) & 0x00FF00FF00FF00FFULL);
    x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
    x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32));

    return x;
}
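
/*
 * Illustrative note: this is the classic tree-reduction population count.
 * For a byte element 0xB1 (binary 10110001), msa_pcnt_df(DF_BYTE, 0xB1) == 4;
 * the wider folds are harmless for narrow elements because UNSIGNED() has
 * already cleared the upper bits.
 */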
static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
{
    uint64_t x, y;
    int n, c;

    x = UNSIGNED(arg, df);
    n = DF_BITS(df);
    c = DF_BITS(df) / 2;

    do {
        y = x >> c;
        if (y != 0) {
            n = n - c;
            x = y;
        }
        c = c >> 1;
    } while (c != 0);

    return n - x;
}

static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
{
    return msa_nlzc_df(df, UNSIGNED((~arg), df));
}

void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t rs)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwd->b[i] = (int8_t)env->active_tc.gpr[rs];
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwd->h[i] = (int16_t)env->active_tc.gpr[rs];
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwd->w[i] = (int32_t)env->active_tc.gpr[rs];
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            pwd->d[i] = (int64_t)env->active_tc.gpr[rs];
        }
        break;
    default:
        assert(0);
    }
}

#define MSA_UNOP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
                                uint32_t wd, uint32_t ws) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
 \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}
#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)

#define FLOAT_SNAN16 (float16_default_nan ^ 0x0220)
#define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020)
#define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL)
        /* 0x7ff0000000000020 */
static inline void clear_msacsr_cause(CPUMIPSState *env)
{
    SET_FP_CAUSE(env->active_tc.msacsr, 0);
}

static inline void check_msacsr_cause(CPUMIPSState *env)
{
    if ((GET_FP_CAUSE(env->active_tc.msacsr) &
            (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) {
        UPDATE_FP_FLAGS(env->active_tc.msacsr,
                GET_FP_CAUSE(env->active_tc.msacsr));
    } else {
        helper_raise_exception(env, EXCP_MSAFPE);
    }
}
/* Flush-to-zero use cases for update_msacsr() */
#define CLEAR_FS_UNDERFLOW 1
#define CLEAR_IS_INEXACT 2
#define RECIPROCAL_INEXACT 4

static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
{
    int ieee_ex;

    int c;
    int cause;
    int enable;

    ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status);

    /* QEMU softfloat does not signal all underflow cases */
    if (denormal) {
        ieee_ex |= float_flag_underflow;
    }

    c = ieee_ex_to_mips(ieee_ex);
    enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;

    /* Set Inexact (I) when flushing inputs to zero */
    if ((ieee_ex & float_flag_input_denormal) &&
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
        if (action & CLEAR_IS_INEXACT) {
            c &= ~FP_INEXACT;
        } else {
            c |= FP_INEXACT;
        }
    }

    /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
    if ((ieee_ex & float_flag_output_denormal) &&
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
        c |= FP_INEXACT;
        if (action & CLEAR_FS_UNDERFLOW) {
            c &= ~FP_UNDERFLOW;
        } else {
            c |= FP_UNDERFLOW;
        }
    }

    /* Set Inexact (I) when Overflow (O) is not enabled */
    if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) {
        c |= FP_INEXACT;
    }

    /* Clear Exact Underflow when Underflow (U) is not enabled */
    if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 &&
            (c & FP_INEXACT) == 0) {
        c &= ~FP_UNDERFLOW;
    }

    /* Reciprocal operations set only Inexact when valid and not
       divide by zero */
    if ((action & RECIPROCAL_INEXACT) &&
            (c & (FP_INVALID | FP_DIV0)) == 0) {
        c = FP_INEXACT;
    }

    cause = c & enable;    /* all current enabled exceptions */

    if (cause == 0) {
        /* No enabled exception, update the MSACSR Cause
           with all current exceptions */
        SET_FP_CAUSE(env->active_tc.msacsr,
                (GET_FP_CAUSE(env->active_tc.msacsr) | c));
    } else {
        /* Current exceptions are enabled */
        if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) {
            /* Exception(s) will trap, update MSACSR Cause
               with all enabled exceptions */
            SET_FP_CAUSE(env->active_tc.msacsr,
                    (GET_FP_CAUSE(env->active_tc.msacsr) | c));
        }
    }

    return cause;
}
static inline int get_enabled_exceptions(const CPUMIPSState *env, int c)
{
    int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
    return c & enable;
}
static inline float16 float16_from_float32(int32 a, flag ieee,
                                           float_status *status)
{
    float16 f_val;

    f_val = float32_to_float16((float32)a, ieee, status);
    f_val = float16_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 15)) : f_val;
}

static inline float32 float32_from_float64(int64 a, float_status *status)
{
    float32 f_val;

    f_val = float64_to_float32((float64)a, status);
    f_val = float32_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 31)) : f_val;
}

static inline float32 float32_from_float16(int16_t a, flag ieee,
                                            float_status *status)
{
    float32 f_val;

    f_val = float16_to_float32((float16)a, ieee, status);
    f_val = float32_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 31)) : f_val;
}

static inline float64 float64_from_float32(int32 a, float_status *status)
{
    float64 f_val;

    f_val = float32_to_float64((float64)a, status);
    f_val = float64_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1ULL << 63)) : f_val;
}

static inline float32 float32_from_q16(int16_t a, float_status *status)
{
    float32 f_val;

    /* conversion as integer and scaling */
    f_val = int32_to_float32(a, status);
    f_val = float32_scalbn(f_val, -15, status);

    return f_val;
}

static inline float64 float64_from_q32(int32 a, float_status *status)
{
    float64 f_val;

    /* conversion as integer and scaling */
    f_val = int32_to_float64(a, status);
    f_val = float64_scalbn(f_val, -31, status);

    return f_val;
}
static inline int16_t float32_to_q16(float32 a, float_status *status)
{
    int32 q_val;
    int32 q_min = 0xffff8000;
    int32 q_max = 0x00007fff;

    int ieee_ex;

    if (float32_is_any_nan(a)) {
        float_raise(float_flag_invalid, status);
        return 0;
    }

    a = float32_scalbn(a, 15, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_overflow) {
        float_raise(float_flag_inexact, status);
        return (int32)a < 0 ? q_min : q_max;
    }

    /* conversion to int */
    q_val = float32_to_int32(a, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_invalid) {
        set_float_exception_flags(ieee_ex & (~float_flag_invalid)
                                  , status);
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int32)a < 0 ? q_min : q_max;
    }

    if (q_val < q_min) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int16_t)q_min;
    }

    if (q_max < q_val) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int16_t)q_max;
    }

    return (int16_t)q_val;
}
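
/*
 * Worked example (illustrative values only): an input of 0.5 scaled by 2^15
 * is 16384.0, so float32_to_q16 returns 0x4000; an input of 1.0 would give
 * 32768, which exceeds q_max and therefore saturates to 0x7fff with Overflow
 * and Inexact raised.
 */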
static inline int32 float64_to_q32(float64 a, float_status *status)
{
    int64 q_val;
    int64 q_min = 0xffffffff80000000LL;
    int64 q_max = 0x000000007fffffffLL;

    int ieee_ex;

    if (float64_is_any_nan(a)) {
        float_raise(float_flag_invalid, status);
        return 0;
    }

    a = float64_scalbn(a, 31, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_overflow) {
        float_raise(float_flag_inexact, status);
        return (int64)a < 0 ? q_min : q_max;
    }

    /* conversion to integer */
    q_val = float64_to_int64(a, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_invalid) {
        set_float_exception_flags(ieee_ex & (~float_flag_invalid)
                                  , status);
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int64)a < 0 ? q_min : q_max;
    }

    if (q_val < q_min) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int32)q_min;
    }

    if (q_max < q_val) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int32)q_max;
    }

    return (int32)q_val;
}
#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
        int64_t cond; \
        set_float_exception_flags(0, status); \
        if (!QUIET) { \
            cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
        } else { \
            cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \
        } \
        DEST = cond ? M_MAX_UINT(BITS) : 0; \
        c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)
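
/*
 * Illustrative note: each floating-point compare writes an all-ones element
 * (M_MAX_UINT(BITS)) when the predicate holds and 0 otherwise, so the result
 * is directly usable as a per-element mask. The FC* helpers below pass
 * quiet == 1 and use the _quiet softfloat comparison; the FS* helpers pass
 * quiet == 0 and use the signalling comparison.
 */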
#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
        if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \
            DEST = 0; \
        } \
    } while (0)

#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
            if (DEST == 0) { \
                MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
            } \
        } \
    } while (0)

#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \
        } \
    } while (0)
static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32,
                           quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64,
                           quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
*env
, wr_t
*pwd
, wr_t
*pws
,
1968 wr_t
*pwt
, uint32_t df
, int quiet
)
1970 wr_t wx
, *pwx
= &wx
;
1973 clear_msacsr_cause(env
);
1977 for (i
= 0; i
< DF_ELEMENTS(DF_WORD
); i
++) {
1978 MSA_FLOAT_COND(pwx
->w
[i
], lt
, pws
->w
[i
], pwt
->w
[i
], 32, quiet
);
1982 for (i
= 0; i
< DF_ELEMENTS(DF_DOUBLE
); i
++) {
1983 MSA_FLOAT_COND(pwx
->d
[i
], lt
, pws
->d
[i
], pwt
->d
[i
], 64, quiet
);
1990 check_msacsr_cause(env
);
1992 msa_move_v(pwd
, pwx
);
1995 static inline void compare_ult(CPUMIPSState
*env
, wr_t
*pwd
, wr_t
*pws
,
1996 wr_t
*pwt
, uint32_t df
, int quiet
)
1998 wr_t wx
, *pwx
= &wx
;
2001 clear_msacsr_cause(env
);
2005 for (i
= 0; i
< DF_ELEMENTS(DF_WORD
); i
++) {
2006 MSA_FLOAT_ULT(pwx
->w
[i
], pws
->w
[i
], pwt
->w
[i
], 32, quiet
);
2010 for (i
= 0; i
< DF_ELEMENTS(DF_DOUBLE
); i
++) {
2011 MSA_FLOAT_ULT(pwx
->d
[i
], pws
->d
[i
], pwt
->d
[i
], 64, quiet
);
2018 check_msacsr_cause(env
);
2020 msa_move_v(pwd
, pwx
);
2023 static inline void compare_le(CPUMIPSState
*env
, wr_t
*pwd
, wr_t
*pws
,
2024 wr_t
*pwt
, uint32_t df
, int quiet
)
2026 wr_t wx
, *pwx
= &wx
;
2029 clear_msacsr_cause(env
);
2033 for (i
= 0; i
< DF_ELEMENTS(DF_WORD
); i
++) {
2034 MSA_FLOAT_COND(pwx
->w
[i
], le
, pws
->w
[i
], pwt
->w
[i
], 32, quiet
);
2038 for (i
= 0; i
< DF_ELEMENTS(DF_DOUBLE
); i
++) {
2039 MSA_FLOAT_COND(pwx
->d
[i
], le
, pws
->d
[i
], pwt
->d
[i
], 64, quiet
);
2046 check_msacsr_cause(env
);
2048 msa_move_v(pwd
, pwx
);
2051 static inline void compare_ule(CPUMIPSState
*env
, wr_t
*pwd
, wr_t
*pws
,
2052 wr_t
*pwt
, uint32_t df
, int quiet
)
2054 wr_t wx
, *pwx
= &wx
;
2057 clear_msacsr_cause(env
);
2061 for (i
= 0; i
< DF_ELEMENTS(DF_WORD
); i
++) {
2062 MSA_FLOAT_ULE(pwx
->w
[i
], pws
->w
[i
], pwt
->w
[i
], 32, quiet
);
2066 for (i
= 0; i
< DF_ELEMENTS(DF_DOUBLE
); i
++) {
2067 MSA_FLOAT_ULE(pwx
->d
[i
], pws
->d
[i
], pwt
->d
[i
], 64, quiet
);
2074 check_msacsr_cause(env
);
2076 msa_move_v(pwd
, pwx
);
static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}

static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet) {
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_af(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_un(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_eq(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ueq(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_lt(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ult(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_le(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ule(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_af(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_un(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_eq(env, pwd, pws, pwt, df, 0);
}
void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ueq(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_lt(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ult(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_le(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ule(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_or(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_une(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ne(env, pwd, pws, pwt, df, 1);
}

void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_or(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_une(env, pwd, pws, pwt, df, 0);
}

void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ne(env, pwd, pws, pwt, df, 0);
}
#define float16_is_zero(ARG) 0
#define float16_is_zero_or_denormal(ARG) 0

#define IS_DENORMAL(ARG, BITS) \
    (!float ## BITS ## _is_zero(ARG) \
    && float ## BITS ## _is_zero_or_denormal(ARG))

#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)
void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}
void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);
    msa_move_v(pwd, pwx);
}
void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
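/*
 * MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS): fused
 * multiply-add with the same MSACSR bookkeeping as MSA_FLOAT_BINOP.
 * Note the argument order: it computes (ARG2 * ARG3) + ARG1, so ARG1 is
 * the accumulator (the old destination element for FMADD/FMSUB), and
 * NEGATE takes the softfloat float_muladd_negate_* flags.
 */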
#define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status); \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)
void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
                             pws->w[i], pwt->w[i], 0, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
                             pws->d[i], pwt->d[i], 0, 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
                             pws->w[i], pwt->w[i],
                             float_muladd_negate_product, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
                             pws->d[i], pwt->d[i],
                             float_muladd_negate_product, 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Clamp the scale operand so scalbn cannot be driven out of
               range by a huge exponent.  */
            MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i],
                            pwt->w[i] > 0x200 ? 0x200 :
                            pwt->w[i] < -0x200 ? -0x200 : pwt->w[i],
                            32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i],
                            pwt->d[i] > 0x1000 ? 0x1000 :
                            pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i],
                            64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
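/*
 * MSA_FLOAT_UNOP(DEST, OP, ARG, BITS): single-operand counterpart of
 * MSA_FLOAT_BINOP with identical exception and denormal handling.
 */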
#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _ ## OP(ARG, status); \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)
void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16);
            MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32);
            MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
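/*
 * MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS): narrowing variant used by
 * FTQ.  The softfloat operation runs at BITS precision, update_msacsr() is
 * called with CLEAR_FS_UNDERFLOW instead of a denormal check, and an
 * enabled exception writes the XBITS-wide signaling-NaN pattern, matching
 * the width of the element actually stored.
 */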
#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _ ## OP(ARG, status); \
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c; \
        } \
    } while (0)
void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16);
            MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32);
            MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
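/*
 * Helpers for FMIN/FMAX and their magnitude variants.  NUMBER_QNAN_PAIR is
 * true when ARG1 is a number and ARG2 a quiet NaN, which lets the callers
 * substitute the numeric operand for the NaN.  MSA_FLOAT_MAXOP performs a
 * min/max softfloat operation with the usual MSACSR update.
 * FMAXMIN_A(F, G, X, S, T, BITS) selects by magnitude: with F = max and
 * G = min it yields the operand with the larger absolute value (falling
 * back to F(S, T) when the magnitudes are equal), and conversely for
 * FMIN_A.
 */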
#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS) \
    !float ## BITS ## _is_any_nan(ARG1) \
    && float ## BITS ## _is_quiet_nan(ARG2)

#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
        c = update_msacsr(env, 0, 0); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)

#define FMAXMIN_A(F, G, X, _S, _T, BITS) \
    do { \
        uint## BITS ##_t S = _S, T = _T; \
        uint## BITS ##_t as, at, xs, xt, xd; \
        if (NUMBER_QNAN_PAIR(S, T, BITS)) { \
            T = S; \
        } \
        else if (NUMBER_QNAN_PAIR(T, S, BITS)) { \
            S = T; \
        } \
        as = float## BITS ##_abs(S); \
        at = float## BITS ##_abs(T); \
        MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \
        MSA_FLOAT_MAXOP(xt, G, S, T, BITS); \
        MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \
        X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt; \
    } while (0)
void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
                          uint32_t wd, uint32_t ws)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    if (df == DF_WORD) {
        pwd->w[0] = helper_float_class_s(pws->w[0]);
        pwd->w[1] = helper_float_class_s(pws->w[1]);
        pwd->w[2] = helper_float_class_s(pws->w[2]);
        pwd->w[3] = helper_float_class_s(pws->w[3]);
    } else {
        pwd->d[0] = helper_float_class_d(pws->d[0]);
        pwd->d[1] = helper_float_class_d(pws->d[1]);
    }
}
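/*
 * MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS): conversion helper used by the
 * float-to-integer operations.  It differs from MSA_FLOAT_UNOP in that
 * update_msacsr() is called with CLEAR_FS_UNDERFLOW and no denormal check,
 * and a NaN input that does not take an enabled exception produces 0 in
 * the destination element.
 */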
#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _ ## OP(ARG, status); \
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } else if (float ## BITS ## _is_any_nan(ARG)) { \
            DEST = 0; \
        } \
    } while (0)
void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                            uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                            uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
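/*
 * MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS): computes 1/ARG as
 * float<BITS>_div(FLOAT_ONE<BITS>, ARG).  RECIPROCAL_INEXACT is reported
 * unless the input is an infinity or the quotient is a quiet NaN, and a
 * denormal result is flagged as in the other helpers.  FRSQRT reuses this
 * by passing float<BITS>_sqrt(x) as ARG.
 */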
#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status); \
        c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \
                          float ## BITS ## _is_quiet_nan(DEST) ? \
                          0 : RECIPROCAL_INEXACT, \
                          IS_DENORMAL(DEST, BITS)); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)
void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i],
                    &env->active_tc.msa_fp_status), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i],
                    &env->active_tc.msa_fp_status), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
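/*
 * MSA_FLOAT_LOGB(DEST, ARG, BITS): FLOG2 helper.  It forces round-down,
 * takes float<BITS>_log2(ARG), rounds the result to an integral value,
 * restores the rounding mode from MSACSR.RM, and discards the inexact
 * flag before folding the remaining flags into MSACSR.
 */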
#define MSA_FLOAT_LOGB(DEST, ARG, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
 \
        set_float_exception_flags(0, status); \
        set_float_rounding_mode(float_round_down, status); \
        DEST = float ## BITS ## _ ## log2(ARG, status); \
        DEST = float ## BITS ## _ ## round_to_int(DEST, status); \
        set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr & \
                                         MSACSR_RM_MASK) >> MSACSR_RM], \
                                status); \
 \
        set_float_exception_flags(get_float_exception_flags(status) & \
                                  (~float_flag_inexact), \
                                  status); \
 \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
 \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)
void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}
void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}
void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
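/*
 * Token-pasting aliases: MSA_FLOAT_UNOP builds names of the form
 * float<BITS>_from_<type>, so map them onto the softfloat int-to-float
 * conversion routines used by FFINT_S/FFINT_U below.
 */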
#define float32_from_int32 int32_to_float32
#define float32_from_uint32 uint32_to_float32

#define float64_from_int64 int64_to_float64
#define float64_from_uint64 uint64_to_float64
void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}
void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env);

    msa_move_v(pwd, pwx);
}