/*
 * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
 *
 * Copyright (c) 2014 Imagination Technologies
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"

/* Data format min and max values */
#define DF_BITS(df) (1 << ((df) + 3))

#define DF_MAX_INT(df)  (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
#define M_MAX_INT(m)    (int64_t)((1LL << ((m) - 1)) - 1)

#define DF_MIN_INT(df)  (int64_t)(-(1LL << (DF_BITS(df) - 1)))
#define M_MIN_INT(m)    (int64_t)(-(1LL << ((m) - 1)))

#define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
#define M_MAX_UINT(m)   (uint64_t)(-1ULL >> (64 - (m)))

#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
#define SIGNED(x, df) \
    ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

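/*
 * Illustration (assuming the DF_BYTE..DF_DOUBLE data-format encoding 0..3
 * used by the MSA translation code and a 128-bit MSA_WRLEN):
 * DF_BITS(DF_BYTE) == 8 and DF_BITS(DF_DOUBLE) == 64, so a vector register
 * holds DF_ELEMENTS(DF_BYTE) == 16 byte elements or
 * DF_ELEMENTS(DF_DOUBLE) == 2 doubleword elements.
 */
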
static inline void msa_move_v(wr_t *pwd, wr_t *pws)
{
    uint32_t i;

    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
        pwd->d[i] = pws->d[i];
    }
}

#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \
void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
        uint32_t i8) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
    for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
        DEST = OPERATION; \
    } \
}

MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8)
MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8)
MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8))
MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8)

#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \
            UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df)
MSA_FN_IMM8(bmnzi_b, pwd->b[i],
        BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))

#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \
            UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df)
MSA_FN_IMM8(bmzi_b, pwd->b[i],
        BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))

#define BIT_SELECT(dest, arg1, arg2, df) \
            UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df)
MSA_FN_IMM8(bseli_b, pwd->b[i],
        BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE))

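/*
 * Worked example of BMNZI.B (values are only an illustration): with
 * pwd->b[i] = 0xF0, pws->b[i] = 0x0F and i8 = 0x33, the destination keeps
 * its own bits where the immediate is 0 and takes the pws bits where it
 * is 1: (0xF0 & ~0x33) | (0x0F & 0x33) = 0xC0 | 0x03 = 0xC3.
 */
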
#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03))

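/*
 * SHF_POS picks, for destination element i, a source element from the same
 * group of four: the two imm bits selected by (i & 3) give the index within
 * the group.  For example (an illustrative immediate, not taken from the
 * source), imm = 0x1b maps positions 0..3 to 3..0, i.e. it reverses every
 * group of four elements.
 */
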
void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t imm)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t wx, *pwx = &wx;
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwx->b[i] = pws->b[SHF_POS(i, imm)];
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwx->h[i] = pws->h[SHF_POS(i, imm)];
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwx->w[i] = pws->w[SHF_POS(i, imm)];
        }
        break;
    default:
        assert(0);
    }
    msa_move_v(pwd, pwx);
}

#define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \
void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
        uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    uint32_t i; \
    for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
        DEST = OPERATION; \
    } \
}

MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i])
MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i])
MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i]))
MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i])
MSA_FN_VECTOR(bmnz_v, pwd->d[i],
        BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
MSA_FN_VECTOR(bmz_v, pwd->d[i],
        BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
MSA_FN_VECTOR(bsel_v, pwd->d[i],
        BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))

#undef BIT_MOVE_IF_NOT_ZERO
#undef BIT_MOVE_IF_ZERO

static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 + arg2;
}

static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 - arg2;
}

static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 == arg2 ? -1 : 0;
}

static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 <= arg2 ? -1 : 0;
}

static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 <= u_arg2 ? -1 : 0;
}

static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 < arg2 ? -1 : 0;
}

static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? -1 : 0;
}

static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 > arg2 ? arg1 : arg2;
}

static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 > u_arg2 ? arg1 : arg2;
}

static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 < arg2 ? arg1 : arg2;
}

static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg1 < u_arg2 ? arg1 : arg2;
}

#define MSA_BINOP_IMM_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
                        uint32_t wd, uint32_t ws, int32_t u5) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_BINOP_IMM_DF(addvi, addv)
MSA_BINOP_IMM_DF(subvi, subv)
MSA_BINOP_IMM_DF(ceqi, ceq)
MSA_BINOP_IMM_DF(clei_s, cle_s)
MSA_BINOP_IMM_DF(clei_u, cle_u)
MSA_BINOP_IMM_DF(clti_s, clt_s)
MSA_BINOP_IMM_DF(clti_u, clt_u)
MSA_BINOP_IMM_DF(maxi_s, max_s)
MSA_BINOP_IMM_DF(maxi_u, max_u)
MSA_BINOP_IMM_DF(mini_s, min_s)
MSA_BINOP_IMM_DF(mini_u, min_u)
#undef MSA_BINOP_IMM_DF

void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       int32_t s10)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwd->b[i] = (int8_t)s10;
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwd->h[i] = (int16_t)s10;
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwd->w[i] = (int32_t)s10;
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            pwd->d[i] = (int64_t)s10;
        }
        break;
    default:
        assert(0);
    }
}

/* Data format bit position and unsigned values */
#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df))

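/*
 * BIT_POSITION reduces a shift amount or bit index modulo the element width,
 * so only the low bits of the second operand matter: for example an arg2 of
 * 35 on a 32-bit element selects bit 3.
 */
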
static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 << b_arg2;
}

static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return arg1 >> b_arg2;
}

static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return u_arg1 >> b_arg2;
}

static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 & (~(1LL << b_arg2)), df);
}

static inline int64_t msa_bset_df(uint32_t df, int64_t arg1,
                                  int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 | (1LL << b_arg2), df);
}

static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    return UNSIGNED(arg1 ^ (1LL << b_arg2), df);
}

static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
        return u_arg1;
    } else {
        return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) |
               UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df);
    }
}

static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_dest = UNSIGNED(dest, df);
    int32_t sh_d = BIT_POSITION(arg2, df) + 1;
    int32_t sh_a = DF_BITS(df) - sh_d;
    if (sh_d == DF_BITS(df)) {
        return u_arg1;
    } else {
        return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) |
               UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df);
    }
}

static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m)
{
    return arg < M_MIN_INT(m+1) ? M_MIN_INT(m+1) :
           arg > M_MAX_INT(m+1) ? M_MAX_INT(m+1) :
                                  arg;
}

static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m)
{
    uint64_t u_arg = UNSIGNED(arg, df);
    return u_arg < M_MAX_UINT(m+1) ? u_arg :
                                     M_MAX_UINT(m+1);
}

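/*
 * Example (illustrative values): msa_sat_s_df with m = 7 clamps to the
 * 8-bit signed range M_MIN_INT(8) = -128 .. M_MAX_INT(8) = 127, while
 * msa_sat_u_df with m = 7 clamps to 0 .. M_MAX_UINT(8) = 255.
 */
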
static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    if (b_arg2 == 0) {
        return arg1;
    } else {
        int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1;
        return (arg1 >> b_arg2) + r_bit;
    }
}

static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    int32_t b_arg2 = BIT_POSITION(arg2, df);
    if (b_arg2 == 0) {
        return u_arg1;
    } else {
        uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1;
        return (u_arg1 >> b_arg2) + r_bit;
    }
}

#define MSA_BINOP_IMMU_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
                                  uint32_t ws, uint32_t u5) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_BINOP_IMMU_DF(slli, sll)
MSA_BINOP_IMMU_DF(srai, sra)
MSA_BINOP_IMMU_DF(srli, srl)
MSA_BINOP_IMMU_DF(bclri, bclr)
MSA_BINOP_IMMU_DF(bseti, bset)
MSA_BINOP_IMMU_DF(bnegi, bneg)
MSA_BINOP_IMMU_DF(sat_s, sat_s)
MSA_BINOP_IMMU_DF(sat_u, sat_u)
MSA_BINOP_IMMU_DF(srari, srar)
MSA_BINOP_IMMU_DF(srlri, srlr)
#undef MSA_BINOP_IMMU_DF

#define MSA_TEROP_IMMU_DF(helper, func) \
void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
                                  uint32_t wd, uint32_t ws, uint32_t u5) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
                                            u5); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
                                            u5); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
                                            u5); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
                                            u5); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_TEROP_IMMU_DF(binsli, binsl)
MSA_TEROP_IMMU_DF(binsri, binsr)
#undef MSA_TEROP_IMMU_DF

static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 > abs_arg2 ? arg1 : arg2;
}

static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 < abs_arg2 ? arg1 : arg2;
}

static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    return abs_arg1 + abs_arg2;
}

static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t max_int = (uint64_t)DF_MAX_INT(df);
    uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
    uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
    if (abs_arg1 > max_int || abs_arg2 > max_int) {
        return (int64_t)max_int;
    } else {
        return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int;
    }
}

static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (arg1 < 0) {
        return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int;
    } else {
        return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int;
    }
}

static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t max_uint = DF_MAX_UINT(df);
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint;
}

static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1);
}

static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1);
}

static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1);
}

static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1);
}

static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (arg2 > 0) {
        return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int;
    } else {
        return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int;
    }
}

static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0;
}

static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t max_uint = DF_MAX_UINT(df);
    if (arg2 >= 0) {
        uint64_t u_arg2 = (uint64_t)arg2;
        return (u_arg1 > u_arg2) ?
            (int64_t)(u_arg1 - u_arg2) :
            0;
    } else {
        uint64_t u_arg2 = (uint64_t)(-arg2);
        return (u_arg1 < max_uint - u_arg2) ?
            (int64_t)(u_arg1 + u_arg2) :
            (int64_t)max_uint;
    }
}

static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    int64_t max_int = DF_MAX_INT(df);
    int64_t min_int = DF_MIN_INT(df);
    if (u_arg1 > u_arg2) {
        return u_arg1 - u_arg2 < (uint64_t)max_int ?
            (int64_t)(u_arg1 - u_arg2) :
            max_int;
    } else {
        return u_arg2 - u_arg1 < (uint64_t)(-min_int) ?
            (int64_t)(u_arg1 - u_arg2) :
            min_int;
    }
}

static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return (arg1 < arg2) ?
        (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2);
}

static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    /* unsigned compare */
    return (u_arg1 < u_arg2) ?
        (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2);
}

static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return arg1 * arg2;
}

static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
        return DF_MIN_INT(df);
    }
    return arg2 ? arg1 / arg2 : 0;
}

static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 / u_arg2 : 0;
}

static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
        return 0;
    }
    return arg2 ? arg1 % arg2 : 0;
}

static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    uint64_t u_arg1 = UNSIGNED(arg1, df);
    uint64_t u_arg2 = UNSIGNED(arg2, df);
    return u_arg2 ? u_arg1 % u_arg2 : 0;
}

#define SIGNED_EVEN(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_EVEN(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))

#define SIGNED_ODD(a, df) \
        ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))

#define UNSIGNED_ODD(a, df) \
        ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))

#define SIGNED_EXTRACT(e, o, a, df) \
    do { \
        e = SIGNED_EVEN(a, df); \
        o = SIGNED_ODD(a, df); \
    } while (0)

#define UNSIGNED_EXTRACT(e, o, a, df) \
    do { \
        e = UNSIGNED_EVEN(a, df); \
        o = UNSIGNED_ODD(a, df); \
    } while (0)

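/*
 * Illustration (hypothetical value): for a halfword element holding 0xABCD,
 * SIGNED_EVEN sign-extends the low half 0xCD and SIGNED_ODD sign-extends the
 * high half 0xAB; the dot-product and horizontal add/sub helpers below use
 * these to split each element into its two half-width parts.
 */
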
static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

#define CONCATENATE_AND_SLIDE(s, k) \
    do { \
        for (i = 0; i < s; i++) { \
            v[i]     = pws->b[s * k + i]; \
            v[i + s] = pwd->b[s * k + i]; \
        } \
        for (i = 0; i < s; i++) { \
            pwd->b[s * k + i] = v[i + n]; \
        } \
    } while (0)

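/*
 * CONCATENATE_AND_SLIDE builds the 2*s-byte concatenation {pws, pwd} for one
 * element group in v[], then copies the s consecutive bytes starting at
 * offset n back into pwd - the byte-granular slide that SLD/SLDI perform.
 */
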
static inline void msa_sld_df(uint32_t df, wr_t *pwd,
                              wr_t *pws, target_ulong rt)
{
    uint32_t n = rt % DF_ELEMENTS(df);
    uint8_t v[64];
    uint32_t i, k;

    switch (df) {
    case DF_BYTE:
        CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0);
        break;
    case DF_HALF:
        for (k = 0; k < 2; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k);
        }
        break;
    case DF_WORD:
        for (k = 0; k < 4; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k);
        }
        break;
    case DF_DOUBLE:
        for (k = 0; k < 8; k++) {
            CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k);
        }
        break;
    default:
        assert(0);
    }
}

static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df);
}

static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df);
}

static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);

    if (arg1 == q_min && arg2 == q_min) {
        return q_max;
    }
    return (arg1 * arg2) >> (DF_BITS(df) - 1);
}

static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
{
    int64_t q_min = DF_MIN_INT(df);
    int64_t q_max = DF_MAX_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    if (arg1 == q_min && arg2 == q_min) {
        return q_max;
    }
    return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1);
}

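/*
 * mul_q/mulr_q implement Qn x Qn -> Qn fixed-point multiplication: the
 * (DF_BITS(df) - 1) shift drops the duplicated sign bit of the product, and
 * r_bit adds half an ulp for the rounding variant.  The q_min * q_min case
 * is special-cased because -1.0 * -1.0 is not representable and saturates
 * to q_max.
 */
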
#define MSA_BINOP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
                                uint32_t wd, uint32_t ws, uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_BINOP_DF(subsus_u)
MSA_BINOP_DF(subsuu_s)

void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t rt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]);
}

static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    return dest + arg1 * arg2;
}

static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1,
                                   int64_t arg2)
{
    return dest - arg1 * arg2;
}

static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
}

static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
}

static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t even_arg1;
    int64_t even_arg2;
    int64_t odd_arg1;
    int64_t odd_arg2;
    UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
    UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
    return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
}

static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                    int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                    int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
                                     int64_t arg2)
{
    int64_t q_prod, q_ret;

    int64_t q_max = DF_MAX_INT(df);
    int64_t q_min = DF_MIN_INT(df);
    int64_t r_bit = 1 << (DF_BITS(df) - 2);

    q_prod = arg1 * arg2;
    q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);

    return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
}

#define MSA_TEROP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
                                uint32_t ws, uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
                                            pwt->b[i]); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
                                            pwt->h[i]); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
                                            pwt->w[i]); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
                                            pwt->d[i]); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_TEROP_DF(dpadd_s)
MSA_TEROP_DF(dpadd_u)
MSA_TEROP_DF(dpsub_s)
MSA_TEROP_DF(dpsub_u)
MSA_TEROP_DF(madd_q)
MSA_TEROP_DF(msub_q)
MSA_TEROP_DF(maddr_q)
MSA_TEROP_DF(msubr_q)

static inline void msa_splat_df(uint32_t df, wr_t *pwd,
                                wr_t *pws, target_ulong rt)
{
    uint32_t n = rt % DF_ELEMENTS(df);
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwd->b[i] = pws->b[n];
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwd->h[i] = pws->h[n];
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwd->w[i] = pws->w[n];
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            pwd->d[i] = pws->d[n];
        }
        break;
    default:
        assert(0);
    }
}

void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t rt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]);
}

#define MSA_DO_B MSA_DO(b)
#define MSA_DO_H MSA_DO(h)
#define MSA_DO_W MSA_DO(w)
#define MSA_DO_D MSA_DO(d)

#define MSA_LOOP_B MSA_LOOP(B)
#define MSA_LOOP_H MSA_LOOP(H)
#define MSA_LOOP_W MSA_LOOP(W)
#define MSA_LOOP_D MSA_LOOP(D)

#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE)
#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF)
#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD)
#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)

#define MSA_LOOP(DF) \
    for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \
        MSA_DO_ ## DF; \
    }

#define MSA_FN_DF(FUNC) \
void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
        uint32_t ws, uint32_t wt) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
    wr_t wx, *pwx = &wx; \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        MSA_LOOP_B; \
        break; \
    case DF_HALF: \
        MSA_LOOP_H; \
        break; \
    case DF_WORD: \
        MSA_LOOP_W; \
        break; \
    case DF_DOUBLE: \
        MSA_LOOP_D; \
        break; \
    default: \
        assert(0); \
    } \
    msa_move_v(pwd, pwx); \
}

#define MSA_LOOP_COND(DF) \
            (DF_ELEMENTS(DF) / 2)

#define Rb(pwr, i) (pwr->b[i])
#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2])
#define Rh(pwr, i) (pwr->h[i])
#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2])
#define Rw(pwr, i) (pwr->w[i])
#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2])
#define Rd(pwr, i) (pwr->d[i])
#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2])

#define MSA_DO(DF) \
    do { \
        R##DF(pwx, i) = pwt->DF[2*i]; \
        L##DF(pwx, i) = pws->DF[2*i]; \
    } while (0)
MSA_FN_DF(pckev_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        R##DF(pwx, i) = pwt->DF[2*i+1]; \
        L##DF(pwx, i) = pws->DF[2*i+1]; \
    } while (0)
MSA_FN_DF(pckod_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = L##DF(pwt, i); \
        pwx->DF[2*i+1] = L##DF(pws, i); \
    } while (0)
MSA_FN_DF(ilvl_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = R##DF(pwt, i); \
        pwx->DF[2*i+1] = R##DF(pws, i); \
    } while (0)
MSA_FN_DF(ilvr_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = pwt->DF[2*i]; \
        pwx->DF[2*i+1] = pws->DF[2*i]; \
    } while (0)
MSA_FN_DF(ilvev_df)
#undef MSA_DO

#define MSA_DO(DF) \
    do { \
        pwx->DF[2*i]   = pwt->DF[2*i+1]; \
        pwx->DF[2*i+1] = pws->DF[2*i+1]; \
    } while (0)
MSA_FN_DF(ilvod_df)
#undef MSA_DO
#undef MSA_LOOP_COND

#define MSA_LOOP_COND(DF) \
            (DF_ELEMENTS(DF))

#define MSA_DO(DF) \
    do { \
        uint32_t n = DF_ELEMENTS(df); \
        uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \
        pwx->DF[i] = \
            (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \
    } while (0)
MSA_FN_DF(vshf_df)
#undef MSA_DO
#undef MSA_LOOP_COND

void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_sld_df(df, pwd, pws, n);
}

void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_splat_df(df, pwd, pws, n);
}

void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
                          uint32_t ws, uint32_t n)
{
    n %= DF_ELEMENTS(df);

    switch (df) {
    case DF_BYTE:
        env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n];
        break;
    case DF_HALF:
        env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n];
        break;
    case DF_WORD:
        env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n];
        break;
#ifdef TARGET_MIPS64
    case DF_DOUBLE:
        env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n];
        break;
#endif
    default:
        assert(0);
    }
}

void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
                          uint32_t ws, uint32_t n)
{
    n %= DF_ELEMENTS(df);

    switch (df) {
    case DF_BYTE:
        env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n];
        break;
    case DF_HALF:
        env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n];
        break;
    case DF_WORD:
        env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n];
        break;
#ifdef TARGET_MIPS64
    case DF_DOUBLE:
        env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n];
        break;
#endif
    default:
        assert(0);
    }
}

void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t rs_num, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    target_ulong rs = env->active_tc.gpr[rs_num];

    switch (df) {
    case DF_BYTE:
        pwd->b[n] = (int8_t)rs;
        break;
    case DF_HALF:
        pwd->h[n] = (int16_t)rs;
        break;
    case DF_WORD:
        pwd->w[n] = (int32_t)rs;
        break;
    case DF_DOUBLE:
        pwd->d[n] = (int64_t)rs;
        break;
    default:
        assert(0);
    }
}

void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t n)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    switch (df) {
    case DF_BYTE:
        pwd->b[n] = (int8_t)pws->b[0];
        break;
    case DF_HALF:
        pwd->h[n] = (int16_t)pws->h[0];
        break;
    case DF_WORD:
        pwd->w[n] = (int32_t)pws->w[0];
        break;
    case DF_DOUBLE:
        pwd->d[n] = (int64_t)pws->d[0];
        break;
    default:
        assert(0);
    }
}

void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd)
{
    switch (cd) {
    case 0:
        break;
    case 1:
        env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK;
        restore_msa_fp_status(env);
        /* check exception */
        if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)
            & GET_FP_CAUSE(env->active_tc.msacsr)) {
            do_raise_exception(env, EXCP_MSAFPE, GETPC());
        }
        break;
    }
}

target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs)
{
    switch (cs) {
    case 0:
        return env->msair;
    case 1:
        return env->active_tc.msacsr & MSACSR_MASK;
    }

    return 0;
}

void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);

    msa_move_v(pwd, pws);
}

static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg)
{
    uint64_t x;

    x = UNSIGNED(arg, df);

    x = (x & 0x5555555555555555ULL) + ((x >>  1) & 0x5555555555555555ULL);
    x = (x & 0x3333333333333333ULL) + ((x >>  2) & 0x3333333333333333ULL);
    x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >>  4) & 0x0F0F0F0F0F0F0F0FULL);
    x = (x & 0x00FF00FF00FF00FFULL) + ((x >>  8) & 0x00FF00FF00FF00FFULL);
    x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
    x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32));

    return x;
}

static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
{
    uint64_t x, y;
    int n, c;

    x = UNSIGNED(arg, df);
    n = DF_BITS(df);
    c = DF_BITS(df) / 2;

    do {
        y = x >> c;
        if (y != 0) {
            n = n - c;
            x = y;
        }
        c = c >> 1;
    } while (c != 0);

    return n - x;
}

static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
{
    return msa_nlzc_df(df, UNSIGNED((~arg), df));
}

void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t rs)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    uint32_t i;

    switch (df) {
    case DF_BYTE:
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
            pwd->b[i] = (int8_t)env->active_tc.gpr[rs];
        }
        break;
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
            pwd->h[i] = (int16_t)env->active_tc.gpr[rs];
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            pwd->w[i] = (int32_t)env->active_tc.gpr[rs];
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            pwd->d[i] = (int64_t)env->active_tc.gpr[rs];
        }
        break;
    default:
        assert(0);
    }
}

#define MSA_UNOP_DF(func) \
void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
                                uint32_t wd, uint32_t ws) \
{ \
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
    wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
    uint32_t i; \
    switch (df) { \
    case DF_BYTE: \
        for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
            pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \
        } \
        break; \
    case DF_HALF: \
        for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
            pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \
        } \
        break; \
    case DF_WORD: \
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
            pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \
        } \
        break; \
    case DF_DOUBLE: \
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
            pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \
        } \
        break; \
    default: \
        assert(0); \
    } \
}

MSA_UNOP_DF(nlzc)
MSA_UNOP_DF(nloc)
MSA_UNOP_DF(pcnt)
#undef MSA_UNOP_DF

#define FLOAT_ONE32 make_float32(0x3f8 << 20)
#define FLOAT_ONE64 make_float64(0x3ffULL << 52)

#define FLOAT_SNAN16 (float16_default_nan ^ 0x0220)
        /* 0x7c20 */
#define FLOAT_SNAN32 (float32_default_nan ^ 0x00400020)
        /* 0x7f800020 */
#define FLOAT_SNAN64 (float64_default_nan ^ 0x0008000000000020ULL)
        /* 0x7ff0000000000020 */

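/*
 * When an enabled exception is detected, the helpers below overwrite the
 * destination element with the corresponding FLOAT_SNAN pattern whose low
 * six bits carry the MIPS cause bits - the "((FLOAT_SNAN## >> 6) << 6) | c"
 * expression used in the macros further down.
 */
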
static inline void clear_msacsr_cause(CPUMIPSState *env)
{
    SET_FP_CAUSE(env->active_tc.msacsr, 0);
}

static inline void check_msacsr_cause(CPUMIPSState *env, uintptr_t retaddr)
{
    if ((GET_FP_CAUSE(env->active_tc.msacsr) &
            (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) {
        UPDATE_FP_FLAGS(env->active_tc.msacsr,
                GET_FP_CAUSE(env->active_tc.msacsr));
    } else {
        do_raise_exception(env, EXCP_MSAFPE, retaddr);
    }
}

/* Flush-to-zero use cases for update_msacsr() */
#define CLEAR_FS_UNDERFLOW 1
#define CLEAR_IS_INEXACT 2
#define RECIPROCAL_INEXACT 4

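/*
 * The action flags select how flush-to-zero interacts with the IEEE flags:
 * for example, the comparison macros below call
 * update_msacsr(env, CLEAR_IS_INEXACT, 0) so that flushing an input
 * denormal does not mark the compare itself as inexact.
 */
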
static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
{
    int ieee_ex;

    int c;
    int cause;
    int enable;

    ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status);

    /* QEMU softfloat does not signal all underflow cases */
    if (denormal) {
        ieee_ex |= float_flag_underflow;
    }

    c = ieee_ex_to_mips(ieee_ex);
    enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;

    /* Set Inexact (I) when flushing inputs to zero */
    if ((ieee_ex & float_flag_input_denormal) &&
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
        if (action & CLEAR_IS_INEXACT) {
            c &= ~FP_INEXACT;
        } else {
            c |=  FP_INEXACT;
        }
    }

    /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
    if ((ieee_ex & float_flag_output_denormal) &&
            (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
        c |= FP_INEXACT;
        if (action & CLEAR_FS_UNDERFLOW) {
            c &= ~FP_UNDERFLOW;
        } else {
            c |=  FP_UNDERFLOW;
        }
    }

    /* Set Inexact (I) when Overflow (O) is not enabled */
    if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) {
        c |= FP_INEXACT;
    }

    /* Clear Exact Underflow when Underflow (U) is not enabled */
    if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 &&
            (c & FP_INEXACT) == 0) {
        c &= ~FP_UNDERFLOW;
    }

    /* Reciprocal operations set only Inexact when valid and not
       divide by zero */
    if ((action & RECIPROCAL_INEXACT) &&
            (c & (FP_INVALID | FP_DIV0)) == 0) {
        c = FP_INEXACT;
    }

    cause = c & enable;    /* all current enabled exceptions */

    if (cause == 0) {
        /* No enabled exception, update the MSACSR Cause
           with all current exceptions */
        SET_FP_CAUSE(env->active_tc.msacsr,
                (GET_FP_CAUSE(env->active_tc.msacsr) | c));
    } else {
        /* Current exceptions are enabled */
        if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) {
            /* Exception(s) will trap, update MSACSR Cause
               with all enabled exceptions */
            SET_FP_CAUSE(env->active_tc.msacsr,
                    (GET_FP_CAUSE(env->active_tc.msacsr) | c));
        }
    }

    return c;
}

static inline int get_enabled_exceptions(const CPUMIPSState *env, int c)
{
    int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
    return c & enable;
}

static inline float16 float16_from_float32(int32_t a, flag ieee,
                                           float_status *status)
{
    float16 f_val;

    f_val = float32_to_float16((float32)a, ieee, status);
    f_val = float16_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 15)) : f_val;
}

static inline float32 float32_from_float64(int64_t a, float_status *status)
{
    float32 f_val;

    f_val = float64_to_float32((float64)a, status);
    f_val = float32_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 31)) : f_val;
}

static inline float32 float32_from_float16(int16_t a, flag ieee,
                                            float_status *status)
{
    float32 f_val;

    f_val = float16_to_float32((float16)a, ieee, status);
    f_val = float32_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1 << 31)) : f_val;
}

static inline float64 float64_from_float32(int32_t a, float_status *status)
{
    float64 f_val;

    f_val = float32_to_float64((float64)a, status);
    f_val = float64_maybe_silence_nan(f_val);

    return a < 0 ? (f_val | (1ULL << 63)) : f_val;
}

static inline float32 float32_from_q16(int16_t a, float_status *status)
{
    float32 f_val;

    /* conversion as integer and scaling */
    f_val = int32_to_float32(a, status);
    f_val = float32_scalbn(f_val, -15, status);

    return f_val;
}

static inline float64 float64_from_q32(int32_t a, float_status *status)
{
    float64 f_val;

    /* conversion as integer and scaling */
    f_val = int32_to_float64(a, status);
    f_val = float64_scalbn(f_val, -31, status);

    return f_val;
}

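/*
 * The Q16/Q32 helpers treat an element as a signed fixed-point fraction in
 * [-1, 1): for example the Q15 value 0x4000 is 0x4000 / 2^15 = 0.5, which is
 * exactly what the integer conversion followed by scalbn(-15) above produces.
 */
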
static inline int16_t float32_to_q16(float32 a, float_status *status)
{
    int32_t q_val;
    int32_t q_min = 0xffff8000;
    int32_t q_max = 0x00007fff;

    int ieee_ex;

    if (float32_is_any_nan(a)) {
        float_raise(float_flag_invalid, status);
        return 0;
    }

    /* scaling */
    a = float32_scalbn(a, 15, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_overflow) {
        float_raise(float_flag_inexact, status);
        return (int32_t)a < 0 ? q_min : q_max;
    }

    /* conversion to int */
    q_val = float32_to_int32(a, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_invalid) {
        set_float_exception_flags(ieee_ex & (~float_flag_invalid)
                                  , status);
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int32_t)a < 0 ? q_min : q_max;
    }

    if (q_val < q_min) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int16_t)q_min;
    }

    if (q_max < q_val) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int16_t)q_max;
    }

    return (int16_t)q_val;
}

static inline int32_t float64_to_q32(float64 a, float_status *status)
{
    int64_t q_val;
    int64_t q_min = 0xffffffff80000000LL;
    int64_t q_max = 0x000000007fffffffLL;

    int ieee_ex;

    if (float64_is_any_nan(a)) {
        float_raise(float_flag_invalid, status);
        return 0;
    }

    /* scaling */
    a = float64_scalbn(a, 31, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_overflow) {
        float_raise(float_flag_inexact, status);
        return (int64_t)a < 0 ? q_min : q_max;
    }

    /* conversion to integer */
    q_val = float64_to_int64(a, status);

    ieee_ex = get_float_exception_flags(status);
    set_float_exception_flags(ieee_ex & (~float_flag_underflow)
                              , status);

    if (ieee_ex & float_flag_invalid) {
        set_float_exception_flags(ieee_ex & (~float_flag_invalid)
                                  , status);
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int64_t)a < 0 ? q_min : q_max;
    }

    if (q_val < q_min) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int32_t)q_min;
    }

    if (q_max < q_val) {
        float_raise(float_flag_overflow | float_flag_inexact, status);
        return (int32_t)q_max;
    }

    return (int32_t)q_val;
}

#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
        int64_t cond; \
        set_float_exception_flags(0, status); \
        if (!QUIET) { \
            cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
        } else { \
            cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \
        } \
        DEST = cond ? M_MAX_UINT(BITS) : 0; \
        c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)

#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
        if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \
            DEST = 0; \
        } \
    } while (0)

#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
            if (DEST == 0) { \
                MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
            } \
        } \
    } while (0)

#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
        } \
    } while (0)

#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \
    do { \
        MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
        if (DEST == 0) { \
            MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \
        } \
    } while (0)

static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet,
                              uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet,
                              uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32,
                    quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64,
                    quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet,
                              uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet,
                               uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet,
                              uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet,
                               uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet,
                              uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet,
                               uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet,
                              uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                               wr_t *pwt, uint32_t df, int quiet,
                               uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
                              wr_t *pwt, uint32_t df, int quiet,
                              uintptr_t retaddr)
{
    wr_t wx, *pwx = &wx;
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, retaddr);

    msa_move_v(pwd, pwx);
}

void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_af(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_un(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_eq(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ueq(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_lt(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ult(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_le(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ule(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_af(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_un(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_eq(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ueq(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_lt(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ult(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_le(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ule(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_or(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_une(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ne(env, pwd, pws, pwt, df, 1, GETPC());
}

void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_or(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_une(env, pwd, pws, pwt, df, 0, GETPC());
}

void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    compare_ne(env, pwd, pws, pwt, df, 0, GETPC());
}

#define float16_is_zero(ARG) 0
#define float16_is_zero_or_denormal(ARG) 0

#define IS_DENORMAL(ARG, BITS) \
    (!float ## BITS ## _is_zero(ARG) \
    && float ## BITS ## _is_zero_or_denormal(ARG))

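/*
 * Note: float16_is_zero / float16_is_zero_or_denormal are stubbed to 0
 * above, so IS_DENORMAL(..., 16) never reports a denormal half-precision
 * result; only the 32- and 64-bit paths feed a meaningful denormal flag
 * into update_msacsr().
 */
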
#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \
    do { \
        float_status *status = &env->active_tc.msa_fp_status; \
        int c; \
        set_float_exception_flags(0, status); \
        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
        if (get_enabled_exceptions(env, c)) { \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c; \
        } \
    } while (0)

2394 void helper_msa_fadd_df(CPUMIPSState
*env
, uint32_t df
, uint32_t wd
,
2395 uint32_t ws
, uint32_t wt
)
2397 wr_t wx
, *pwx
= &wx
;
2398 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
2399 wr_t
*pws
= &(env
->active_fpu
.fpr
[ws
].wr
);
2400 wr_t
*pwt
= &(env
->active_fpu
.fpr
[wt
].wr
);
2403 clear_msacsr_cause(env
);
2407 for (i
= 0; i
< DF_ELEMENTS(DF_WORD
); i
++) {
2408 MSA_FLOAT_BINOP(pwx
->w
[i
], add
, pws
->w
[i
], pwt
->w
[i
], 32);
2412 for (i
= 0; i
< DF_ELEMENTS(DF_DOUBLE
); i
++) {
2413 MSA_FLOAT_BINOP(pwx
->d
[i
], add
, pws
->d
[i
], pwt
->d
[i
], 64);
2420 check_msacsr_cause(env
, GETPC());
2421 msa_move_v(pwd
, pwx
);
2424 void helper_msa_fsub_df(CPUMIPSState
*env
, uint32_t df
, uint32_t wd
,
2425 uint32_t ws
, uint32_t wt
)
2427 wr_t wx
, *pwx
= &wx
;
2428 wr_t
*pwd
= &(env
->active_fpu
.fpr
[wd
].wr
);
2429 wr_t
*pws
= &(env
->active_fpu
.fpr
[ws
].wr
);
2430 wr_t
*pwt
= &(env
->active_fpu
.fpr
[wt
].wr
);
2433 clear_msacsr_cause(env
);
2437 for (i
= 0; i
< DF_ELEMENTS(DF_WORD
); i
++) {
2438 MSA_FLOAT_BINOP(pwx
->w
[i
], sub
, pws
->w
[i
], pwt
->w
[i
], 32);
2442 for (i
= 0; i
< DF_ELEMENTS(DF_DOUBLE
); i
++) {
2443 MSA_FLOAT_BINOP(pwx
->d
[i
], sub
, pws
->d
[i
], pwt
->d
[i
], 64);
2450 check_msacsr_cause(env
, GETPC());
2451 msa_move_v(pwd
, pwx
);
void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
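
/*
 * Fused multiply-add: FMADD computes wd + ws * wt, while FMSUB passes
 * float_muladd_negate_product to compute wd - ws * wt.
 */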
#define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS)              \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;              \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status);  \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)
void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
                             pws->w[i], pwt->w[i], 0, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
                             pws->d[i], pwt->d[i], 0, 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
                             pws->w[i], pwt->w[i],
                             float_muladd_negate_product, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
                             pws->d[i], pwt->d[i],
                             float_muladd_negate_product, 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
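
/*
 * FEXP2 scales ws by 2^wt using scalbn; the integer exponent taken from
 * wt is clamped (to +/-0x200 for words, +/-0x1000 for doublewords) so the
 * shift passed to scalbn stays within a range it can saturate safely.
 */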
void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i],
                            pwt->w[i] >  0x200 ?  0x200 :
                            pwt->w[i] < -0x200 ? -0x200 : pwt->w[i],
                            32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i],
                            pwt->d[i] >  0x1000 ?  0x1000 :
                            pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i],
                            64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS)                                 \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;              \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG, status);                       \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)
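
/*
 * FEXDO converts each element down to the next narrower format and packs
 * the two source vectors into the left (Lh/Lw) and right (Rh/Rw) halves of
 * each destination element.
 */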
void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16);
            MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16);
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32);
            MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS)                       \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;              \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG, status);                       \
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0);                      \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## XBITS >> 6) << 6) | c;                   \
        }                                                                   \
    } while (0)
void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                       uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_HALF:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16);
            MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16);
        }
        break;
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32);
            MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS)       \
    !float ## BITS ## _is_any_nan(ARG1)          \
    && float ## BITS ## _is_quiet_nan(ARG2)
#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS)                         \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;              \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status);                \
        c = update_msacsr(env, 0, 0);                                       \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)
#define FMAXMIN_A(F, G, X, _S, _T, BITS)                            \
    do {                                                            \
        uint## BITS ##_t S = _S, T = _T;                            \
        uint## BITS ##_t as, at, xs, xt, xd;                        \
        if (NUMBER_QNAN_PAIR(S, T, BITS)) {                         \
            T = S;                                                  \
        } else if (NUMBER_QNAN_PAIR(T, S, BITS)) {                  \
            S = T;                                                  \
        }                                                           \
        as = float## BITS ##_abs(S);                                \
        at = float## BITS ##_abs(T);                                \
        MSA_FLOAT_MAXOP(xs, F, S, T, BITS);                         \
        MSA_FLOAT_MAXOP(xt, G, S, T, BITS);                         \
        MSA_FLOAT_MAXOP(xd, F, as, at, BITS);                       \
        X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt;  \
    } while (0)
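
/*
 * FMAXMIN_A picks between F(S, T) and G(S, T) according to which operand
 * has the larger magnitude, after first substituting the numeric operand
 * for a quiet-NaN one; it implements the "absolute" FMIN_A/FMAX_A variants
 * below.
 */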
void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32);
            } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32)) {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32);
            } else {
                MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32);
            }
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64);
            } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64)) {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64);
            } else {
                MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64);
            }
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws, uint32_t wt)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
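
/*
 * FCLASS reuses the scalar FPU classification helpers to produce the MIPS
 * floating-point class mask for every element.
 */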
void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
                          uint32_t wd, uint32_t ws)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    if (df == DF_WORD) {
        pwd->w[0] = helper_float_class_s(pws->w[0]);
        pwd->w[1] = helper_float_class_s(pws->w[1]);
        pwd->w[2] = helper_float_class_s(pws->w[2]);
        pwd->w[3] = helper_float_class_s(pws->w[3]);
    } else {
        pwd->d[0] = helper_float_class_d(pws->d[0]);
        pwd->d[1] = helper_float_class_d(pws->d[1]);
    }
}
#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS)                                \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;              \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## OP(ARG, status);                       \
        c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0);                      \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        } else if (float ## BITS ## _is_any_nan(ARG)) {                     \
            DEST = 0;                                                       \
        }                                                                   \
    } while (0)
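
/*
 * MSA_FLOAT_UNOP0 is used for the float-to-integer conversions below:
 * underflow is cleared from the cause bits and, when the resulting
 * exception is not taken, a NaN input converts to 0.
 */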
void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                            uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                            uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS)                               \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;              \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status);   \
        c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) ||        \
                          float ## BITS ## _is_quiet_nan(DEST) ?            \
                          0 : RECIPROCAL_INEXACT,                           \
                          IS_DENORMAL(DEST, BITS));                         \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)
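
/*
 * MSA_FLOAT_RECIPROCAL computes 1/ARG and flags the estimate as inexact
 * (RECIPROCAL_INEXACT) unless the operand is infinite or the result is
 * already a quiet NaN; FRSQRT feeds it the square root of the operand.
 */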
void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i],
                    &env->active_tc.msa_fp_status), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i],
                    &env->active_tc.msa_fp_status), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
#define MSA_FLOAT_LOGB(DEST, ARG, BITS)                                     \
    do {                                                                    \
        float_status *status = &env->active_tc.msa_fp_status;              \
        int c;                                                              \
                                                                            \
        set_float_exception_flags(0, status);                               \
        set_float_rounding_mode(float_round_down, status);                  \
        DEST = float ## BITS ## _ ## log2(ARG, status);                     \
        DEST = float ## BITS ## _ ## round_to_int(DEST, status);            \
        set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr &            \
                                         MSACSR_RM_MASK) >> MSACSR_RM],     \
                                status);                                    \
                                                                            \
        set_float_exception_flags(get_float_exception_flags(status) &       \
                                  (~float_flag_inexact),                    \
                                  status);                                  \
                                                                            \
        c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS));                 \
                                                                            \
        if (get_enabled_exceptions(env, c)) {                               \
            DEST = ((FLOAT_SNAN ## BITS >> 6) << 6) | c;                    \
        }                                                                   \
    } while (0)
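
/*
 * MSA_FLOAT_LOGB computes floor(log2(ARG)): log2 is evaluated and rounded
 * to an integer with the rounding mode forced to round-down, the mode is
 * then restored from MSACSR.RM, and the inexact flag raised by the
 * intermediate steps is discarded.
 */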
void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                         uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                          uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            /* Half precision floats come in two formats: standard
               IEEE and "ARM" format.  The latter gains extra exponent
               range by omitting the NaN/Inf encodings.  */
            flag ieee = 1;

            MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
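
/*
 * FFQL/FFQR convert the left/right halves of fixed-point Q16/Q32 elements
 * to floating point; note that they do not clear or check the MSACSR cause
 * bits.
 */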
void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}
void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                        uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64);
        }
        break;
    default:
        assert(0);
    }

    msa_move_v(pwd, pwx);
}
void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
#define float32_from_int32 int32_to_float32
#define float32_from_uint32 uint32_to_float32

#define float64_from_int64 int64_to_float64
#define float64_from_uint64 uint64_to_float64
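
/*
 * These aliases let MSA_FLOAT_UNOP's "float<BITS>_from_<type>" naming
 * resolve to the softfloat integer-to-float conversion routines used by
 * FFINT_S and FFINT_U below.
 */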
void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}
void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
                           uint32_t ws)
{
    wr_t wx, *pwx = &wx;
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    wr_t *pws = &(env->active_fpu.fpr[ws].wr);
    uint32_t i;

    clear_msacsr_cause(env);

    switch (df) {
    case DF_WORD:
        for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
            MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32);
        }
        break;
    case DF_DOUBLE:
        for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
            MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64);
        }
        break;
    default:
        assert(0);
    }

    check_msacsr_cause(env, GETPC());
    msa_move_v(pwd, pwx);
}