qemu/ar7.git: target/riscv/insn_trans/trans_rvd.inc.c
/*
 * RISC-V translation routines for the RV64D Standard Extension.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2018 Peer Adelt, peer.adelt@hni.uni-paderborn.de
 *                    Bastian Koppelmann, kbastian@mail.uni-paderborn.de
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
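/*
 * This file is #included from target/riscv/translate.c, which provides the
 * pieces used below: the cpu_fpr[] TCG globals, gen_get_gpr()/gen_set_gpr(),
 * gen_set_rm(), mark_fs_dirty(), and the REQUIRE_FPU / REQUIRE_EXT() macros.
 * The REQUIRE_* macros return false from the trans_* function when the FPU is
 * disabled (mstatus.FS == 0) or the required extension is missing, and the
 * caller then raises an illegal-instruction exception.  mark_fs_dirty() marks
 * mstatus.FS as Dirty once the FP state has been touched.
 */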
static bool trans_fld(DisasContext *ctx, arg_fld *a)
{
    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    tcg_gen_addi_tl(t0, t0, a->imm);

    tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], t0, ctx->mem_idx, MO_TEQ);

    mark_fs_dirty(ctx);
    tcg_temp_free(t0);
    return true;
}
static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
{
    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    tcg_gen_addi_tl(t0, t0, a->imm);

    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], t0, ctx->mem_idx, MO_TEQ);

    mark_fs_dirty(ctx);
    tcg_temp_free(t0);
    return true;
}
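/*
 * The arithmetic instructions below defer to softfloat helpers.  gen_set_rm()
 * installs the rounding mode encoded in the instruction's rm field before the
 * helper runs; an rm value of 7 selects the dynamic rounding mode in fcsr.frm.
 */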
static bool trans_fmadd_d(DisasContext *ctx, arg_fmadd_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmsub_d(DisasContext *ctx, arg_fmsub_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                       cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fnmsub_d(DisasContext *ctx, arg_fnmsub_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fnmsub_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fnmadd_d(DisasContext *ctx, arg_fnmadd_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    gen_set_rm(ctx, a->rm);
    gen_helper_fnmadd_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1],
                        cpu_fpr[a->rs2], cpu_fpr[a->rs3]);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fadd_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsub_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fmul_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fdiv_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fsqrt_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);

    mark_fs_dirty(ctx);
    return true;
}
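/*
 * Sign-injection (FSGNJ.D / FSGNJN.D / FSGNJX.D) is done with plain TCG bit
 * operations instead of a helper: the deposit keeps bits 62:0 of rs1 and
 * takes bit 63 (the sign) from rs2, optionally inverted or XORed.
 */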
static bool trans_fsgnj_d(DisasContext *ctx, arg_fsgnj_d *a)
{
    if (a->rs1 == a->rs2) { /* FMOV */
        tcg_gen_mov_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1]);
    } else {
        tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rs2],
                            cpu_fpr[a->rs1], 0, 63);
    }
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsgnjn_d(DisasContext *ctx, arg_fsgnjn_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    if (a->rs1 == a->rs2) { /* FNEG */
        tcg_gen_xori_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], INT64_MIN);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_not_i64(t0, cpu_fpr[a->rs2]);
        tcg_gen_deposit_i64(cpu_fpr[a->rd], t0, cpu_fpr[a->rs1], 0, 63);
        tcg_temp_free_i64(t0);
    }
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fsgnjx_d(DisasContext *ctx, arg_fsgnjx_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);
    if (a->rs1 == a->rs2) { /* FABS */
        tcg_gen_andi_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], ~INT64_MIN);
    } else {
        TCGv_i64 t0 = tcg_temp_new_i64();
        tcg_gen_andi_i64(t0, cpu_fpr[a->rs2], INT64_MIN);
        tcg_gen_xor_i64(cpu_fpr[a->rd], cpu_fpr[a->rs1], t0);
        tcg_temp_free_i64(t0);
    }
    mark_fs_dirty(ctx);
    return true;
}
static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_helper_fmin_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_helper_fmax_d(cpu_fpr[a->rd], cpu_env,
                      cpu_fpr[a->rs1], cpu_fpr[a->rs2]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_s_d(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_s(cpu_fpr[a->rd], cpu_env, cpu_fpr[a->rs1]);

    mark_fs_dirty(ctx);
    return true;
}
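/*
 * The comparisons write 0 or 1 into an integer temporary and then into the
 * destination GPR; they do not modify the FP register file, so there is no
 * mark_fs_dirty() here (the helpers can still set exception flags in fcsr).
 */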
static bool trans_feq_d(DisasContext *ctx, arg_feq_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_feq_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

static bool trans_flt_d(DisasContext *ctx, arg_flt_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_flt_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

static bool trans_fle_d(DisasContext *ctx, arg_fle_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_fle_d(t0, cpu_env, cpu_fpr[a->rs1], cpu_fpr[a->rs2]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}
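/* FCLASS.D never raises FP exceptions, so its helper does not take cpu_env. */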
static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_helper_fclass_d(t0, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_fcvt_w_d(DisasContext *ctx, arg_fcvt_w_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_w_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

static bool trans_fcvt_wu_d(DisasContext *ctx, arg_fcvt_wu_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_wu_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);

    return true;
}

static bool trans_fcvt_d_w(DisasContext *ctx, arg_fcvt_d_w *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_w(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_d_wu(DisasContext *ctx, arg_fcvt_d_wu *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_wu(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);

    mark_fs_dirty(ctx);
    return true;
}
#ifdef TARGET_RISCV64
static bool trans_fcvt_l_d(DisasContext *ctx, arg_fcvt_l_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_l_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);
    return true;
}

static bool trans_fcvt_lu_d(DisasContext *ctx, arg_fcvt_lu_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_lu_d(t0, cpu_env, cpu_fpr[a->rs1]);
    gen_set_gpr(a->rd, t0);
    tcg_temp_free(t0);
    return true;
}
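/*
 * FMV.X.D and FMV.D.X (below) are raw bit moves between a GPR and an FPR.
 * They are only built for TARGET_RISCV64, where TCGv is 64 bits wide, so the
 * value can be copied directly without any conversion.
 */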
static bool trans_fmv_x_d(DisasContext *ctx, arg_fmv_x_d *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    gen_set_gpr(a->rd, cpu_fpr[a->rs1]);
    return true;
}

static bool trans_fcvt_d_l(DisasContext *ctx, arg_fcvt_d_l *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_l(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fcvt_d_lu(DisasContext *ctx, arg_fcvt_d_lu *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    gen_set_rm(ctx, a->rm);
    gen_helper_fcvt_d_lu(cpu_fpr[a->rd], cpu_env, t0);
    tcg_temp_free(t0);
    mark_fs_dirty(ctx);
    return true;
}

static bool trans_fmv_d_x(DisasContext *ctx, arg_fmv_d_x *a)
{
    REQUIRE_FPU;
    REQUIRE_EXT(ctx, RVD);

    TCGv t0 = tcg_temp_new();
    gen_get_gpr(t0, a->rs1);

    tcg_gen_mov_tl(cpu_fpr[a->rd], t0);
    tcg_temp_free(t0);
    mark_fs_dirty(ctx);
    return true;
}
#endif