/*
 * Copyright(c) 2019-2022 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HEXAGON_GEN_TCG_H
#define HEXAGON_GEN_TCG_H

/*
 * Here is a primer to understand the tag names for load/store instructions
 *
 * Data types
 *      b        signed byte                       r0 = memb(r2+#0)
 *     ub        unsigned byte                     r0 = memub(r2+#0)
 *      h        signed half word (16 bits)        r0 = memh(r2+#0)
 *     uh        unsigned half word                r0 = memuh(r2+#0)
 *      i        integer (32 bits)                 r0 = memw(r2+#0)
 *      d        double word (64 bits)             r1:0 = memd(r2+#0)
 *
 * Addressing modes
 *     _io       indirect with offset              r0 = memw(r1+#4)
 *     _ur       absolute with register offset     r0 = memw(r1<<#4+##variable)
 *     _rr       indirect with register offset     r0 = memw(r1+r4<<#2)
 *     gp        global pointer relative           r0 = memw(gp+#200)
 *     _sp       stack pointer relative            r0 = memw(r29+#12)
 *     _ap       absolute set                      r0 = memw(r1=##variable)
 *     _pr       post increment register           r0 = memw(r1++m1)
 *     _pbr      post increment bit reverse        r0 = memw(r1++m1:brev)
 *     _pi       post increment immediate          r0 = memb(r1++#1)
 *     _pci      post increment circular immediate r0 = memw(r1++#4:circ(m0))
 *     _pcr      post increment circular register  r0 = memw(r1++I:circ(m0))
 */
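/*
 * Illustrative example (editorial, not part of the upstream header): a tag
 * such as L2_loadrub_pci, used below, combines the "ub" data type with the
 * "_pci" addressing mode, i.e. an unsigned byte load with post-increment
 * circular immediate addressing, roughly
 *     r0 = memub(r1++#1:circ(m0))
 */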
/* Macros for complex addressing modes */
#define GET_EA_ap \
    do { \
        fEA_IMM(UiV); \
        tcg_gen_movi_tl(ReV, UiV); \
    } while (0)
#define GET_EA_pr \
    do { \
        fEA_REG(RxV); \
        fPM_M(RxV, MuV); \
    } while (0)
#define GET_EA_pbr \
    do { \
        gen_helper_fbrev(EA, RxV); \
        tcg_gen_add_tl(RxV, RxV, MuV); \
    } while (0)
#define GET_EA_pi \
    do { \
        fEA_REG(RxV); \
        fPM_I(RxV, siV); \
    } while (0)
#define GET_EA_pci \
    do { \
        TCGv tcgv_siV = tcg_constant_tl(siV); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \
                            hex_gpr[HEX_REG_CS0 + MuN]); \
    } while (0)
#define GET_EA_pcr(SHIFT) \
    do { \
        TCGv ireg = tcg_temp_new(); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_read_ireg(ireg, MuV, (SHIFT)); \
        gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
        tcg_temp_free(ireg); \
    } while (0)
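/*
 * Illustrative reading (editorial, not part of the upstream header): each
 * GET_EA_* macro leaves the effective address in EA and, for the
 * post-increment forms, updates the base register RxV.  For example,
 * GET_EA_pi expands to
 *     do {
 *         fEA_REG(RxV);      -- EA taken from the base register
 *         fPM_I(RxV, siV);   -- RxV is then post-incremented by #siV
 *     } while (0)
 * which matches the "_pi" row of the primer above.
 */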
/* Instructions with multiple definitions */
#define fGEN_TCG_LOAD_AP(RES, SIZE, SIGN) \
    do { \
        fMUST_IMMEXT(UiV); \
        fEA_IMM(UiV); \
        fLOAD(1, SIZE, SIGN, EA, RES); \
        tcg_gen_movi_tl(ReV, UiV); \
    } while (0)

#define fGEN_TCG_L4_loadrub_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 1, u)
#define fGEN_TCG_L4_loadrb_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 1, s)
#define fGEN_TCG_L4_loadruh_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 2, u)
#define fGEN_TCG_L4_loadrh_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 2, s)
#define fGEN_TCG_L4_loadri_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RdV, 4, u)
#define fGEN_TCG_L4_loadrd_ap(SHORTCODE) \
    fGEN_TCG_LOAD_AP(RddV, 8, u)
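/*
 * Illustrative expansion (editorial, not part of the upstream header): for
 * the absolute-set form r0 = memub(r1=##variable), fGEN_TCG_L4_loadrub_ap
 * is fGEN_TCG_LOAD_AP(RdV, 1, u), i.e.
 *     do {
 *         fMUST_IMMEXT(UiV);
 *         fEA_IMM(UiV);
 *         fLOAD(1, 1, u, EA, RdV);
 *         tcg_gen_movi_tl(ReV, UiV);   -- Re is set to the absolute address
 *     } while (0)
 */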
#define fGEN_TCG_L2_loadrub_pci(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrb_pci(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadruh_pci(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrh_pci(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadri_pci(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrd_pci(SHORTCODE) SHORTCODE

#define fGEN_TCG_LOAD_pcr(SHIFT, LOAD) \
    do { \
        TCGv ireg = tcg_temp_new(); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_read_ireg(ireg, MuV, SHIFT); \
        gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
        LOAD; \
        tcg_temp_free(ireg); \
    } while (0)

#define fGEN_TCG_L2_loadrub_pcr(SHORTCODE) \
    fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, u, EA, RdV))
#define fGEN_TCG_L2_loadrb_pcr(SHORTCODE) \
    fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, s, EA, RdV))
#define fGEN_TCG_L2_loadruh_pcr(SHORTCODE) \
    fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, u, EA, RdV))
#define fGEN_TCG_L2_loadrh_pcr(SHORTCODE) \
    fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, s, EA, RdV))
#define fGEN_TCG_L2_loadri_pcr(SHORTCODE) \
    fGEN_TCG_LOAD_pcr(2, fLOAD(1, 4, u, EA, RdV))
#define fGEN_TCG_L2_loadrd_pcr(SHORTCODE) \
    fGEN_TCG_LOAD_pcr(3, fLOAD(1, 8, u, EA, RddV))
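/*
 * Illustrative expansion (editorial, not part of the upstream header):
 * fGEN_TCG_L2_loadri_pcr is fGEN_TCG_LOAD_pcr(2, fLOAD(1, 4, u, EA, RdV)),
 * i.e.
 *     do {
 *         TCGv ireg = tcg_temp_new();
 *         tcg_gen_mov_tl(EA, RxV);
 *         gen_read_ireg(ireg, MuV, 2);
 *         gen_helper_fcircadd(RxV, RxV, ireg, MuV,
 *                             hex_gpr[HEX_REG_CS0 + MuN]);
 *         fLOAD(1, 4, u, EA, RdV);
 *         tcg_temp_free(ireg);
 *     } while (0)
 * Judging from the definitions above, SHIFT appears to be log2 of the
 * access size (0/1/2/3 for byte/half/word/double), used to scale the
 * I increment read by gen_read_ireg.
 */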
#define fGEN_TCG_L2_loadrub_pr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrub_pbr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrub_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrb_pr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrb_pbr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrb_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadruh_pr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadruh_pbr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadruh_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrh_pr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrh_pbr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrh_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadri_pr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadri_pbr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadri_pi(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrd_pr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrd_pbr(SHORTCODE) SHORTCODE
#define fGEN_TCG_L2_loadrd_pi(SHORTCODE) SHORTCODE
/*
 * These instructions load 2 bytes and place them in
 * two halves of the destination register.
 * The GET_EA macro determines the addressing mode.
 * The SIGN argument determines whether to zero-extend or
 * sign-extend.
 */
#define fGEN_TCG_loadbXw2(GET_EA, SIGN) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv byte = tcg_temp_new(); \
        GET_EA; \
        fLOAD(1, 2, u, EA, tmp); \
        tcg_gen_movi_tl(RdV, 0); \
        for (int i = 0; i < 2; i++) { \
            gen_set_half(i, RdV, gen_get_byte(byte, i, tmp, (SIGN))); \
        } \
        tcg_temp_free(tmp); \
        tcg_temp_free(byte); \
    } while (0)
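/*
 * Illustrative sketch of the semantics (editorial, not part of the upstream
 * header): a single 16-bit load is performed, then each byte of the loaded
 * value is widened into one halfword of the destination, roughly
 *     RdV.h[0] = extend(tmp.b[0]);
 *     RdV.h[1] = extend(tmp.b[1]);
 * where extend() is zero- or sign-extension depending on SIGN.
 */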
#define fGEN_TCG_L2_loadbzw2_io(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), false)
#define fGEN_TCG_L4_loadbzw2_ur(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), false)
#define fGEN_TCG_L2_loadbsw2_io(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), true)
#define fGEN_TCG_L4_loadbsw2_ur(SHORTCODE) \
    fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), true)
#define fGEN_TCG_L4_loadbzw2_ap(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_ap, false)
#define fGEN_TCG_L2_loadbzw2_pr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pr, false)
#define fGEN_TCG_L2_loadbzw2_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pbr, false)
#define fGEN_TCG_L2_loadbzw2_pi(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pi, false)
#define fGEN_TCG_L4_loadbsw2_ap(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_ap, true)
#define fGEN_TCG_L2_loadbsw2_pr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pr, true)
#define fGEN_TCG_L2_loadbsw2_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pbr, true)
#define fGEN_TCG_L2_loadbsw2_pi(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pi, true)
#define fGEN_TCG_L2_loadbzw2_pci(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pci, false)
#define fGEN_TCG_L2_loadbsw2_pci(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pci, true)
#define fGEN_TCG_L2_loadbzw2_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pcr(1), false)
#define fGEN_TCG_L2_loadbsw2_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw2(GET_EA_pcr(1), true)
/*
 * These instructions load 4 bytes and place them in
 * four halves of the destination register pair.
 * The GET_EA macro determines the addressing mode.
 * The SIGN argument determines whether to zero-extend or
 * sign-extend.
 */
#define fGEN_TCG_loadbXw4(GET_EA, SIGN) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv byte = tcg_temp_new(); \
        GET_EA; \
        fLOAD(1, 4, u, EA, tmp); \
        tcg_gen_movi_i64(RddV, 0); \
        for (int i = 0; i < 4; i++) { \
            gen_set_half_i64(i, RddV, gen_get_byte(byte, i, tmp, (SIGN))); \
        } \
        tcg_temp_free(tmp); \
        tcg_temp_free(byte); \
    } while (0)
#define fGEN_TCG_L2_loadbzw4_io(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), false)
#define fGEN_TCG_L4_loadbzw4_ur(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), false)
#define fGEN_TCG_L2_loadbsw4_io(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), true)
#define fGEN_TCG_L4_loadbsw4_ur(SHORTCODE) \
    fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), true)
#define fGEN_TCG_L2_loadbzw4_pci(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pci, false)
#define fGEN_TCG_L2_loadbsw4_pci(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pci, true)
#define fGEN_TCG_L2_loadbzw4_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pcr(2), false)
#define fGEN_TCG_L2_loadbsw4_pcr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pcr(2), true)
#define fGEN_TCG_L4_loadbzw4_ap(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_ap, false)
#define fGEN_TCG_L2_loadbzw4_pr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pr, false)
#define fGEN_TCG_L2_loadbzw4_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pbr, false)
#define fGEN_TCG_L2_loadbzw4_pi(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pi, false)
#define fGEN_TCG_L4_loadbsw4_ap(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_ap, true)
#define fGEN_TCG_L2_loadbsw4_pr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pr, true)
#define fGEN_TCG_L2_loadbsw4_pbr(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pbr, true)
#define fGEN_TCG_L2_loadbsw4_pi(SHORTCODE) \
    fGEN_TCG_loadbXw4(GET_EA_pi, true)

/*
 * These instructions load a half word, shift the destination right by 16 bits
 * and place the loaded value in the high half word of the destination pair.
 * The GET_EA macro determines the addressing mode.
 */
#define fGEN_TCG_loadalignh(GET_EA) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
        GET_EA; \
        fLOAD(1, 2, u, EA, tmp); \
        tcg_gen_extu_i32_i64(tmp_i64, tmp); \
        tcg_gen_shri_i64(RyyV, RyyV, 16); \
        tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 48, 16); \
        tcg_temp_free(tmp); \
        tcg_temp_free_i64(tmp_i64); \
    } while (0)
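/*
 * Illustrative sketch of the semantics (editorial, not part of the upstream
 * header): the destination pair acts as a 64-bit shift register that
 * collects successive halfwords, roughly
 *     Ryy = Ryy >> 16;
 *     Ryy[63:48] = loaded halfword;
 * which matches the shri/deposit sequence above.
 */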
#define fGEN_TCG_L4_loadalignh_ur(SHORTCODE) \
    fGEN_TCG_loadalignh(fEA_IRs(UiV, RtV, uiV))
#define fGEN_TCG_L2_loadalignh_io(SHORTCODE) \
    fGEN_TCG_loadalignh(fEA_RI(RsV, siV))
#define fGEN_TCG_L2_loadalignh_pci(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pci)
#define fGEN_TCG_L2_loadalignh_pcr(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pcr(1))
#define fGEN_TCG_L4_loadalignh_ap(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_ap)
#define fGEN_TCG_L2_loadalignh_pr(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pr)
#define fGEN_TCG_L2_loadalignh_pbr(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pbr)
#define fGEN_TCG_L2_loadalignh_pi(SHORTCODE) \
    fGEN_TCG_loadalignh(GET_EA_pi)

/* Same as above, but loads a byte instead of half word */
#define fGEN_TCG_loadalignb(GET_EA) \
    do { \
        TCGv tmp = tcg_temp_new(); \
        TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
        GET_EA; \
        fLOAD(1, 1, u, EA, tmp); \
        tcg_gen_extu_i32_i64(tmp_i64, tmp); \
        tcg_gen_shri_i64(RyyV, RyyV, 8); \
        tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 56, 8); \
        tcg_temp_free(tmp); \
        tcg_temp_free_i64(tmp_i64); \
    } while (0)

#define fGEN_TCG_L2_loadalignb_io(SHORTCODE) \
    fGEN_TCG_loadalignb(fEA_RI(RsV, siV))
#define fGEN_TCG_L4_loadalignb_ur(SHORTCODE) \
    fGEN_TCG_loadalignb(fEA_IRs(UiV, RtV, uiV))
#define fGEN_TCG_L2_loadalignb_pci(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pci)
#define fGEN_TCG_L2_loadalignb_pcr(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pcr(0))
#define fGEN_TCG_L4_loadalignb_ap(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_ap)
#define fGEN_TCG_L2_loadalignb_pr(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pr)
#define fGEN_TCG_L2_loadalignb_pbr(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pbr)
#define fGEN_TCG_L2_loadalignb_pi(SHORTCODE) \
    fGEN_TCG_loadalignb(GET_EA_pi)
/*
 * Predicated loads
 * Here is a primer to understand the tag names
 *
 * Predicate used
 *      t        true "old" value                  if (p0) r0 = memb(r2+#0)
 *      f        false "old" value                 if (!p0) r0 = memb(r2+#0)
 *      tnew     true "new" value                  if (p0.new) r0 = memb(r2+#0)
 *      fnew     false "new" value                 if (!p0.new) r0 = memb(r2+#0)
 */
#define fGEN_TCG_PRED_LOAD(GET_EA, PRED, SIZE, SIGN) \
    do { \
        TCGv LSB = tcg_temp_local_new(); \
        TCGLabel *label = gen_new_label(); \
        tcg_gen_movi_tl(EA, 0); \
        PRED; \
        CHECK_NOSHUF_PRED(GET_EA, SIZE, LSB); \
        PRED_LOAD_CANCEL(LSB, EA); \
        tcg_gen_movi_tl(RdV, 0); \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
        fLOAD(1, SIZE, SIGN, EA, RdV); \
        gen_set_label(label); \
        tcg_temp_free(LSB); \
    } while (0)
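/*
 * Illustrative expansion (editorial, not part of the upstream header):
 * fGEN_TCG_L2_ploadrubt_pi below is
 *     fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, u)
 * i.e. the predicate's LSB is captured into the local temp, the destination
 * write is cancelled with PRED_LOAD_CANCEL when the predicate is false, and
 * the brcondi skips the fLOAD(1, 1, u, EA, RdV) in that case.
 */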
#define fGEN_TCG_L2_ploadrubt_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, u)
#define fGEN_TCG_L2_ploadrubf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, u)
#define fGEN_TCG_L2_ploadrubtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, u)
#define fGEN_TCG_L2_ploadrubfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 1, u)
#define fGEN_TCG_L2_ploadrbt_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, s)
#define fGEN_TCG_L2_ploadrbf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, s)
#define fGEN_TCG_L2_ploadrbtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, s)
#define fGEN_TCG_L2_ploadrbfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD({ fEA_REG(RxV); fPM_I(RxV, siV); }, \
                       fLSBNEWNOT(PtN), 1, s)

#define fGEN_TCG_L2_ploadruht_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, u)
#define fGEN_TCG_L2_ploadruhf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, u)
#define fGEN_TCG_L2_ploadruhtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, u)
#define fGEN_TCG_L2_ploadruhfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, u)
#define fGEN_TCG_L2_ploadrht_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, s)
#define fGEN_TCG_L2_ploadrhf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, s)
#define fGEN_TCG_L2_ploadrhtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, s)
#define fGEN_TCG_L2_ploadrhfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, s)

#define fGEN_TCG_L2_ploadrit_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 4, u)
#define fGEN_TCG_L2_ploadrif_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 4, u)
#define fGEN_TCG_L2_ploadritnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 4, u)
#define fGEN_TCG_L2_ploadrifnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 4, u)

/* Predicated loads into a register pair */
#define fGEN_TCG_PRED_LOAD_PAIR(GET_EA, PRED) \
    do { \
        TCGv LSB = tcg_temp_local_new(); \
        TCGLabel *label = gen_new_label(); \
        tcg_gen_movi_tl(EA, 0); \
        PRED; \
        CHECK_NOSHUF_PRED(GET_EA, 8, LSB); \
        PRED_LOAD_CANCEL(LSB, EA); \
        tcg_gen_movi_i64(RddV, 0); \
        tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
        fLOAD(1, 8, u, EA, RddV); \
        gen_set_label(label); \
        tcg_temp_free(LSB); \
    } while (0)

#define fGEN_TCG_L2_ploadrdt_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLD(PtV))
#define fGEN_TCG_L2_ploadrdf_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLDNOT(PtV))
#define fGEN_TCG_L2_ploadrdtnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEW(PtN))
#define fGEN_TCG_L2_ploadrdfnew_pi(SHORTCODE) \
    fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEWNOT(PtN))

/* load-locked and store-locked */
#define fGEN_TCG_L2_loadw_locked(SHORTCODE) \
    SHORTCODE
#define fGEN_TCG_L4_loadd_locked(SHORTCODE) \
    SHORTCODE
#define fGEN_TCG_S2_storew_locked(SHORTCODE) \
    SHORTCODE
#define fGEN_TCG_S4_stored_locked(SHORTCODE) \
    SHORTCODE

#define fGEN_TCG_STORE(SHORTCODE) \
    do { \
        TCGv HALF = tcg_temp_new(); \
        TCGv BYTE = tcg_temp_new(); \
        SHORTCODE; \
        tcg_temp_free(HALF); \
        tcg_temp_free(BYTE); \
    } while (0)

#define fGEN_TCG_STORE_pcr(SHIFT, STORE) \
    do { \
        TCGv ireg = tcg_temp_new(); \
        TCGv HALF = tcg_temp_new(); \
        TCGv BYTE = tcg_temp_new(); \
        tcg_gen_mov_tl(EA, RxV); \
        gen_read_ireg(ireg, MuV, SHIFT); \
        gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
        STORE; \
        tcg_temp_free(ireg); \
        tcg_temp_free(HALF); \
        tcg_temp_free(BYTE); \
    } while (0)
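/*
 * Illustrative note (editorial, not part of the upstream header): the
 * circular-register store forms parallel the loads above, e.g.
 * fGEN_TCG_S2_storerb_pcr is
 *     fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, RtV)))
 * so the EA is taken from RxV, RxV is advanced with fcircadd, and then the
 * store is performed.  The HALF and BYTE temporaries appear to exist for
 * the generated SHORTCODE/STORE bodies to use.
 */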
#define fGEN_TCG_S2_storerb_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerb_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerb_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, RtV)))

#define fGEN_TCG_S2_storerh_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerh_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerh_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, RtV)))

#define fGEN_TCG_S2_storerf_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerf_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerf_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(1, RtV)))

#define fGEN_TCG_S2_storeri_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storeri_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storeri_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, RtV))

#define fGEN_TCG_S2_storerd_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerd_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerd_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(3, fSTORE(1, 8, EA, RttV))

#define fGEN_TCG_S2_storerbnew_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerbnew_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerbnew_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, NtN)))

#define fGEN_TCG_S2_storerhnew_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerhnew_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerhnew_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, NtN)))

#define fGEN_TCG_S2_storerinew_pbr(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerinew_pci(SHORTCODE) \
    fGEN_TCG_STORE(SHORTCODE)
#define fGEN_TCG_S2_storerinew_pcr(SHORTCODE) \
    fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, NtN))

/*
 * Mathematical operations with more than one definition require
 * special handling
 */
#define fGEN_TCG_A5_ACS(SHORTCODE) \
    do { \
        gen_helper_vacsh_pred(PeV, cpu_env, RxxV, RssV, RttV); \
        gen_helper_vacsh_val(RxxV, cpu_env, RxxV, RssV, RttV); \
    } while (0)

/*
 * Approximate reciprocal
 * r3,p1 = sfrecipa(r0, r1)
 *
 * The helper packs the 2 32-bit results into a 64-bit value,
 * so unpack them into the proper results.
 */
#define fGEN_TCG_F2_sfrecipa(SHORTCODE) \
    do { \
        TCGv_i64 tmp = tcg_temp_new_i64(); \
        gen_helper_sfrecipa(tmp, cpu_env, RsV, RtV); \
        tcg_gen_extrh_i64_i32(RdV, tmp); \
        tcg_gen_extrl_i64_i32(PeV, tmp); \
        tcg_temp_free_i64(tmp); \
    } while (0)
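/*
 * Illustrative note (editorial, not part of the upstream header): the
 * packing convention follows from the unpacking above, i.e. the helper's
 * 64-bit return value holds
 *     bits 63:32 -> Rd (reciprocal estimate)
 *     bits 31:0  -> Pe (predicate value)
 * so tcg_gen_extrh_i64_i32 / tcg_gen_extrl_i64_i32 extract the high and
 * low halves respectively.
 */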
/*
 * Approximation of the reciprocal square root
 * r1,p0 = sfinvsqrta(r0)
 *
 * The helper packs the 2 32-bit results into a 64-bit value,
 * so unpack them into the proper results.
 */
#define fGEN_TCG_F2_sfinvsqrta(SHORTCODE) \
    do { \
        TCGv_i64 tmp = tcg_temp_new_i64(); \
        gen_helper_sfinvsqrta(tmp, cpu_env, RsV); \
        tcg_gen_extrh_i64_i32(RdV, tmp); \
        tcg_gen_extrl_i64_i32(PeV, tmp); \
        tcg_temp_free_i64(tmp); \
    } while (0)

/*
 * Add or subtract with carry.
 * Predicate register is used as an extra input and output.
 * r5:4 = add(r1:0, r3:2, p1):carry
 */
#define fGEN_TCG_A4_addp_c(SHORTCODE) \
    do { \
        TCGv_i64 carry = tcg_temp_new_i64(); \
        TCGv_i64 zero = tcg_constant_i64(0); \
        tcg_gen_extu_i32_i64(carry, PxV); \
        tcg_gen_andi_i64(carry, carry, 1); \
        tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
        tcg_gen_add2_i64(RddV, carry, RddV, carry, RttV, zero); \
        tcg_gen_extrl_i64_i32(PxV, carry); \
        gen_8bitsof(PxV, PxV); \
        tcg_temp_free_i64(carry); \
    } while (0)

/* r5:4 = sub(r1:0, r3:2, p1):carry */
#define fGEN_TCG_A4_subp_c(SHORTCODE) \
    do { \
        TCGv_i64 carry = tcg_temp_new_i64(); \
        TCGv_i64 zero = tcg_constant_i64(0); \
        TCGv_i64 not_RttV = tcg_temp_new_i64(); \
        tcg_gen_extu_i32_i64(carry, PxV); \
        tcg_gen_andi_i64(carry, carry, 1); \
        tcg_gen_not_i64(not_RttV, RttV); \
        tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
        tcg_gen_add2_i64(RddV, carry, RddV, carry, not_RttV, zero); \
        tcg_gen_extrl_i64_i32(PxV, carry); \
        gen_8bitsof(PxV, PxV); \
        tcg_temp_free_i64(carry); \
        tcg_temp_free_i64(not_RttV); \
    } while (0)
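/*
 * Illustrative sketch of the carry chaining (editorial, not part of the
 * upstream header), writing {high, low} for the two outputs of
 * tcg_gen_add2_i64:
 *     {carry, Rdd} = Rss + carry_in          -- first add2
 *     {carry, Rdd} = Rdd + Rtt, plus the high word carried forward
 * The accumulated high word is non-zero exactly when the 64-bit
 * add-with-carry overflows, and gen_8bitsof turns that into the predicate
 * value.  Subtract-with-carry is the same sequence with ~Rtt in place of
 * Rtt.
 */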
/*
 * Compare each of the 8 unsigned bytes
 * The minimum is placed in each byte of the destination.
 * Each bit of the predicate is set true if the byte from the first operand
 * is greater than the byte from the second operand.
 * r5:4,p1 = vminub(r1:0, r3:2)
 */
#define fGEN_TCG_A6_vminub_RdP(SHORTCODE) \
    do { \
        TCGv left = tcg_temp_new(); \
        TCGv right = tcg_temp_new(); \
        TCGv tmp = tcg_temp_new(); \
        tcg_gen_movi_tl(PeV, 0); \
        tcg_gen_movi_i64(RddV, 0); \
        for (int i = 0; i < 8; i++) { \
            gen_get_byte_i64(left, i, RttV, false); \
            gen_get_byte_i64(right, i, RssV, false); \
            tcg_gen_setcond_tl(TCG_COND_GT, tmp, left, right); \
            tcg_gen_deposit_tl(PeV, PeV, tmp, i, 1); \
            tcg_gen_umin_tl(tmp, left, right); \
            gen_set_byte_i64(i, RddV, tmp); \
        } \
        tcg_temp_free(left); \
        tcg_temp_free(right); \
        tcg_temp_free(tmp); \
    } while (0)
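/*
 * Illustrative sketch of the per-lane semantics (editorial, not part of the
 * upstream header), for each byte lane i in 0..7:
 *     Pe[i]     = (Rtt.ub[i] > Rss.ub[i]);
 *     Rdd.ub[i] = min(Rtt.ub[i], Rss.ub[i]);
 * which is what the setcond/deposit and umin/set_byte pairs above generate.
 */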
#define fGEN_TCG_J2_call(SHORTCODE) \
    gen_call(ctx, riV)

#define fGEN_TCG_J2_callt(SHORTCODE) \
    gen_cond_call(ctx, PuV, TCG_COND_EQ, riV)
#define fGEN_TCG_J2_callf(SHORTCODE) \
    gen_cond_call(ctx, PuV, TCG_COND_NE, riV)

#define fGEN_TCG_J2_endloop0(SHORTCODE) \
    gen_endloop0(ctx)
/*
 * Compound compare and jump instructions
 * Here is a primer to understand the tag names
 *
 * Comparison
 *      cmpeqi   compare equal to an immediate
 *      cmpgti   compare greater than an immediate
 *      cmpgtui  compare greater than an unsigned immediate
 *      cmpeqn1  compare equal to negative 1
 *      cmpgtn1  compare greater than negative 1
 *      cmpeq    compare equal (two registers)
 *      cmpgtu   compare greater than unsigned (two registers)
 *      tstbit0  test bit zero
 *
 * Condition
 *      tp0      p0 is true     p0 = cmp.eq(r0,#5); if (p0.new) jump:nt address
 *      fp0      p0 is false    p0 = cmp.eq(r0,#5); if (!p0.new) jump:nt address
 *      tp1      p1 is true     p1 = cmp.eq(r0,#5); if (p1.new) jump:nt address
 *      fp1      p1 is false    p1 = cmp.eq(r0,#5); if (!p1.new) jump:nt address
 *
 * Prediction (not modelled in qemu)
 *      _nt      not taken
 *      _t       taken
 */
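/*
 * Illustrative decoding (editorial, not part of the upstream header):
 * J4_cmpeqi_tp0_jump_t means "compare a register equal to an immediate,
 * jump if p0 (the newly produced predicate) is true, predicted taken".
 * Since branch prediction is not modelled, the _t and _nt variants below
 * expand to the same gen_cmpnd_* call.
 */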
#define fGEN_TCG_J4_cmpeq_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpeq_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)

#define fGEN_TCG_J4_cmpgt_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgt_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GT, RsV, RtV, riV)

#define fGEN_TCG_J4_cmpgtu_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)

#define fGEN_TCG_J4_cmpeqi_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)

#define fGEN_TCG_J4_cmpgti_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgti_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GT, RsV, UiV, riV)

#define fGEN_TCG_J4_cmpgtui_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)

#define fGEN_TCG_J4_cmpeqn1_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_EQ, RsV, riV)
#define fGEN_TCG_J4_cmpeqn1_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_EQ, RsV, riV)

#define fGEN_TCG_J4_cmpgtn1_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_GT, RsV, riV)
#define fGEN_TCG_J4_cmpgtn1_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_GT, RsV, riV)

#define fGEN_TCG_J4_tstbit0_tp0_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_tp0_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_fp0_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_fp0_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_tp1_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_tp1_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_fp1_jump_nt(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_fp1_jump_t(SHORTCODE) \
    gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_NE, riV)

#define fGEN_TCG_J2_jump(SHORTCODE) \
    gen_jump(ctx, riV)
#define fGEN_TCG_J2_jumpr(SHORTCODE) \
    gen_jumpr(ctx, RsV)
#define fGEN_TCG_J4_jumpseti(SHORTCODE) \
    do { \
        tcg_gen_movi_tl(RdV, UiV); \
        gen_jump(ctx, riV); \
    } while (0)

#define fGEN_TCG_cond_jumpt(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jump(ctx, TCG_COND_EQ, LSB, riV); \
        tcg_temp_free(LSB); \
    } while (0)
#define fGEN_TCG_cond_jumpf(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jump(ctx, TCG_COND_NE, LSB, riV); \
        tcg_temp_free(LSB); \
    } while (0)

#define fGEN_TCG_J2_jumpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumptpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumpf(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumpfpt(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumptnew(SHORTCODE) \
    gen_cond_jump(ctx, TCG_COND_EQ, PuN, riV)
#define fGEN_TCG_J2_jumptnewpt(SHORTCODE) \
    gen_cond_jump(ctx, TCG_COND_EQ, PuN, riV)
#define fGEN_TCG_J2_jumpfnewpt(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumpfnew(SHORTCODE) \
    fGEN_TCG_cond_jumpf(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprz(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_NE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprzpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_NE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprnz(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_EQ, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprnzpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_EQ, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprgtez(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_GE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprgtezpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_GE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprltez(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_LE, LSB, RsV, 0))
#define fGEN_TCG_J2_jumprltezpt(SHORTCODE) \
    fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_LE, LSB, RsV, 0))

#define fGEN_TCG_cond_jumprt(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jumpr(ctx, RsV, TCG_COND_EQ, LSB); \
        tcg_temp_free(LSB); \
    } while (0)
#define fGEN_TCG_cond_jumprf(COND) \
    do { \
        TCGv LSB = tcg_temp_new(); \
        COND; \
        gen_cond_jumpr(ctx, RsV, TCG_COND_NE, LSB); \
        tcg_temp_free(LSB); \
    } while (0)

#define fGEN_TCG_J2_jumprt(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprtpt(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprf(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprfpt(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBOLD(PuV))
#define fGEN_TCG_J2_jumprtnew(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprtnewpt(SHORTCODE) \
    fGEN_TCG_cond_jumprt(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprfnew(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBNEW(PuN))
#define fGEN_TCG_J2_jumprfnewpt(SHORTCODE) \
    fGEN_TCG_cond_jumprf(fLSBNEW(PuN))
/*
 * New value compare & jump instructions
 * if ([!]COND(r0.new, r1)) jump:t address
 * if ([!]COND(r0.new, #7)) jump:t address
 */
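/*
 * Illustrative note (editorial, not part of the upstream header): NsN is
 * the "new" value of a register produced by another instruction in the
 * same packet.  The false-sense variants are implemented by inverting the
 * TCG condition, e.g. cmpgt_f uses TCG_COND_LE and cmpeq_f uses
 * TCG_COND_NE below.
 */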
#define fGEN_TCG_J4_cmpgt_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgt_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgt_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LE, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgt_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LE, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpeq_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_EQ, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpeq_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_EQ, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpeq_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_NE, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpeq_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_NE, NsN, RtV, riV)

#define fGEN_TCG_J4_cmplt_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmplt_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LT, NsN, RtV, riV)
#define fGEN_TCG_J4_cmplt_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GE, NsN, RtV, riV)
#define fGEN_TCG_J4_cmplt_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GE, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpeqi_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpeqi_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, UiV, riV)

#define fGEN_TCG_J4_cmpgti_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgti_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgti_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgti_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, UiV, riV)

#define fGEN_TCG_J4_cmpltu_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpltu_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpltu_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GEU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpltu_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GEU, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpgtui_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GTU, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GTU, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LEU, NsN, UiV, riV)
#define fGEN_TCG_J4_cmpgtui_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LEU, NsN, UiV, riV)

#define fGEN_TCG_J4_cmpgtu_t_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_t_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_GTU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_f_jumpnv_t(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LEU, NsN, RtV, riV)
#define fGEN_TCG_J4_cmpgtu_f_jumpnv_nt(SHORTCODE) \
    gen_cmp_jumpnv(ctx, TCG_COND_LEU, NsN, RtV, riV)

#define fGEN_TCG_J4_cmpeqn1_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, -1, riV)
#define fGEN_TCG_J4_cmpeqn1_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, -1, riV)
#define fGEN_TCG_J4_cmpeqn1_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, -1, riV)
#define fGEN_TCG_J4_cmpeqn1_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, -1, riV)

#define fGEN_TCG_J4_cmpgtn1_t_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, -1, riV)
#define fGEN_TCG_J4_cmpgtn1_t_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, -1, riV)
#define fGEN_TCG_J4_cmpgtn1_f_jumpnv_t(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, -1, riV)
#define fGEN_TCG_J4_cmpgtn1_f_jumpnv_nt(SHORTCODE) \
    gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, -1, riV)

#define fGEN_TCG_J4_tstbit0_t_jumpnv_t(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_t_jumpnv_nt(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_EQ, riV)
#define fGEN_TCG_J4_tstbit0_f_jumpnv_t(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_NE, riV)
#define fGEN_TCG_J4_tstbit0_f_jumpnv_nt(SHORTCODE) \
    gen_testbit0_jumpnv(ctx, NsN, TCG_COND_NE, riV)

/* r0 = r1 ; jump address */
#define fGEN_TCG_J4_jumpsetr(SHORTCODE) \
    do { \
        tcg_gen_mov_tl(RdV, RsV); \
        gen_jump(ctx, riV); \
    } while (0)

#define fGEN_TCG_J2_pause(SHORTCODE) \
    do { \
        uiV = uiV; \
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->next_PC); \
    } while (0)

/* r0 = asr(r1, r2):sat */
#define fGEN_TCG_S2_asr_r_r_sat(SHORTCODE) \
    gen_asr_r_r_sat(RdV, RsV, RtV)

/* r0 = asl(r1, r2):sat */
#define fGEN_TCG_S2_asl_r_r_sat(SHORTCODE) \
    gen_asl_r_r_sat(RdV, RsV, RtV)

/* Floating point */
#define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \
    gen_helper_conv_sf2df(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_df2sf(SHORTCODE) \
    gen_helper_conv_df2sf(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_uw2sf(SHORTCODE) \
    gen_helper_conv_uw2sf(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_uw2df(SHORTCODE) \
    gen_helper_conv_uw2df(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_w2sf(SHORTCODE) \
    gen_helper_conv_w2sf(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_w2df(SHORTCODE) \
    gen_helper_conv_w2df(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_ud2sf(SHORTCODE) \
    gen_helper_conv_ud2sf(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_ud2df(SHORTCODE) \
    gen_helper_conv_ud2df(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_d2sf(SHORTCODE) \
    gen_helper_conv_d2sf(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_d2df(SHORTCODE) \
    gen_helper_conv_d2df(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_sf2uw(SHORTCODE) \
    gen_helper_conv_sf2uw(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2w(SHORTCODE) \
    gen_helper_conv_sf2w(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2ud(SHORTCODE) \
    gen_helper_conv_sf2ud(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2d(SHORTCODE) \
    gen_helper_conv_sf2d(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_df2uw(SHORTCODE) \
    gen_helper_conv_df2uw(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2w(SHORTCODE) \
    gen_helper_conv_df2w(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2ud(SHORTCODE) \
    gen_helper_conv_df2ud(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2d(SHORTCODE) \
    gen_helper_conv_df2d(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_sf2uw_chop(SHORTCODE) \
    gen_helper_conv_sf2uw_chop(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2w_chop(SHORTCODE) \
    gen_helper_conv_sf2w_chop(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2ud_chop(SHORTCODE) \
    gen_helper_conv_sf2ud_chop(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_sf2d_chop(SHORTCODE) \
    gen_helper_conv_sf2d_chop(RddV, cpu_env, RsV)
#define fGEN_TCG_F2_conv_df2uw_chop(SHORTCODE) \
    gen_helper_conv_df2uw_chop(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2w_chop(SHORTCODE) \
    gen_helper_conv_df2w_chop(RdV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2ud_chop(SHORTCODE) \
    gen_helper_conv_df2ud_chop(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_conv_df2d_chop(SHORTCODE) \
    gen_helper_conv_df2d_chop(RddV, cpu_env, RssV)
#define fGEN_TCG_F2_sfadd(SHORTCODE) \
    gen_helper_sfadd(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfsub(SHORTCODE) \
    gen_helper_sfsub(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpeq(SHORTCODE) \
    gen_helper_sfcmpeq(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpgt(SHORTCODE) \
    gen_helper_sfcmpgt(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpge(SHORTCODE) \
    gen_helper_sfcmpge(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfcmpuo(SHORTCODE) \
    gen_helper_sfcmpuo(PdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfmax(SHORTCODE) \
    gen_helper_sfmax(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfmin(SHORTCODE) \
    gen_helper_sfmin(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sfclass(SHORTCODE) \
    do { \
        TCGv imm = tcg_constant_tl(uiV); \
        gen_helper_sfclass(PdV, cpu_env, RsV, imm); \
    } while (0)
#define fGEN_TCG_F2_sffixupn(SHORTCODE) \
    gen_helper_sffixupn(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sffixupd(SHORTCODE) \
    gen_helper_sffixupd(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sffixupr(SHORTCODE) \
    gen_helper_sffixupr(RdV, cpu_env, RsV)
#define fGEN_TCG_F2_dfadd(SHORTCODE) \
    gen_helper_dfadd(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfsub(SHORTCODE) \
    gen_helper_dfsub(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfmax(SHORTCODE) \
    gen_helper_dfmax(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfmin(SHORTCODE) \
    gen_helper_dfmin(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpeq(SHORTCODE) \
    gen_helper_dfcmpeq(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpgt(SHORTCODE) \
    gen_helper_dfcmpgt(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpge(SHORTCODE) \
    gen_helper_dfcmpge(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfcmpuo(SHORTCODE) \
    gen_helper_dfcmpuo(PdV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfclass(SHORTCODE) \
    do { \
        TCGv imm = tcg_constant_tl(uiV); \
        gen_helper_dfclass(PdV, cpu_env, RssV, imm); \
    } while (0)
#define fGEN_TCG_F2_sfmpy(SHORTCODE) \
    gen_helper_sfmpy(RdV, cpu_env, RsV, RtV)
#define fGEN_TCG_F2_sffma(SHORTCODE) \
    gen_helper_sffma(RxV, cpu_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffma_sc(SHORTCODE) \
    gen_helper_sffma_sc(RxV, cpu_env, RxV, RsV, RtV, PuV)
#define fGEN_TCG_F2_sffms(SHORTCODE) \
    gen_helper_sffms(RxV, cpu_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffma_lib(SHORTCODE) \
    gen_helper_sffma_lib(RxV, cpu_env, RxV, RsV, RtV)
#define fGEN_TCG_F2_sffms_lib(SHORTCODE) \
    gen_helper_sffms_lib(RxV, cpu_env, RxV, RsV, RtV)

#define fGEN_TCG_F2_dfmpyfix(SHORTCODE) \
    gen_helper_dfmpyfix(RddV, cpu_env, RssV, RttV)
#define fGEN_TCG_F2_dfmpyhh(SHORTCODE) \
    gen_helper_dfmpyhh(RxxV, cpu_env, RxxV, RssV, RttV)

/* Nothing to do for these in qemu, need to suppress compiler warnings */
#define fGEN_TCG_Y4_l2fetch(SHORTCODE) \
    do { \
        RsV = RsV; \
        RtV = RtV; \
    } while (0)
#define fGEN_TCG_Y5_l2fetch(SHORTCODE) \
    do { \
        RsV = RsV; \
    } while (0)

#define fGEN_TCG_J2_trap0(SHORTCODE) \
    do { \
        uiV = uiV; \
        tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->pkt->pc); \
        TCGv excp = tcg_constant_tl(HEX_EXCP_TRAP0); \
        gen_helper_raise_exception(cpu_env, excp); \
    } while (0)

#endif