1 /*
2 * Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
18 #ifndef HEXAGON_GEN_TCG_H
19 #define HEXAGON_GEN_TCG_H
21 /*
22 * Here is a primer to understand the tag names for load/store instructions
24 * Data types
25 * b signed byte r0 = memb(r2+#0)
26 * ub unsigned byte r0 = memub(r2+#0)
27 * h signed half word (16 bits) r0 = memh(r2+#0)
28 * uh unsigned half word r0 = memuh(r2+#0)
29 * i integer (32 bits) r0 = memw(r2+#0)
30 * d double word (64 bits) r1:0 = memd(r2+#0)
32 * Addressing modes
33 * _io indirect with offset r0 = memw(r1+#4)
34 * _ur absolute with register offset r0 = memw(r1<<#4+##variable)
35 * _rr indirect with register offset r0 = memw(r1+r4<<#2)
36 * gp global pointer relative r0 = memw(gp+#200)
37 * _sp stack pointer relative r0 = memw(r29+#12)
38 * _ap absolute set r0 = memw(r1=##variable)
39 * _pr post increment register r0 = memw(r1++m1)
40 * _pbr post increment bit reverse r0 = memw(r1++m1:brev)
41 * _pi post increment immediate r0 = memb(r1++#1)
42 * _pci post increment circular immediate r0 = memw(r1++#4:circ(m0))
43 * _pcr post increment circular register r0 = memw(r1++I:circ(m0))
44 */
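/*
 * Illustrative sketch only (not used by the translator): the _pi
 * (post-increment immediate) mode from the table above, written in plain C.
 * The function name is made up for illustration.
 */
static inline unsigned char gen_tcg_example_loadrub_pi(const unsigned char **rx)
{
    unsigned char val = **rx;    /* r0 = memub(r1) */
    *rx += 1;                    /* then r1 += #1 */
    return val;
}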
46 /* Macros for complex addressing modes */
47 #define GET_EA_ap \
48 do { \
49 fEA_IMM(UiV); \
50 tcg_gen_movi_tl(ReV, UiV); \
51 } while (0)
52 #define GET_EA_pr \
53 do { \
54 fEA_REG(RxV); \
55 fPM_M(RxV, MuV); \
56 } while (0)
57 #define GET_EA_pbr \
58 do { \
59 gen_helper_fbrev(EA, RxV); \
60 tcg_gen_add_tl(RxV, RxV, MuV); \
61 } while (0)
62 #define GET_EA_pi \
63 do { \
64 fEA_REG(RxV); \
65 fPM_I(RxV, siV); \
66 } while (0)
67 #define GET_EA_pci \
68 do { \
69 TCGv tcgv_siV = tcg_constant_tl(siV); \
70 tcg_gen_mov_tl(EA, RxV); \
71 gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \
72 hex_gpr[HEX_REG_CS0 + MuN]); \
73 } while (0)
74 #define GET_EA_pcr(SHIFT) \
75 do { \
76 TCGv ireg = tcg_temp_new(); \
77 tcg_gen_mov_tl(EA, RxV); \
78 gen_read_ireg(ireg, MuV, (SHIFT)); \
79 gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
80 } while (0)
82 /* Instructions with multiple definitions */
83 #define fGEN_TCG_LOAD_AP(RES, SIZE, SIGN) \
84 do { \
85 fMUST_IMMEXT(UiV); \
86 fEA_IMM(UiV); \
87 fLOAD(1, SIZE, SIGN, EA, RES); \
88 tcg_gen_movi_tl(ReV, UiV); \
89 } while (0)
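/*
 * Illustrative sketch only: absolute-set addressing in plain C.  The extended
 * immediate is both the load address and the value written to the Re
 * destination.  The flat "mem" image and the function name are illustrative
 * assumptions, not translator code.
 */
static inline unsigned int gen_tcg_example_load_ap(const unsigned char *mem, unsigned int uimm, unsigned int *re)
{
    *re = uimm;                               /* ReV gets the extended immediate */
    /* RdV gets the word loaded from that address (little-endian, word load shown) */
    return (unsigned int)mem[uimm]
         | ((unsigned int)mem[uimm + 1] << 8)
         | ((unsigned int)mem[uimm + 2] << 16)
         | ((unsigned int)mem[uimm + 3] << 24);
}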
91 #define fGEN_TCG_L4_loadrub_ap(SHORTCODE) \
92 fGEN_TCG_LOAD_AP(RdV, 1, u)
93 #define fGEN_TCG_L4_loadrb_ap(SHORTCODE) \
94 fGEN_TCG_LOAD_AP(RdV, 1, s)
95 #define fGEN_TCG_L4_loadruh_ap(SHORTCODE) \
96 fGEN_TCG_LOAD_AP(RdV, 2, u)
97 #define fGEN_TCG_L4_loadrh_ap(SHORTCODE) \
98 fGEN_TCG_LOAD_AP(RdV, 2, s)
99 #define fGEN_TCG_L4_loadri_ap(SHORTCODE) \
100 fGEN_TCG_LOAD_AP(RdV, 4, u)
101 #define fGEN_TCG_L4_loadrd_ap(SHORTCODE) \
102 fGEN_TCG_LOAD_AP(RddV, 8, u)
104 #define fGEN_TCG_L2_loadrub_pci(SHORTCODE) SHORTCODE
105 #define fGEN_TCG_L2_loadrb_pci(SHORTCODE) SHORTCODE
106 #define fGEN_TCG_L2_loadruh_pci(SHORTCODE) SHORTCODE
107 #define fGEN_TCG_L2_loadrh_pci(SHORTCODE) SHORTCODE
108 #define fGEN_TCG_L2_loadri_pci(SHORTCODE) SHORTCODE
109 #define fGEN_TCG_L2_loadrd_pci(SHORTCODE) SHORTCODE
111 #define fGEN_TCG_LOAD_pcr(SHIFT, LOAD) \
112 do { \
113 TCGv ireg = tcg_temp_new(); \
114 tcg_gen_mov_tl(EA, RxV); \
115 gen_read_ireg(ireg, MuV, SHIFT); \
116 gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
117 LOAD; \
118 } while (0)
120 #define fGEN_TCG_L2_loadrub_pcr(SHORTCODE) \
121 fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, u, EA, RdV))
122 #define fGEN_TCG_L2_loadrb_pcr(SHORTCODE) \
123 fGEN_TCG_LOAD_pcr(0, fLOAD(1, 1, s, EA, RdV))
124 #define fGEN_TCG_L2_loadruh_pcr(SHORTCODE) \
125 fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, u, EA, RdV))
126 #define fGEN_TCG_L2_loadrh_pcr(SHORTCODE) \
127 fGEN_TCG_LOAD_pcr(1, fLOAD(1, 2, s, EA, RdV))
128 #define fGEN_TCG_L2_loadri_pcr(SHORTCODE) \
129 fGEN_TCG_LOAD_pcr(2, fLOAD(1, 4, u, EA, RdV))
130 #define fGEN_TCG_L2_loadrd_pcr(SHORTCODE) \
131 fGEN_TCG_LOAD_pcr(3, fLOAD(1, 8, u, EA, RddV))
133 #define fGEN_TCG_L2_loadrub_pr(SHORTCODE) SHORTCODE
134 #define fGEN_TCG_L2_loadrub_pbr(SHORTCODE) SHORTCODE
135 #define fGEN_TCG_L2_loadrub_pi(SHORTCODE) SHORTCODE
136 #define fGEN_TCG_L2_loadrb_pr(SHORTCODE) SHORTCODE
137 #define fGEN_TCG_L2_loadrb_pbr(SHORTCODE) SHORTCODE
138 #define fGEN_TCG_L2_loadrb_pi(SHORTCODE) SHORTCODE
139 #define fGEN_TCG_L2_loadruh_pr(SHORTCODE) SHORTCODE
140 #define fGEN_TCG_L2_loadruh_pbr(SHORTCODE) SHORTCODE
141 #define fGEN_TCG_L2_loadruh_pi(SHORTCODE) SHORTCODE
142 #define fGEN_TCG_L2_loadrh_pr(SHORTCODE) SHORTCODE
143 #define fGEN_TCG_L2_loadrh_pbr(SHORTCODE) SHORTCODE
144 #define fGEN_TCG_L2_loadrh_pi(SHORTCODE) SHORTCODE
145 #define fGEN_TCG_L2_loadri_pr(SHORTCODE) SHORTCODE
146 #define fGEN_TCG_L2_loadri_pbr(SHORTCODE) SHORTCODE
147 #define fGEN_TCG_L2_loadri_pi(SHORTCODE) SHORTCODE
148 #define fGEN_TCG_L2_loadrd_pr(SHORTCODE) SHORTCODE
149 #define fGEN_TCG_L2_loadrd_pbr(SHORTCODE) SHORTCODE
150 #define fGEN_TCG_L2_loadrd_pi(SHORTCODE) SHORTCODE
152 /*
153 * These instructions load 2 bytes and place them in
154 * two halves of the destination register.
155 * The GET_EA macro determines the addressing mode.
156 * The SIGN argument determines whether to zero-extend or
157 * sign-extend.
158 */
159 #define fGEN_TCG_loadbXw2(GET_EA, SIGN) \
160 do { \
161 TCGv tmp = tcg_temp_new(); \
162 TCGv byte = tcg_temp_new(); \
163 GET_EA; \
164 fLOAD(1, 2, u, EA, tmp); \
165 tcg_gen_movi_tl(RdV, 0); \
166 for (int i = 0; i < 2; i++) { \
167 gen_set_half(i, RdV, gen_get_byte(byte, i, tmp, (SIGN))); \
168 } \
169 } while (0)
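/*
 * Illustrative sketch only: the value computed by loadb{z,s}w2 on plain C
 * types.  Byte i of memory lands in half word i of the 32-bit destination,
 * zero- or sign-extended according to SIGN.  The function name is made up.
 */
static inline unsigned int gen_tcg_example_loadbXw2(const unsigned char *mem, int sign)
{
    unsigned int result = 0;
    for (int i = 0; i < 2; i++) {
        unsigned int byte = sign ? (unsigned int)(int)(signed char)mem[i]
                                 : (unsigned int)mem[i];
        result |= (byte & 0xffff) << (16 * i);    /* gen_set_half(i, ...) */
    }
    return result;
}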
171 #define fGEN_TCG_L2_loadbzw2_io(SHORTCODE) \
172 fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), false)
173 #define fGEN_TCG_L4_loadbzw2_ur(SHORTCODE) \
174 fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), false)
175 #define fGEN_TCG_L2_loadbsw2_io(SHORTCODE) \
176 fGEN_TCG_loadbXw2(fEA_RI(RsV, siV), true)
177 #define fGEN_TCG_L4_loadbsw2_ur(SHORTCODE) \
178 fGEN_TCG_loadbXw2(fEA_IRs(UiV, RtV, uiV), true)
179 #define fGEN_TCG_L4_loadbzw2_ap(SHORTCODE) \
180 fGEN_TCG_loadbXw2(GET_EA_ap, false)
181 #define fGEN_TCG_L2_loadbzw2_pr(SHORTCODE) \
182 fGEN_TCG_loadbXw2(GET_EA_pr, false)
183 #define fGEN_TCG_L2_loadbzw2_pbr(SHORTCODE) \
184 fGEN_TCG_loadbXw2(GET_EA_pbr, false)
185 #define fGEN_TCG_L2_loadbzw2_pi(SHORTCODE) \
186 fGEN_TCG_loadbXw2(GET_EA_pi, false)
187 #define fGEN_TCG_L4_loadbsw2_ap(SHORTCODE) \
188 fGEN_TCG_loadbXw2(GET_EA_ap, true)
189 #define fGEN_TCG_L2_loadbsw2_pr(SHORTCODE) \
190 fGEN_TCG_loadbXw2(GET_EA_pr, true)
191 #define fGEN_TCG_L2_loadbsw2_pbr(SHORTCODE) \
192 fGEN_TCG_loadbXw2(GET_EA_pbr, true)
193 #define fGEN_TCG_L2_loadbsw2_pi(SHORTCODE) \
194 fGEN_TCG_loadbXw2(GET_EA_pi, true)
195 #define fGEN_TCG_L2_loadbzw2_pci(SHORTCODE) \
196 fGEN_TCG_loadbXw2(GET_EA_pci, false)
197 #define fGEN_TCG_L2_loadbsw2_pci(SHORTCODE) \
198 fGEN_TCG_loadbXw2(GET_EA_pci, true)
199 #define fGEN_TCG_L2_loadbzw2_pcr(SHORTCODE) \
200 fGEN_TCG_loadbXw2(GET_EA_pcr(1), false)
201 #define fGEN_TCG_L2_loadbsw2_pcr(SHORTCODE) \
202 fGEN_TCG_loadbXw2(GET_EA_pcr(1), true)
204 /*
205 * These instructions load 4 bytes and place them in
206 * four halves of the destination register pair.
207 * The GET_EA macro determines the addressing mode.
208 * The SIGN argument determines whether to zero-extend or
209 * sign-extend.
210 */
211 #define fGEN_TCG_loadbXw4(GET_EA, SIGN) \
212 do { \
213 TCGv tmp = tcg_temp_new(); \
214 TCGv byte = tcg_temp_new(); \
215 GET_EA; \
216 fLOAD(1, 4, u, EA, tmp); \
217 tcg_gen_movi_i64(RddV, 0); \
218 for (int i = 0; i < 4; i++) { \
219 gen_set_half_i64(i, RddV, gen_get_byte(byte, i, tmp, (SIGN))); \
220 } \
221 } while (0)
223 #define fGEN_TCG_L2_loadbzw4_io(SHORTCODE) \
224 fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), false)
225 #define fGEN_TCG_L4_loadbzw4_ur(SHORTCODE) \
226 fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), false)
227 #define fGEN_TCG_L2_loadbsw4_io(SHORTCODE) \
228 fGEN_TCG_loadbXw4(fEA_RI(RsV, siV), true)
229 #define fGEN_TCG_L4_loadbsw4_ur(SHORTCODE) \
230 fGEN_TCG_loadbXw4(fEA_IRs(UiV, RtV, uiV), true)
231 #define fGEN_TCG_L2_loadbzw4_pci(SHORTCODE) \
232 fGEN_TCG_loadbXw4(GET_EA_pci, false)
233 #define fGEN_TCG_L2_loadbsw4_pci(SHORTCODE) \
234 fGEN_TCG_loadbXw4(GET_EA_pci, true)
235 #define fGEN_TCG_L2_loadbzw4_pcr(SHORTCODE) \
236 fGEN_TCG_loadbXw4(GET_EA_pcr(2), false)
237 #define fGEN_TCG_L2_loadbsw4_pcr(SHORTCODE) \
238 fGEN_TCG_loadbXw4(GET_EA_pcr(2), true)
239 #define fGEN_TCG_L4_loadbzw4_ap(SHORTCODE) \
240 fGEN_TCG_loadbXw4(GET_EA_ap, false)
241 #define fGEN_TCG_L2_loadbzw4_pr(SHORTCODE) \
242 fGEN_TCG_loadbXw4(GET_EA_pr, false)
243 #define fGEN_TCG_L2_loadbzw4_pbr(SHORTCODE) \
244 fGEN_TCG_loadbXw4(GET_EA_pbr, false)
245 #define fGEN_TCG_L2_loadbzw4_pi(SHORTCODE) \
246 fGEN_TCG_loadbXw4(GET_EA_pi, false)
247 #define fGEN_TCG_L4_loadbsw4_ap(SHORTCODE) \
248 fGEN_TCG_loadbXw4(GET_EA_ap, true)
249 #define fGEN_TCG_L2_loadbsw4_pr(SHORTCODE) \
250 fGEN_TCG_loadbXw4(GET_EA_pr, true)
251 #define fGEN_TCG_L2_loadbsw4_pbr(SHORTCODE) \
252 fGEN_TCG_loadbXw4(GET_EA_pbr, true)
253 #define fGEN_TCG_L2_loadbsw4_pi(SHORTCODE) \
254 fGEN_TCG_loadbXw4(GET_EA_pi, true)
256 /*
257 * These instructions load a half word, shift the destination right by 16 bits
258 * and place the loaded value in the high half word of the destination pair.
259 * The GET_EA macro determines the addressing mode.
260 */
261 #define fGEN_TCG_loadalignh(GET_EA) \
262 do { \
263 TCGv tmp = tcg_temp_new(); \
264 TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
265 GET_EA; \
266 fLOAD(1, 2, u, EA, tmp); \
267 tcg_gen_extu_i32_i64(tmp_i64, tmp); \
268 tcg_gen_shri_i64(RyyV, RyyV, 16); \
269 tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 48, 16); \
270 } while (0)
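/*
 * Illustrative sketch only: loadalignh on plain C types.  The 64-bit
 * destination acts as a shift-in buffer: it is shifted right by 16 and the
 * newly loaded half word is deposited into bits [63:48].
 */
static inline unsigned long long gen_tcg_example_loadalignh(unsigned long long ryy, unsigned short loaded)
{
    ryy >>= 16;                                  /* tcg_gen_shri_i64(RyyV, RyyV, 16) */
    ryy |= (unsigned long long)loaded << 48;     /* deposit at offset 48, width 16 */
    return ryy;
}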
272 #define fGEN_TCG_L4_loadalignh_ur(SHORTCODE) \
273 fGEN_TCG_loadalignh(fEA_IRs(UiV, RtV, uiV))
274 #define fGEN_TCG_L2_loadalignh_io(SHORTCODE) \
275 fGEN_TCG_loadalignh(fEA_RI(RsV, siV))
276 #define fGEN_TCG_L2_loadalignh_pci(SHORTCODE) \
277 fGEN_TCG_loadalignh(GET_EA_pci)
278 #define fGEN_TCG_L2_loadalignh_pcr(SHORTCODE) \
279 fGEN_TCG_loadalignh(GET_EA_pcr(1))
280 #define fGEN_TCG_L4_loadalignh_ap(SHORTCODE) \
281 fGEN_TCG_loadalignh(GET_EA_ap)
282 #define fGEN_TCG_L2_loadalignh_pr(SHORTCODE) \
283 fGEN_TCG_loadalignh(GET_EA_pr)
284 #define fGEN_TCG_L2_loadalignh_pbr(SHORTCODE) \
285 fGEN_TCG_loadalignh(GET_EA_pbr)
286 #define fGEN_TCG_L2_loadalignh_pi(SHORTCODE) \
287 fGEN_TCG_loadalignh(GET_EA_pi)
289 /* Same as above, but loads a byte instead of half word */
290 #define fGEN_TCG_loadalignb(GET_EA) \
291 do { \
292 TCGv tmp = tcg_temp_new(); \
293 TCGv_i64 tmp_i64 = tcg_temp_new_i64(); \
294 GET_EA; \
295 fLOAD(1, 1, u, EA, tmp); \
296 tcg_gen_extu_i32_i64(tmp_i64, tmp); \
297 tcg_gen_shri_i64(RyyV, RyyV, 8); \
298 tcg_gen_deposit_i64(RyyV, RyyV, tmp_i64, 56, 8); \
299 } while (0)
301 #define fGEN_TCG_L2_loadalignb_io(SHORTCODE) \
302 fGEN_TCG_loadalignb(fEA_RI(RsV, siV))
303 #define fGEN_TCG_L4_loadalignb_ur(SHORTCODE) \
304 fGEN_TCG_loadalignb(fEA_IRs(UiV, RtV, uiV))
305 #define fGEN_TCG_L2_loadalignb_pci(SHORTCODE) \
306 fGEN_TCG_loadalignb(GET_EA_pci)
307 #define fGEN_TCG_L2_loadalignb_pcr(SHORTCODE) \
308 fGEN_TCG_loadalignb(GET_EA_pcr(0))
309 #define fGEN_TCG_L4_loadalignb_ap(SHORTCODE) \
310 fGEN_TCG_loadalignb(GET_EA_ap)
311 #define fGEN_TCG_L2_loadalignb_pr(SHORTCODE) \
312 fGEN_TCG_loadalignb(GET_EA_pr)
313 #define fGEN_TCG_L2_loadalignb_pbr(SHORTCODE) \
314 fGEN_TCG_loadalignb(GET_EA_pbr)
315 #define fGEN_TCG_L2_loadalignb_pi(SHORTCODE) \
316 fGEN_TCG_loadalignb(GET_EA_pi)
318 /*
319 * Predicated loads
320 * Here is a primer to understand the tag names
322 * Predicate used
323 * t true "old" value if (p0) r0 = memb(r2+#0)
324 * f false "old" value if (!p0) r0 = memb(r2+#0)
325 * tnew true "new" value if (p0.new) r0 = memb(r2+#0)
326 * fnew false "new" value if (!p0.new) r0 = memb(r2+#0)
327 */
328 #define fGEN_TCG_PRED_LOAD(GET_EA, PRED, SIZE, SIGN) \
329 do { \
330 TCGv LSB = tcg_temp_new(); \
331 TCGLabel *label = gen_new_label(); \
332 tcg_gen_movi_tl(EA, 0); \
333 PRED; \
334 CHECK_NOSHUF_PRED(GET_EA, SIZE, LSB); \
335 tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
336 fLOAD(1, SIZE, SIGN, EA, RdV); \
337 gen_set_label(label); \
338 } while (0)
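/*
 * Illustrative sketch only: the control flow generated by fGEN_TCG_PRED_LOAD,
 * written as ordinary C.  When the predicate's least significant bit is
 * false, the branch skips the load and the destination keeps its old value.
 */
static inline unsigned int gen_tcg_example_pred_load(int pred_lsb, const unsigned int *ea, unsigned int rd_old)
{
    if (pred_lsb == 0) {
        return rd_old;           /* tcg_gen_brcondi_tl(TCG_COND_EQ, ...) taken */
    }
    return *ea;                  /* fLOAD(1, SIZE, SIGN, EA, RdV) */
}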
340 #define fGEN_TCG_L2_ploadrubt_pi(SHORTCODE) \
341 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, u)
342 #define fGEN_TCG_L2_ploadrubf_pi(SHORTCODE) \
343 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, u)
344 #define fGEN_TCG_L2_ploadrubtnew_pi(SHORTCODE) \
345 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, u)
346 #define fGEN_TCG_L2_ploadrubfnew_pi(SHORTCODE) \
347 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 1, u)
348 #define fGEN_TCG_L2_ploadrbt_pi(SHORTCODE) \
349 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 1, s)
350 #define fGEN_TCG_L2_ploadrbf_pi(SHORTCODE) \
351 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 1, s)
352 #define fGEN_TCG_L2_ploadrbtnew_pi(SHORTCODE) \
353 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 1, s)
354 #define fGEN_TCG_L2_ploadrbfnew_pi(SHORTCODE) \
355 fGEN_TCG_PRED_LOAD({ fEA_REG(RxV); fPM_I(RxV, siV); }, \
356 fLSBNEWNOT(PtN), 1, s)
358 #define fGEN_TCG_L2_ploadruht_pi(SHORTCODE) \
359 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, u)
360 #define fGEN_TCG_L2_ploadruhf_pi(SHORTCODE) \
361 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, u)
362 #define fGEN_TCG_L2_ploadruhtnew_pi(SHORTCODE) \
363 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, u)
364 #define fGEN_TCG_L2_ploadruhfnew_pi(SHORTCODE) \
365 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, u)
366 #define fGEN_TCG_L2_ploadrht_pi(SHORTCODE) \
367 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 2, s)
368 #define fGEN_TCG_L2_ploadrhf_pi(SHORTCODE) \
369 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 2, s)
370 #define fGEN_TCG_L2_ploadrhtnew_pi(SHORTCODE) \
371 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 2, s)
372 #define fGEN_TCG_L2_ploadrhfnew_pi(SHORTCODE) \
373 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 2, s)
375 #define fGEN_TCG_L2_ploadrit_pi(SHORTCODE) \
376 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLD(PtV), 4, u)
377 #define fGEN_TCG_L2_ploadrif_pi(SHORTCODE) \
378 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBOLDNOT(PtV), 4, u)
379 #define fGEN_TCG_L2_ploadritnew_pi(SHORTCODE) \
380 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEW(PtN), 4, u)
381 #define fGEN_TCG_L2_ploadrifnew_pi(SHORTCODE) \
382 fGEN_TCG_PRED_LOAD(GET_EA_pi, fLSBNEWNOT(PtN), 4, u)
384 /* Predicated loads into a register pair */
385 #define fGEN_TCG_PRED_LOAD_PAIR(GET_EA, PRED) \
386 do { \
387 TCGv LSB = tcg_temp_new(); \
388 TCGLabel *label = gen_new_label(); \
389 tcg_gen_movi_tl(EA, 0); \
390 PRED; \
391 CHECK_NOSHUF_PRED(GET_EA, 8, LSB); \
392 tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, label); \
393 fLOAD(1, 8, u, EA, RddV); \
394 gen_set_label(label); \
395 } while (0)
397 #define fGEN_TCG_L2_ploadrdt_pi(SHORTCODE) \
398 fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLD(PtV))
399 #define fGEN_TCG_L2_ploadrdf_pi(SHORTCODE) \
400 fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBOLDNOT(PtV))
401 #define fGEN_TCG_L2_ploadrdtnew_pi(SHORTCODE) \
402 fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEW(PtN))
403 #define fGEN_TCG_L2_ploadrdfnew_pi(SHORTCODE) \
404 fGEN_TCG_PRED_LOAD_PAIR(GET_EA_pi, fLSBNEWNOT(PtN))
406 /* load-locked and store-locked */
407 #define fGEN_TCG_L2_loadw_locked(SHORTCODE) \
408 SHORTCODE
409 #define fGEN_TCG_L4_loadd_locked(SHORTCODE) \
410 SHORTCODE
411 #define fGEN_TCG_S2_storew_locked(SHORTCODE) \
412 SHORTCODE
413 #define fGEN_TCG_S4_stored_locked(SHORTCODE) \
414 SHORTCODE
416 #define fGEN_TCG_STORE(SHORTCODE) \
417 do { \
418 TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \
419 TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \
420 SHORTCODE; \
421 } while (0)
423 #define fGEN_TCG_STORE_pcr(SHIFT, STORE) \
424 do { \
425 TCGv ireg = tcg_temp_new(); \
426 TCGv HALF G_GNUC_UNUSED = tcg_temp_new(); \
427 TCGv BYTE G_GNUC_UNUSED = tcg_temp_new(); \
428 tcg_gen_mov_tl(EA, RxV); \
429 gen_read_ireg(ireg, MuV, SHIFT); \
430 gen_helper_fcircadd(RxV, RxV, ireg, MuV, hex_gpr[HEX_REG_CS0 + MuN]); \
431 STORE; \
432 } while (0)
434 #define fGEN_TCG_S2_storerb_pbr(SHORTCODE) \
435 fGEN_TCG_STORE(SHORTCODE)
436 #define fGEN_TCG_S2_storerb_pci(SHORTCODE) \
437 fGEN_TCG_STORE(SHORTCODE)
438 #define fGEN_TCG_S2_storerb_pcr(SHORTCODE) \
439 fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, RtV)))
441 #define fGEN_TCG_S2_storerh_pbr(SHORTCODE) \
442 fGEN_TCG_STORE(SHORTCODE)
443 #define fGEN_TCG_S2_storerh_pci(SHORTCODE) \
444 fGEN_TCG_STORE(SHORTCODE)
445 #define fGEN_TCG_S2_storerh_pcr(SHORTCODE) \
446 fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, RtV)))
448 #define fGEN_TCG_S2_storerf_pbr(SHORTCODE) \
449 fGEN_TCG_STORE(SHORTCODE)
450 #define fGEN_TCG_S2_storerf_pci(SHORTCODE) \
451 fGEN_TCG_STORE(SHORTCODE)
452 #define fGEN_TCG_S2_storerf_pcr(SHORTCODE) \
453 fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(1, RtV)))
455 #define fGEN_TCG_S2_storeri_pbr(SHORTCODE) \
456 fGEN_TCG_STORE(SHORTCODE)
457 #define fGEN_TCG_S2_storeri_pci(SHORTCODE) \
458 fGEN_TCG_STORE(SHORTCODE)
459 #define fGEN_TCG_S2_storeri_pcr(SHORTCODE) \
460 fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, RtV))
462 #define fGEN_TCG_S2_storerd_pbr(SHORTCODE) \
463 fGEN_TCG_STORE(SHORTCODE)
464 #define fGEN_TCG_S2_storerd_pci(SHORTCODE) \
465 fGEN_TCG_STORE(SHORTCODE)
466 #define fGEN_TCG_S2_storerd_pcr(SHORTCODE) \
467 fGEN_TCG_STORE_pcr(3, fSTORE(1, 8, EA, RttV))
469 #define fGEN_TCG_S2_storerbnew_pbr(SHORTCODE) \
470 fGEN_TCG_STORE(SHORTCODE)
471 #define fGEN_TCG_S2_storerbnew_pci(SHORTCODE) \
472 fGEN_TCG_STORE(SHORTCODE)
473 #define fGEN_TCG_S2_storerbnew_pcr(SHORTCODE) \
474 fGEN_TCG_STORE_pcr(0, fSTORE(1, 1, EA, fGETBYTE(0, NtN)))
476 #define fGEN_TCG_S2_storerhnew_pbr(SHORTCODE) \
477 fGEN_TCG_STORE(SHORTCODE)
478 #define fGEN_TCG_S2_storerhnew_pci(SHORTCODE) \
479 fGEN_TCG_STORE(SHORTCODE)
480 #define fGEN_TCG_S2_storerhnew_pcr(SHORTCODE) \
481 fGEN_TCG_STORE_pcr(1, fSTORE(1, 2, EA, fGETHALF(0, NtN)))
483 #define fGEN_TCG_S2_storerinew_pbr(SHORTCODE) \
484 fGEN_TCG_STORE(SHORTCODE)
485 #define fGEN_TCG_S2_storerinew_pci(SHORTCODE) \
486 fGEN_TCG_STORE(SHORTCODE)
487 #define fGEN_TCG_S2_storerinew_pcr(SHORTCODE) \
488 fGEN_TCG_STORE_pcr(2, fSTORE(1, 4, EA, NtN))
490 /* dczeroa clears the 32 byte cache line at the address given */
491 #define fGEN_TCG_Y2_dczeroa(SHORTCODE) SHORTCODE
493 /* In linux-user mode, these are not modelled, so we just suppress the compiler warning */
494 #define fGEN_TCG_Y2_dcinva(SHORTCODE) \
495 do { RsV = RsV; } while (0)
496 #define fGEN_TCG_Y2_dccleaninva(SHORTCODE) \
497 do { RsV = RsV; } while (0)
498 #define fGEN_TCG_Y2_dccleana(SHORTCODE) \
499 do { RsV = RsV; } while (0)
500 #define fGEN_TCG_Y2_icinva(SHORTCODE) \
501 do { RsV = RsV; } while (0)
503 /*
504 * allocframe(#uiV)
505 * RxV == r29
506 */
507 #define fGEN_TCG_S2_allocframe(SHORTCODE) \
508 gen_allocframe(ctx, RxV, uiV)
510 /* sub-instruction version (no RxV, so handle it manually) */
511 #define fGEN_TCG_SS2_allocframe(SHORTCODE) \
512 do { \
513 TCGv r29 = tcg_temp_new(); \
514 tcg_gen_mov_tl(r29, hex_gpr[HEX_REG_SP]); \
515 gen_allocframe(ctx, r29, uiV); \
516 gen_log_reg_write(ctx, HEX_REG_SP, r29); \
517 } while (0)
519 /*
520 * Rdd32 = deallocframe(Rs32):raw
521 * RddV == r31:30
522 * RsV == r30
523 */
524 #define fGEN_TCG_L2_deallocframe(SHORTCODE) \
525 gen_deallocframe(ctx, RddV, RsV)
527 /* sub-instruction version (no RddV/RsV, so handle it manually) */
528 #define fGEN_TCG_SL2_deallocframe(SHORTCODE) \
529 do { \
530 TCGv_i64 r31_30 = tcg_temp_new_i64(); \
531 gen_deallocframe(ctx, r31_30, hex_gpr[HEX_REG_FP]); \
532 gen_log_reg_write_pair(ctx, HEX_REG_FP, r31_30); \
533 } while (0)
535 /*
536 * dealloc_return
537 * Assembler mapped to
538 * r31:30 = dealloc_return(r30):raw
539 */
540 #define fGEN_TCG_L4_return(SHORTCODE) \
541 gen_return(ctx, RddV, RsV)
543 /*
544 * sub-instruction version (no RddV, so handle it manually)
545 */
546 #define fGEN_TCG_SL2_return(SHORTCODE) \
547 do { \
548 TCGv_i64 RddV = get_result_gpr_pair(ctx, HEX_REG_FP); \
549 gen_return(ctx, RddV, hex_gpr[HEX_REG_FP]); \
550 gen_log_reg_write_pair(ctx, HEX_REG_FP, RddV); \
551 } while (0)
553 /*
554 * Conditional returns follow this naming convention
555 * _t predicate true
556 * _f predicate false
557 * _tnew_pt predicate.new true predict taken
558 * _fnew_pt predicate.new false predict taken
559 * _tnew_pnt predicate.new true predict not taken
560 * _fnew_pnt predicate.new false predict not taken
561 * Predictions are not modelled in QEMU
563 * Example:
564 * if (p1) r31:30 = dealloc_return(r30):raw
565 */
566 #define fGEN_TCG_L4_return_t(SHORTCODE) \
567 gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_EQ)
568 #define fGEN_TCG_L4_return_f(SHORTCODE) \
569 gen_cond_return(ctx, RddV, RsV, PvV, TCG_COND_NE)
570 #define fGEN_TCG_L4_return_tnew_pt(SHORTCODE) \
571 gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ)
572 #define fGEN_TCG_L4_return_fnew_pt(SHORTCODE) \
573 gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE)
574 #define fGEN_TCG_L4_return_tnew_pnt(SHORTCODE) \
575 gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_EQ)
576 #define fGEN_TCG_L4_return_fnew_pnt(SHORTCODE) \
577 gen_cond_return(ctx, RddV, RsV, PvN, TCG_COND_NE)
579 #define fGEN_TCG_SL2_return_t(SHORTCODE) \
580 gen_cond_return_subinsn(ctx, TCG_COND_EQ, hex_pred[0])
581 #define fGEN_TCG_SL2_return_f(SHORTCODE) \
582 gen_cond_return_subinsn(ctx, TCG_COND_NE, hex_pred[0])
583 #define fGEN_TCG_SL2_return_tnew(SHORTCODE) \
584 gen_cond_return_subinsn(ctx, TCG_COND_EQ, ctx->new_pred_value[0])
585 #define fGEN_TCG_SL2_return_fnew(SHORTCODE) \
586 gen_cond_return_subinsn(ctx, TCG_COND_NE, ctx->new_pred_value[0])
588 /*
589 * Mathematical operations with more than one definition require
590 * special handling
591 */
592 #define fGEN_TCG_A5_ACS(SHORTCODE) \
593 do { \
594 gen_helper_vacsh_pred(PeV, tcg_env, RxxV, RssV, RttV); \
595 gen_helper_vacsh_val(RxxV, tcg_env, RxxV, RssV, RttV, \
596 tcg_constant_tl(ctx->need_commit)); \
597 } while (0)
599 #define fGEN_TCG_S2_cabacdecbin(SHORTCODE) \
600 do { \
601 TCGv p0 = tcg_temp_new(); \
602 gen_helper_cabacdecbin_pred(p0, RssV, RttV); \
603 gen_helper_cabacdecbin_val(RddV, RssV, RttV); \
604 gen_log_pred_write(ctx, 0, p0); \
605 } while (0)
607 /*
608 * Approximate reciprocal
609 * r3,p1 = sfrecipa(r0, r1)
611 * The helper packs the 2 32-bit results into a 64-bit value,
612 * so unpack them into the proper results.
613 */
614 #define fGEN_TCG_F2_sfrecipa(SHORTCODE) \
615 do { \
616 TCGv_i64 tmp = tcg_temp_new_i64(); \
617 gen_helper_sfrecipa(tmp, tcg_env, RsV, RtV); \
618 tcg_gen_extrh_i64_i32(RdV, tmp); \
619 tcg_gen_extrl_i64_i32(PeV, tmp); \
620 } while (0)
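/*
 * Illustrative sketch only: how the packed 64-bit helper result is split,
 * mirroring tcg_gen_extrh_i64_i32 (upper word -> RdV) and
 * tcg_gen_extrl_i64_i32 (lower word -> PeV).  The function name is made up.
 */
static inline void gen_tcg_example_unpack_pair(unsigned long long packed, unsigned int *rd, unsigned int *pe)
{
    *rd = (unsigned int)(packed >> 32);   /* high 32 bits */
    *pe = (unsigned int)packed;           /* low 32 bits */
}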
622 /*
623 * Approximation of the reciprocal square root
624 * r1,p0 = sfinvsqrta(r0)
626 * The helper packs the 2 32-bit results into a 64-bit value,
627 * so unpack them into the proper results.
628 */
629 #define fGEN_TCG_F2_sfinvsqrta(SHORTCODE) \
630 do { \
631 TCGv_i64 tmp = tcg_temp_new_i64(); \
632 gen_helper_sfinvsqrta(tmp, tcg_env, RsV); \
633 tcg_gen_extrh_i64_i32(RdV, tmp); \
634 tcg_gen_extrl_i64_i32(PeV, tmp); \
635 } while (0)
637 /*
638 * Add or subtract with carry.
639 * Predicate register is used as an extra input and output.
640 * r5:4 = add(r1:0, r3:2, p1):carry
641 */
642 #define fGEN_TCG_A4_addp_c(SHORTCODE) \
643 do { \
644 TCGv_i64 carry = tcg_temp_new_i64(); \
645 TCGv_i64 zero = tcg_constant_i64(0); \
646 tcg_gen_extu_i32_i64(carry, PxV); \
647 tcg_gen_andi_i64(carry, carry, 1); \
648 tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
649 tcg_gen_add2_i64(RddV, carry, RddV, carry, RttV, zero); \
650 tcg_gen_extrl_i64_i32(PxV, carry); \
651 gen_8bitsof(PxV, PxV); \
652 } while (0)
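/*
 * Illustrative sketch only: add-with-carry on plain C types, following the
 * two tcg_gen_add2_i64 steps above.  Only the least significant bit of the
 * predicate is used as carry in; the carry out is replicated across the
 * 8-bit predicate, which is what we assume gen_8bitsof does.  The function
 * name is made up.
 */
static inline unsigned long long gen_tcg_example_addp_c(unsigned long long rss, unsigned long long rtt, unsigned char *px)
{
    unsigned long long carry = *px & 1;
    unsigned long long tmp = rss + carry;
    unsigned long long c1 = tmp < rss;        /* first add2: Rss + carry */
    unsigned long long sum = tmp + rtt;
    unsigned long long c2 = sum < tmp;        /* second add2: + Rtt */
    *px = (c1 | c2) ? 0xff : 0x00;            /* carry out -> predicate */
    return sum;
}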
654 /* r5:4 = sub(r1:0, r3:2, p1):carry */
655 #define fGEN_TCG_A4_subp_c(SHORTCODE) \
656 do { \
657 TCGv_i64 carry = tcg_temp_new_i64(); \
658 TCGv_i64 zero = tcg_constant_i64(0); \
659 TCGv_i64 not_RttV = tcg_temp_new_i64(); \
660 tcg_gen_extu_i32_i64(carry, PxV); \
661 tcg_gen_andi_i64(carry, carry, 1); \
662 tcg_gen_not_i64(not_RttV, RttV); \
663 tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \
664 tcg_gen_add2_i64(RddV, carry, RddV, carry, not_RttV, zero); \
665 tcg_gen_extrl_i64_i32(PxV, carry); \
666 gen_8bitsof(PxV, PxV); \
667 } while (0)
669 /*
670 * Compare each of the 8 unsigned bytes
671 * The minimum is placed in each byte of the destination.
672 * Each bit of the predicate is set true if the byte from the first operand
673 * is greater than the byte from the second operand.
674 * r5:4,p1 = vminub(r1:0, r3:2)
675 */
676 #define fGEN_TCG_A6_vminub_RdP(SHORTCODE) \
677 do { \
678 TCGv left = tcg_temp_new(); \
679 TCGv right = tcg_temp_new(); \
680 TCGv tmp = tcg_temp_new(); \
681 tcg_gen_movi_tl(PeV, 0); \
682 tcg_gen_movi_i64(RddV, 0); \
683 for (int i = 0; i < 8; i++) { \
684 gen_get_byte_i64(left, i, RttV, false); \
685 gen_get_byte_i64(right, i, RssV, false); \
686 tcg_gen_setcond_tl(TCG_COND_GT, tmp, left, right); \
687 tcg_gen_deposit_tl(PeV, PeV, tmp, i, 1); \
688 tcg_gen_umin_tl(tmp, left, right); \
689 gen_set_byte_i64(i, RddV, tmp); \
690 } \
691 } while (0)
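/*
 * Illustrative sketch only: vminub on plain C types.  Byte i of the result is
 * the unsigned minimum of byte i of the two sources, and bit i of the
 * predicate records whether the byte from Rtt was greater than the byte from
 * Rss.  The function name is made up.
 */
static inline unsigned long long gen_tcg_example_vminub(unsigned long long rtt, unsigned long long rss, unsigned char *pe)
{
    unsigned long long rdd = 0;
    *pe = 0;
    for (int i = 0; i < 8; i++) {
        unsigned long long left = (rtt >> (8 * i)) & 0xff;
        unsigned long long right = (rss >> (8 * i)) & 0xff;
        *pe |= (unsigned char)((left > right) << i);
        rdd |= (left < right ? left : right) << (8 * i);
    }
    return rdd;
}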
693 #define fGEN_TCG_J2_call(SHORTCODE) \
694 gen_call(ctx, riV)
695 #define fGEN_TCG_J2_callr(SHORTCODE) \
696 gen_callr(ctx, RsV)
697 #define fGEN_TCG_J2_callrh(SHORTCODE) \
698 gen_callr(ctx, RsV)
700 #define fGEN_TCG_J2_callt(SHORTCODE) \
701 gen_cond_call(ctx, PuV, TCG_COND_EQ, riV)
702 #define fGEN_TCG_J2_callf(SHORTCODE) \
703 gen_cond_call(ctx, PuV, TCG_COND_NE, riV)
704 #define fGEN_TCG_J2_callrt(SHORTCODE) \
705 gen_cond_callr(ctx, TCG_COND_EQ, PuV, RsV)
706 #define fGEN_TCG_J2_callrf(SHORTCODE) \
707 gen_cond_callr(ctx, TCG_COND_NE, PuV, RsV)
709 #define fGEN_TCG_J2_loop0r(SHORTCODE) \
710 gen_loop0r(ctx, RsV, riV)
711 #define fGEN_TCG_J2_loop1r(SHORTCODE) \
712 gen_loop1r(ctx, RsV, riV)
713 #define fGEN_TCG_J2_loop0i(SHORTCODE) \
714 gen_loop0i(ctx, UiV, riV)
715 #define fGEN_TCG_J2_loop1i(SHORTCODE) \
716 gen_loop1i(ctx, UiV, riV)
717 #define fGEN_TCG_J2_ploop1sr(SHORTCODE) \
718 gen_ploopNsr(ctx, 1, RsV, riV)
719 #define fGEN_TCG_J2_ploop1si(SHORTCODE) \
720 gen_ploopNsi(ctx, 1, UiV, riV)
721 #define fGEN_TCG_J2_ploop2sr(SHORTCODE) \
722 gen_ploopNsr(ctx, 2, RsV, riV)
723 #define fGEN_TCG_J2_ploop2si(SHORTCODE) \
724 gen_ploopNsi(ctx, 2, UiV, riV)
725 #define fGEN_TCG_J2_ploop3sr(SHORTCODE) \
726 gen_ploopNsr(ctx, 3, RsV, riV)
727 #define fGEN_TCG_J2_ploop3si(SHORTCODE) \
728 gen_ploopNsi(ctx, 3, UiV, riV)
730 #define fGEN_TCG_J2_endloop0(SHORTCODE) \
731 gen_endloop0(ctx)
732 #define fGEN_TCG_J2_endloop1(SHORTCODE) \
733 gen_endloop1(ctx)
734 #define fGEN_TCG_J2_endloop01(SHORTCODE) \
735 gen_endloop01(ctx)
737 /*
738 * Compound compare and jump instructions
739 * Here is a primer to understand the tag names
741 * Comparison
742 * cmpeqi compare equal to an immediate
743 * cmpgti compare greater than an immediate
744 * cmpgtiu compare greater than an unsigned immediate
745 * cmpeqn1 compare equal to negative 1
746 * cmpgtn1 compare greater than negative 1
747 * cmpeq compare equal (two registers)
748 * cmpgtu compare greater than unsigned (two registers)
749 * tstbit0 test bit zero
751 * Condition
752 * tp0 p0 is true p0 = cmp.eq(r0,#5); if (p0.new) jump:nt address
753 * fp0 p0 is false p0 = cmp.eq(r0,#5); if (!p0.new) jump:nt address
754 * tp1 p1 is true p1 = cmp.eq(r0,#5); if (p1.new) jump:nt address
755 * fp1 p1 is false p1 = cmp.eq(r0,#5); if (!p1.new) jump:nt address
757 * Prediction (not modelled in qemu)
758 * _nt not taken
759 * _t taken
760 */
761 #define fGEN_TCG_J4_cmpeq_tp0_jump_t(SHORTCODE) \
762 gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
763 #define fGEN_TCG_J4_cmpeq_tp0_jump_nt(SHORTCODE) \
764 gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
765 #define fGEN_TCG_J4_cmpeq_fp0_jump_t(SHORTCODE) \
766 gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
767 #define fGEN_TCG_J4_cmpeq_fp0_jump_nt(SHORTCODE) \
768 gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_EQ, RsV, RtV, riV)
769 #define fGEN_TCG_J4_cmpeq_tp1_jump_t(SHORTCODE) \
770 gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
771 #define fGEN_TCG_J4_cmpeq_tp1_jump_nt(SHORTCODE) \
772 gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
773 #define fGEN_TCG_J4_cmpeq_fp1_jump_t(SHORTCODE) \
774 gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
775 #define fGEN_TCG_J4_cmpeq_fp1_jump_nt(SHORTCODE) \
776 gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_EQ, RsV, RtV, riV)
778 #define fGEN_TCG_J4_cmpgt_tp0_jump_t(SHORTCODE) \
779 gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
780 #define fGEN_TCG_J4_cmpgt_tp0_jump_nt(SHORTCODE) \
781 gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
782 #define fGEN_TCG_J4_cmpgt_fp0_jump_t(SHORTCODE) \
783 gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
784 #define fGEN_TCG_J4_cmpgt_fp0_jump_nt(SHORTCODE) \
785 gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GT, RsV, RtV, riV)
786 #define fGEN_TCG_J4_cmpgt_tp1_jump_t(SHORTCODE) \
787 gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
788 #define fGEN_TCG_J4_cmpgt_tp1_jump_nt(SHORTCODE) \
789 gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
790 #define fGEN_TCG_J4_cmpgt_fp1_jump_t(SHORTCODE) \
791 gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
792 #define fGEN_TCG_J4_cmpgt_fp1_jump_nt(SHORTCODE) \
793 gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GT, RsV, RtV, riV)
795 #define fGEN_TCG_J4_cmpgtu_tp0_jump_t(SHORTCODE) \
796 gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
797 #define fGEN_TCG_J4_cmpgtu_tp0_jump_nt(SHORTCODE) \
798 gen_cmpnd_cmp_jmp_t(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
799 #define fGEN_TCG_J4_cmpgtu_fp0_jump_t(SHORTCODE) \
800 gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
801 #define fGEN_TCG_J4_cmpgtu_fp0_jump_nt(SHORTCODE) \
802 gen_cmpnd_cmp_jmp_f(ctx, 0, TCG_COND_GTU, RsV, RtV, riV)
803 #define fGEN_TCG_J4_cmpgtu_tp1_jump_t(SHORTCODE) \
804 gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
805 #define fGEN_TCG_J4_cmpgtu_tp1_jump_nt(SHORTCODE) \
806 gen_cmpnd_cmp_jmp_t(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
807 #define fGEN_TCG_J4_cmpgtu_fp1_jump_t(SHORTCODE) \
808 gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
809 #define fGEN_TCG_J4_cmpgtu_fp1_jump_nt(SHORTCODE) \
810 gen_cmpnd_cmp_jmp_f(ctx, 1, TCG_COND_GTU, RsV, RtV, riV)
812 #define fGEN_TCG_J4_cmpeqi_tp0_jump_t(SHORTCODE) \
813 gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
814 #define fGEN_TCG_J4_cmpeqi_tp0_jump_nt(SHORTCODE) \
815 gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
816 #define fGEN_TCG_J4_cmpeqi_fp0_jump_t(SHORTCODE) \
817 gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
818 #define fGEN_TCG_J4_cmpeqi_fp0_jump_nt(SHORTCODE) \
819 gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_EQ, RsV, UiV, riV)
820 #define fGEN_TCG_J4_cmpeqi_tp1_jump_t(SHORTCODE) \
821 gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
822 #define fGEN_TCG_J4_cmpeqi_tp1_jump_nt(SHORTCODE) \
823 gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
824 #define fGEN_TCG_J4_cmpeqi_fp1_jump_t(SHORTCODE) \
825 gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
826 #define fGEN_TCG_J4_cmpeqi_fp1_jump_nt(SHORTCODE) \
827 gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_EQ, RsV, UiV, riV)
829 #define fGEN_TCG_J4_cmpgti_tp0_jump_t(SHORTCODE) \
830 gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
831 #define fGEN_TCG_J4_cmpgti_tp0_jump_nt(SHORTCODE) \
832 gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
833 #define fGEN_TCG_J4_cmpgti_fp0_jump_t(SHORTCODE) \
834 gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
835 #define fGEN_TCG_J4_cmpgti_fp0_jump_nt(SHORTCODE) \
836 gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GT, RsV, UiV, riV)
837 #define fGEN_TCG_J4_cmpgti_tp1_jump_t(SHORTCODE) \
838 gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
839 #define fGEN_TCG_J4_cmpgti_tp1_jump_nt(SHORTCODE) \
840 gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
841 #define fGEN_TCG_J4_cmpgti_fp1_jump_t(SHORTCODE) \
842 gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
843 #define fGEN_TCG_J4_cmpgti_fp1_jump_nt(SHORTCODE) \
844 gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GT, RsV, UiV, riV)
846 #define fGEN_TCG_J4_cmpgtui_tp0_jump_t(SHORTCODE) \
847 gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
848 #define fGEN_TCG_J4_cmpgtui_tp0_jump_nt(SHORTCODE) \
849 gen_cmpnd_cmpi_jmp_t(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
850 #define fGEN_TCG_J4_cmpgtui_fp0_jump_t(SHORTCODE) \
851 gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
852 #define fGEN_TCG_J4_cmpgtui_fp0_jump_nt(SHORTCODE) \
853 gen_cmpnd_cmpi_jmp_f(ctx, 0, TCG_COND_GTU, RsV, UiV, riV)
854 #define fGEN_TCG_J4_cmpgtui_tp1_jump_t(SHORTCODE) \
855 gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
856 #define fGEN_TCG_J4_cmpgtui_tp1_jump_nt(SHORTCODE) \
857 gen_cmpnd_cmpi_jmp_t(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
858 #define fGEN_TCG_J4_cmpgtui_fp1_jump_t(SHORTCODE) \
859 gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
860 #define fGEN_TCG_J4_cmpgtui_fp1_jump_nt(SHORTCODE) \
861 gen_cmpnd_cmpi_jmp_f(ctx, 1, TCG_COND_GTU, RsV, UiV, riV)
863 #define fGEN_TCG_J4_cmpeqn1_tp0_jump_t(SHORTCODE) \
864 gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_EQ, RsV, riV)
865 #define fGEN_TCG_J4_cmpeqn1_tp0_jump_nt(SHORTCODE) \
866 gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_EQ, RsV, riV)
867 #define fGEN_TCG_J4_cmpeqn1_fp0_jump_t(SHORTCODE) \
868 gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_EQ, RsV, riV)
869 #define fGEN_TCG_J4_cmpeqn1_fp0_jump_nt(SHORTCODE) \
870 gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_EQ, RsV, riV)
871 #define fGEN_TCG_J4_cmpeqn1_tp1_jump_t(SHORTCODE) \
872 gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_EQ, RsV, riV)
873 #define fGEN_TCG_J4_cmpeqn1_tp1_jump_nt(SHORTCODE) \
874 gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_EQ, RsV, riV)
875 #define fGEN_TCG_J4_cmpeqn1_fp1_jump_t(SHORTCODE) \
876 gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_EQ, RsV, riV)
877 #define fGEN_TCG_J4_cmpeqn1_fp1_jump_nt(SHORTCODE) \
878 gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_EQ, RsV, riV)
880 #define fGEN_TCG_J4_cmpgtn1_tp0_jump_t(SHORTCODE) \
881 gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_GT, RsV, riV)
882 #define fGEN_TCG_J4_cmpgtn1_tp0_jump_nt(SHORTCODE) \
883 gen_cmpnd_cmp_n1_jmp_t(ctx, 0, TCG_COND_GT, RsV, riV)
884 #define fGEN_TCG_J4_cmpgtn1_fp0_jump_t(SHORTCODE) \
885 gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_GT, RsV, riV)
886 #define fGEN_TCG_J4_cmpgtn1_fp0_jump_nt(SHORTCODE) \
887 gen_cmpnd_cmp_n1_jmp_f(ctx, 0, TCG_COND_GT, RsV, riV)
888 #define fGEN_TCG_J4_cmpgtn1_tp1_jump_t(SHORTCODE) \
889 gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_GT, RsV, riV)
890 #define fGEN_TCG_J4_cmpgtn1_tp1_jump_nt(SHORTCODE) \
891 gen_cmpnd_cmp_n1_jmp_t(ctx, 1, TCG_COND_GT, RsV, riV)
892 #define fGEN_TCG_J4_cmpgtn1_fp1_jump_t(SHORTCODE) \
893 gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_GT, RsV, riV)
894 #define fGEN_TCG_J4_cmpgtn1_fp1_jump_nt(SHORTCODE) \
895 gen_cmpnd_cmp_n1_jmp_f(ctx, 1, TCG_COND_GT, RsV, riV)
897 #define fGEN_TCG_J4_tstbit0_tp0_jump_nt(SHORTCODE) \
898 gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_EQ, riV)
899 #define fGEN_TCG_J4_tstbit0_tp0_jump_t(SHORTCODE) \
900 gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_EQ, riV)
901 #define fGEN_TCG_J4_tstbit0_fp0_jump_nt(SHORTCODE) \
902 gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_NE, riV)
903 #define fGEN_TCG_J4_tstbit0_fp0_jump_t(SHORTCODE) \
904 gen_cmpnd_tstbit0_jmp(ctx, 0, RsV, TCG_COND_NE, riV)
905 #define fGEN_TCG_J4_tstbit0_tp1_jump_nt(SHORTCODE) \
906 gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_EQ, riV)
907 #define fGEN_TCG_J4_tstbit0_tp1_jump_t(SHORTCODE) \
908 gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_EQ, riV)
909 #define fGEN_TCG_J4_tstbit0_fp1_jump_nt(SHORTCODE) \
910 gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_NE, riV)
911 #define fGEN_TCG_J4_tstbit0_fp1_jump_t(SHORTCODE) \
912 gen_cmpnd_tstbit0_jmp(ctx, 1, RsV, TCG_COND_NE, riV)
914 /* p0 = cmp.eq(r0, #7) */
915 #define fGEN_TCG_SA1_cmpeqi(SHORTCODE) \
916 do { \
917 TCGv p0 = tcg_temp_new(); \
918 gen_comparei(TCG_COND_EQ, p0, RsV, uiV); \
919 gen_log_pred_write(ctx, 0, p0); \
920 } while (0)
922 #define fGEN_TCG_J2_jump(SHORTCODE) \
923 gen_jump(ctx, riV)
924 #define fGEN_TCG_J2_jumpr(SHORTCODE) \
925 gen_jumpr(ctx, RsV)
926 #define fGEN_TCG_J2_jumprh(SHORTCODE) \
927 gen_jumpr(ctx, RsV)
928 #define fGEN_TCG_J4_jumpseti(SHORTCODE) \
929 do { \
930 tcg_gen_movi_tl(RdV, UiV); \
931 gen_jump(ctx, riV); \
932 } while (0)
934 #define fGEN_TCG_cond_jumpt(COND) \
935 do { \
936 TCGv LSB = tcg_temp_new(); \
937 COND; \
938 gen_cond_jump(ctx, TCG_COND_EQ, LSB, riV); \
939 } while (0)
940 #define fGEN_TCG_cond_jumpf(COND) \
941 do { \
942 TCGv LSB = tcg_temp_new(); \
943 COND; \
944 gen_cond_jump(ctx, TCG_COND_NE, LSB, riV); \
945 } while (0)
947 #define fGEN_TCG_J2_jumpt(SHORTCODE) \
948 fGEN_TCG_cond_jumpt(fLSBOLD(PuV))
949 #define fGEN_TCG_J2_jumptpt(SHORTCODE) \
950 fGEN_TCG_cond_jumpt(fLSBOLD(PuV))
951 #define fGEN_TCG_J2_jumpf(SHORTCODE) \
952 fGEN_TCG_cond_jumpf(fLSBOLD(PuV))
953 #define fGEN_TCG_J2_jumpfpt(SHORTCODE) \
954 fGEN_TCG_cond_jumpf(fLSBOLD(PuV))
955 #define fGEN_TCG_J2_jumptnew(SHORTCODE) \
956 gen_cond_jump(ctx, TCG_COND_EQ, PuN, riV)
957 #define fGEN_TCG_J2_jumptnewpt(SHORTCODE) \
958 gen_cond_jump(ctx, TCG_COND_EQ, PuN, riV)
959 #define fGEN_TCG_J2_jumpfnewpt(SHORTCODE) \
960 fGEN_TCG_cond_jumpf(fLSBNEW(PuN))
961 #define fGEN_TCG_J2_jumpfnew(SHORTCODE) \
962 fGEN_TCG_cond_jumpf(fLSBNEW(PuN))
963 #define fGEN_TCG_J2_jumprz(SHORTCODE) \
964 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_NE, LSB, RsV, 0))
965 #define fGEN_TCG_J2_jumprzpt(SHORTCODE) \
966 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_NE, LSB, RsV, 0))
967 #define fGEN_TCG_J2_jumprnz(SHORTCODE) \
968 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_EQ, LSB, RsV, 0))
969 #define fGEN_TCG_J2_jumprnzpt(SHORTCODE) \
970 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_EQ, LSB, RsV, 0))
971 #define fGEN_TCG_J2_jumprgtez(SHORTCODE) \
972 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_GE, LSB, RsV, 0))
973 #define fGEN_TCG_J2_jumprgtezpt(SHORTCODE) \
974 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_GE, LSB, RsV, 0))
975 #define fGEN_TCG_J2_jumprltez(SHORTCODE) \
976 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_LE, LSB, RsV, 0))
977 #define fGEN_TCG_J2_jumprltezpt(SHORTCODE) \
978 fGEN_TCG_cond_jumpt(tcg_gen_setcondi_tl(TCG_COND_LE, LSB, RsV, 0))
980 #define fGEN_TCG_cond_jumprt(COND) \
981 do { \
982 TCGv LSB = tcg_temp_new(); \
983 COND; \
984 gen_cond_jumpr(ctx, RsV, TCG_COND_EQ, LSB); \
985 } while (0)
986 #define fGEN_TCG_cond_jumprf(COND) \
987 do { \
988 TCGv LSB = tcg_temp_new(); \
989 COND; \
990 gen_cond_jumpr(ctx, RsV, TCG_COND_NE, LSB); \
991 } while (0)
993 #define fGEN_TCG_J2_jumprt(SHORTCODE) \
994 fGEN_TCG_cond_jumprt(fLSBOLD(PuV))
995 #define fGEN_TCG_J2_jumprtpt(SHORTCODE) \
996 fGEN_TCG_cond_jumprt(fLSBOLD(PuV))
997 #define fGEN_TCG_J2_jumprf(SHORTCODE) \
998 fGEN_TCG_cond_jumprf(fLSBOLD(PuV))
999 #define fGEN_TCG_J2_jumprfpt(SHORTCODE) \
1000 fGEN_TCG_cond_jumprf(fLSBOLD(PuV))
1001 #define fGEN_TCG_J2_jumprtnew(SHORTCODE) \
1002 fGEN_TCG_cond_jumprt(fLSBNEW(PuN))
1003 #define fGEN_TCG_J2_jumprtnewpt(SHORTCODE) \
1004 fGEN_TCG_cond_jumprt(fLSBNEW(PuN))
1005 #define fGEN_TCG_J2_jumprfnew(SHORTCODE) \
1006 fGEN_TCG_cond_jumprf(fLSBNEW(PuN))
1007 #define fGEN_TCG_J2_jumprfnewpt(SHORTCODE) \
1008 fGEN_TCG_cond_jumprf(fLSBNEW(PuN))
1010 /*
1011 * New value compare & jump instructions
1012 * if ([!]COND(r0.new, r1)) jump:t address
1013 * if ([!]COND(r0.new, #7)) jump:t address
1014 */
1015 #define fGEN_TCG_J4_cmpgt_t_jumpnv_t(SHORTCODE) \
1016 gen_cmp_jumpnv(ctx, TCG_COND_GT, NsN, RtV, riV)
1017 #define fGEN_TCG_J4_cmpgt_t_jumpnv_nt(SHORTCODE) \
1018 gen_cmp_jumpnv(ctx, TCG_COND_GT, NsN, RtV, riV)
1019 #define fGEN_TCG_J4_cmpgt_f_jumpnv_t(SHORTCODE) \
1020 gen_cmp_jumpnv(ctx, TCG_COND_LE, NsN, RtV, riV)
1021 #define fGEN_TCG_J4_cmpgt_f_jumpnv_nt(SHORTCODE) \
1022 gen_cmp_jumpnv(ctx, TCG_COND_LE, NsN, RtV, riV)
1024 #define fGEN_TCG_J4_cmpeq_t_jumpnv_t(SHORTCODE) \
1025 gen_cmp_jumpnv(ctx, TCG_COND_EQ, NsN, RtV, riV)
1026 #define fGEN_TCG_J4_cmpeq_t_jumpnv_nt(SHORTCODE) \
1027 gen_cmp_jumpnv(ctx, TCG_COND_EQ, NsN, RtV, riV)
1028 #define fGEN_TCG_J4_cmpeq_f_jumpnv_t(SHORTCODE) \
1029 gen_cmp_jumpnv(ctx, TCG_COND_NE, NsN, RtV, riV)
1030 #define fGEN_TCG_J4_cmpeq_f_jumpnv_nt(SHORTCODE) \
1031 gen_cmp_jumpnv(ctx, TCG_COND_NE, NsN, RtV, riV)
1033 #define fGEN_TCG_J4_cmplt_t_jumpnv_t(SHORTCODE) \
1034 gen_cmp_jumpnv(ctx, TCG_COND_LT, NsN, RtV, riV)
1035 #define fGEN_TCG_J4_cmplt_t_jumpnv_nt(SHORTCODE) \
1036 gen_cmp_jumpnv(ctx, TCG_COND_LT, NsN, RtV, riV)
1037 #define fGEN_TCG_J4_cmplt_f_jumpnv_t(SHORTCODE) \
1038 gen_cmp_jumpnv(ctx, TCG_COND_GE, NsN, RtV, riV)
1039 #define fGEN_TCG_J4_cmplt_f_jumpnv_nt(SHORTCODE) \
1040 gen_cmp_jumpnv(ctx, TCG_COND_GE, NsN, RtV, riV)
1042 #define fGEN_TCG_J4_cmpeqi_t_jumpnv_t(SHORTCODE) \
1043 gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, UiV, riV)
1044 #define fGEN_TCG_J4_cmpeqi_t_jumpnv_nt(SHORTCODE) \
1045 gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, UiV, riV)
1046 #define fGEN_TCG_J4_cmpeqi_f_jumpnv_t(SHORTCODE) \
1047 gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, UiV, riV)
1048 #define fGEN_TCG_J4_cmpeqi_f_jumpnv_nt(SHORTCODE) \
1049 gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, UiV, riV)
1051 #define fGEN_TCG_J4_cmpgti_t_jumpnv_t(SHORTCODE) \
1052 gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, UiV, riV)
1053 #define fGEN_TCG_J4_cmpgti_t_jumpnv_nt(SHORTCODE) \
1054 gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, UiV, riV)
1055 #define fGEN_TCG_J4_cmpgti_f_jumpnv_t(SHORTCODE) \
1056 gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, UiV, riV)
1057 #define fGEN_TCG_J4_cmpgti_f_jumpnv_nt(SHORTCODE) \
1058 gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, UiV, riV)
1060 #define fGEN_TCG_J4_cmpltu_t_jumpnv_t(SHORTCODE) \
1061 gen_cmp_jumpnv(ctx, TCG_COND_LTU, NsN, RtV, riV)
1062 #define fGEN_TCG_J4_cmpltu_t_jumpnv_nt(SHORTCODE) \
1063 gen_cmp_jumpnv(ctx, TCG_COND_LTU, NsN, RtV, riV)
1064 #define fGEN_TCG_J4_cmpltu_f_jumpnv_t(SHORTCODE) \
1065 gen_cmp_jumpnv(ctx, TCG_COND_GEU, NsN, RtV, riV)
1066 #define fGEN_TCG_J4_cmpltu_f_jumpnv_nt(SHORTCODE) \
1067 gen_cmp_jumpnv(ctx, TCG_COND_GEU, NsN, RtV, riV)
1069 #define fGEN_TCG_J4_cmpgtui_t_jumpnv_t(SHORTCODE) \
1070 gen_cmpi_jumpnv(ctx, TCG_COND_GTU, NsN, UiV, riV)
1071 #define fGEN_TCG_J4_cmpgtui_t_jumpnv_nt(SHORTCODE) \
1072 gen_cmpi_jumpnv(ctx, TCG_COND_GTU, NsN, UiV, riV)
1073 #define fGEN_TCG_J4_cmpgtui_f_jumpnv_t(SHORTCODE) \
1074 gen_cmpi_jumpnv(ctx, TCG_COND_LEU, NsN, UiV, riV)
1075 #define fGEN_TCG_J4_cmpgtui_f_jumpnv_nt(SHORTCODE) \
1076 gen_cmpi_jumpnv(ctx, TCG_COND_LEU, NsN, UiV, riV)
1078 #define fGEN_TCG_J4_cmpgtu_t_jumpnv_t(SHORTCODE) \
1079 gen_cmp_jumpnv(ctx, TCG_COND_GTU, NsN, RtV, riV)
1080 #define fGEN_TCG_J4_cmpgtu_t_jumpnv_nt(SHORTCODE) \
1081 gen_cmp_jumpnv(ctx, TCG_COND_GTU, NsN, RtV, riV)
1082 #define fGEN_TCG_J4_cmpgtu_f_jumpnv_t(SHORTCODE) \
1083 gen_cmp_jumpnv(ctx, TCG_COND_LEU, NsN, RtV, riV)
1084 #define fGEN_TCG_J4_cmpgtu_f_jumpnv_nt(SHORTCODE) \
1085 gen_cmp_jumpnv(ctx, TCG_COND_LEU, NsN, RtV, riV)
1087 #define fGEN_TCG_J4_cmpeqn1_t_jumpnv_t(SHORTCODE) \
1088 gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, -1, riV)
1089 #define fGEN_TCG_J4_cmpeqn1_t_jumpnv_nt(SHORTCODE) \
1090 gen_cmpi_jumpnv(ctx, TCG_COND_EQ, NsN, -1, riV)
1091 #define fGEN_TCG_J4_cmpeqn1_f_jumpnv_t(SHORTCODE) \
1092 gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, -1, riV)
1093 #define fGEN_TCG_J4_cmpeqn1_f_jumpnv_nt(SHORTCODE) \
1094 gen_cmpi_jumpnv(ctx, TCG_COND_NE, NsN, -1, riV)
1096 #define fGEN_TCG_J4_cmpgtn1_t_jumpnv_t(SHORTCODE) \
1097 gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, -1, riV)
1098 #define fGEN_TCG_J4_cmpgtn1_t_jumpnv_nt(SHORTCODE) \
1099 gen_cmpi_jumpnv(ctx, TCG_COND_GT, NsN, -1, riV)
1100 #define fGEN_TCG_J4_cmpgtn1_f_jumpnv_t(SHORTCODE) \
1101 gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, -1, riV)
1102 #define fGEN_TCG_J4_cmpgtn1_f_jumpnv_nt(SHORTCODE) \
1103 gen_cmpi_jumpnv(ctx, TCG_COND_LE, NsN, -1, riV)
1105 #define fGEN_TCG_J4_tstbit0_t_jumpnv_t(SHORTCODE) \
1106 gen_testbit0_jumpnv(ctx, NsN, TCG_COND_EQ, riV)
1107 #define fGEN_TCG_J4_tstbit0_t_jumpnv_nt(SHORTCODE) \
1108 gen_testbit0_jumpnv(ctx, NsN, TCG_COND_EQ, riV)
1109 #define fGEN_TCG_J4_tstbit0_f_jumpnv_t(SHORTCODE) \
1110 gen_testbit0_jumpnv(ctx, NsN, TCG_COND_NE, riV)
1111 #define fGEN_TCG_J4_tstbit0_f_jumpnv_nt(SHORTCODE) \
1112 gen_testbit0_jumpnv(ctx, NsN, TCG_COND_NE, riV)
1114 /* r0 = r1 ; jump address */
1115 #define fGEN_TCG_J4_jumpsetr(SHORTCODE) \
1116 do { \
1117 tcg_gen_mov_tl(RdV, RsV); \
1118 gen_jump(ctx, riV); \
1119 } while (0)
1121 /* if (p0.new) r0 = #0 */
1122 #define fGEN_TCG_SA1_clrtnew(SHORTCODE) \
1123 do { \
1124 tcg_gen_movcond_tl(TCG_COND_EQ, RdV, \
1125 ctx->new_pred_value[0], tcg_constant_tl(0), \
1126 RdV, tcg_constant_tl(0)); \
1127 } while (0)
1129 /* if (!p0.new) r0 = #0 */
1130 #define fGEN_TCG_SA1_clrfnew(SHORTCODE) \
1131 do { \
1132 tcg_gen_movcond_tl(TCG_COND_NE, RdV, \
1133 ctx->new_pred_value[0], tcg_constant_tl(0), \
1134 RdV, tcg_constant_tl(0)); \
1135 } while (0)
1137 #define fGEN_TCG_J2_pause(SHORTCODE) \
1138 do { \
1139 uiV = uiV; \
1140 tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->next_PC); \
1141 } while (0)
1143 /* r0 = asr(r1, r2):sat */
1144 #define fGEN_TCG_S2_asr_r_r_sat(SHORTCODE) \
1145 gen_asr_r_r_sat(ctx, RdV, RsV, RtV)
1147 /* r0 = asl(r1, r2):sat */
1148 #define fGEN_TCG_S2_asl_r_r_sat(SHORTCODE) \
1149 gen_asl_r_r_sat(ctx, RdV, RsV, RtV)
1151 #define fGEN_TCG_SL2_jumpr31(SHORTCODE) \
1152 gen_jumpr(ctx, hex_gpr[HEX_REG_LR])
1154 #define fGEN_TCG_SL2_jumpr31_t(SHORTCODE) \
1155 gen_cond_jumpr31(ctx, TCG_COND_EQ, hex_pred[0])
1156 #define fGEN_TCG_SL2_jumpr31_f(SHORTCODE) \
1157 gen_cond_jumpr31(ctx, TCG_COND_NE, hex_pred[0])
1159 #define fGEN_TCG_SL2_jumpr31_tnew(SHORTCODE) \
1160 gen_cond_jumpr31(ctx, TCG_COND_EQ, ctx->new_pred_value[0])
1161 #define fGEN_TCG_SL2_jumpr31_fnew(SHORTCODE) \
1162 gen_cond_jumpr31(ctx, TCG_COND_NE, ctx->new_pred_value[0])
1164 /* Count trailing zeros/ones */
1165 #define fGEN_TCG_S2_ct0(SHORTCODE) \
1166 do { \
1167 tcg_gen_ctzi_tl(RdV, RsV, 32); \
1168 } while (0)
1169 #define fGEN_TCG_S2_ct1(SHORTCODE) \
1170 do { \
1171 tcg_gen_not_tl(RdV, RsV); \
1172 tcg_gen_ctzi_tl(RdV, RdV, 32); \
1173 } while (0)
1174 #define fGEN_TCG_S2_ct0p(SHORTCODE) \
1175 do { \
1176 TCGv_i64 tmp = tcg_temp_new_i64(); \
1177 tcg_gen_ctzi_i64(tmp, RssV, 64); \
1178 tcg_gen_extrl_i64_i32(RdV, tmp); \
1179 } while (0)
1180 #define fGEN_TCG_S2_ct1p(SHORTCODE) \
1181 do { \
1182 TCGv_i64 tmp = tcg_temp_new_i64(); \
1183 tcg_gen_not_i64(tmp, RssV); \
1184 tcg_gen_ctzi_i64(tmp, tmp, 64); \
1185 tcg_gen_extrl_i64_i32(RdV, tmp); \
1186 } while (0)
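/*
 * Illustrative sketch only: counting trailing ones by complementing first,
 * the same trick the ct1/ct1p expansions above use (tcg_gen_not before the
 * ctz).  The function name is made up.
 */
static inline unsigned int gen_tcg_example_ct1(unsigned int rs)
{
    unsigned int n = 0;
    rs = ~rs;                                /* trailing ones -> trailing zeros */
    while (n < 32 && !(rs & (1u << n))) {
        n++;
    }
    return n;
}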
1188 #define fGEN_TCG_S2_insert(SHORTCODE) \
1189 do { \
1190 int width = uiV; \
1191 int offset = UiV; \
1192 if (width != 0) { \
1193 if (offset + width > 32) { \
1194 width = 32 - offset; \
1195 } \
1196 tcg_gen_deposit_tl(RxV, RxV, RsV, offset, width); \
1197 } \
1198 } while (0)
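/*
 * Illustrative sketch only: the deposit performed by S2_insert on plain C
 * types, including the clamp applied when offset + width would run past
 * bit 32.  The function name is made up.
 */
static inline unsigned int gen_tcg_example_insert(unsigned int rx, unsigned int rs, unsigned int offset, unsigned int width)
{
    if (width == 0) {
        return rx;
    }
    if (offset + width > 32) {
        width = 32 - offset;
    }
    unsigned int mask = (width >= 32) ? 0xffffffffu : (((1u << width) - 1) << offset);
    return (rx & ~mask) | ((rs << offset) & mask);
}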
1199 #define fGEN_TCG_S2_insert_rp(SHORTCODE) \
1200 gen_insert_rp(ctx, RxV, RsV, RttV)
1201 #define fGEN_TCG_S2_asr_r_svw_trun(SHORTCODE) \
1202 gen_asr_r_svw_trun(ctx, RdV, RssV, RtV)
1203 #define fGEN_TCG_A2_swiz(SHORTCODE) \
1204 tcg_gen_bswap_tl(RdV, RsV)
1206 /* Floating point */
1207 #define fGEN_TCG_F2_conv_sf2df(SHORTCODE) \
1208 gen_helper_conv_sf2df(RddV, tcg_env, RsV)
1209 #define fGEN_TCG_F2_conv_df2sf(SHORTCODE) \
1210 gen_helper_conv_df2sf(RdV, tcg_env, RssV)
1211 #define fGEN_TCG_F2_conv_uw2sf(SHORTCODE) \
1212 gen_helper_conv_uw2sf(RdV, tcg_env, RsV)
1213 #define fGEN_TCG_F2_conv_uw2df(SHORTCODE) \
1214 gen_helper_conv_uw2df(RddV, tcg_env, RsV)
1215 #define fGEN_TCG_F2_conv_w2sf(SHORTCODE) \
1216 gen_helper_conv_w2sf(RdV, tcg_env, RsV)
1217 #define fGEN_TCG_F2_conv_w2df(SHORTCODE) \
1218 gen_helper_conv_w2df(RddV, tcg_env, RsV)
1219 #define fGEN_TCG_F2_conv_ud2sf(SHORTCODE) \
1220 gen_helper_conv_ud2sf(RdV, tcg_env, RssV)
1221 #define fGEN_TCG_F2_conv_ud2df(SHORTCODE) \
1222 gen_helper_conv_ud2df(RddV, tcg_env, RssV)
1223 #define fGEN_TCG_F2_conv_d2sf(SHORTCODE) \
1224 gen_helper_conv_d2sf(RdV, tcg_env, RssV)
1225 #define fGEN_TCG_F2_conv_d2df(SHORTCODE) \
1226 gen_helper_conv_d2df(RddV, tcg_env, RssV)
1227 #define fGEN_TCG_F2_conv_sf2uw(SHORTCODE) \
1228 gen_helper_conv_sf2uw(RdV, tcg_env, RsV)
1229 #define fGEN_TCG_F2_conv_sf2w(SHORTCODE) \
1230 gen_helper_conv_sf2w(RdV, tcg_env, RsV)
1231 #define fGEN_TCG_F2_conv_sf2ud(SHORTCODE) \
1232 gen_helper_conv_sf2ud(RddV, tcg_env, RsV)
1233 #define fGEN_TCG_F2_conv_sf2d(SHORTCODE) \
1234 gen_helper_conv_sf2d(RddV, tcg_env, RsV)
1235 #define fGEN_TCG_F2_conv_df2uw(SHORTCODE) \
1236 gen_helper_conv_df2uw(RdV, tcg_env, RssV)
1237 #define fGEN_TCG_F2_conv_df2w(SHORTCODE) \
1238 gen_helper_conv_df2w(RdV, tcg_env, RssV)
1239 #define fGEN_TCG_F2_conv_df2ud(SHORTCODE) \
1240 gen_helper_conv_df2ud(RddV, tcg_env, RssV)
1241 #define fGEN_TCG_F2_conv_df2d(SHORTCODE) \
1242 gen_helper_conv_df2d(RddV, tcg_env, RssV)
1243 #define fGEN_TCG_F2_conv_sf2uw_chop(SHORTCODE) \
1244 gen_helper_conv_sf2uw_chop(RdV, tcg_env, RsV)
1245 #define fGEN_TCG_F2_conv_sf2w_chop(SHORTCODE) \
1246 gen_helper_conv_sf2w_chop(RdV, tcg_env, RsV)
1247 #define fGEN_TCG_F2_conv_sf2ud_chop(SHORTCODE) \
1248 gen_helper_conv_sf2ud_chop(RddV, tcg_env, RsV)
1249 #define fGEN_TCG_F2_conv_sf2d_chop(SHORTCODE) \
1250 gen_helper_conv_sf2d_chop(RddV, tcg_env, RsV)
1251 #define fGEN_TCG_F2_conv_df2uw_chop(SHORTCODE) \
1252 gen_helper_conv_df2uw_chop(RdV, tcg_env, RssV)
1253 #define fGEN_TCG_F2_conv_df2w_chop(SHORTCODE) \
1254 gen_helper_conv_df2w_chop(RdV, tcg_env, RssV)
1255 #define fGEN_TCG_F2_conv_df2ud_chop(SHORTCODE) \
1256 gen_helper_conv_df2ud_chop(RddV, tcg_env, RssV)
1257 #define fGEN_TCG_F2_conv_df2d_chop(SHORTCODE) \
1258 gen_helper_conv_df2d_chop(RddV, tcg_env, RssV)
1259 #define fGEN_TCG_F2_sfadd(SHORTCODE) \
1260 gen_helper_sfadd(RdV, tcg_env, RsV, RtV)
1261 #define fGEN_TCG_F2_sfsub(SHORTCODE) \
1262 gen_helper_sfsub(RdV, tcg_env, RsV, RtV)
1263 #define fGEN_TCG_F2_sfcmpeq(SHORTCODE) \
1264 gen_helper_sfcmpeq(PdV, tcg_env, RsV, RtV)
1265 #define fGEN_TCG_F2_sfcmpgt(SHORTCODE) \
1266 gen_helper_sfcmpgt(PdV, tcg_env, RsV, RtV)
1267 #define fGEN_TCG_F2_sfcmpge(SHORTCODE) \
1268 gen_helper_sfcmpge(PdV, tcg_env, RsV, RtV)
1269 #define fGEN_TCG_F2_sfcmpuo(SHORTCODE) \
1270 gen_helper_sfcmpuo(PdV, tcg_env, RsV, RtV)
1271 #define fGEN_TCG_F2_sfmax(SHORTCODE) \
1272 gen_helper_sfmax(RdV, tcg_env, RsV, RtV)
1273 #define fGEN_TCG_F2_sfmin(SHORTCODE) \
1274 gen_helper_sfmin(RdV, tcg_env, RsV, RtV)
1275 #define fGEN_TCG_F2_sfclass(SHORTCODE) \
1276 do { \
1277 TCGv imm = tcg_constant_tl(uiV); \
1278 gen_helper_sfclass(PdV, tcg_env, RsV, imm); \
1279 } while (0)
1280 #define fGEN_TCG_F2_sffixupn(SHORTCODE) \
1281 gen_helper_sffixupn(RdV, tcg_env, RsV, RtV)
1282 #define fGEN_TCG_F2_sffixupd(SHORTCODE) \
1283 gen_helper_sffixupd(RdV, tcg_env, RsV, RtV)
1284 #define fGEN_TCG_F2_sffixupr(SHORTCODE) \
1285 gen_helper_sffixupr(RdV, tcg_env, RsV)
1286 #define fGEN_TCG_F2_dfadd(SHORTCODE) \
1287 gen_helper_dfadd(RddV, tcg_env, RssV, RttV)
1288 #define fGEN_TCG_F2_dfsub(SHORTCODE) \
1289 gen_helper_dfsub(RddV, tcg_env, RssV, RttV)
1290 #define fGEN_TCG_F2_dfmax(SHORTCODE) \
1291 gen_helper_dfmax(RddV, tcg_env, RssV, RttV)
1292 #define fGEN_TCG_F2_dfmin(SHORTCODE) \
1293 gen_helper_dfmin(RddV, tcg_env, RssV, RttV)
1294 #define fGEN_TCG_F2_dfcmpeq(SHORTCODE) \
1295 gen_helper_dfcmpeq(PdV, tcg_env, RssV, RttV)
1296 #define fGEN_TCG_F2_dfcmpgt(SHORTCODE) \
1297 gen_helper_dfcmpgt(PdV, tcg_env, RssV, RttV)
1298 #define fGEN_TCG_F2_dfcmpge(SHORTCODE) \
1299 gen_helper_dfcmpge(PdV, tcg_env, RssV, RttV)
1300 #define fGEN_TCG_F2_dfcmpuo(SHORTCODE) \
1301 gen_helper_dfcmpuo(PdV, tcg_env, RssV, RttV)
1302 #define fGEN_TCG_F2_dfclass(SHORTCODE) \
1303 do { \
1304 TCGv imm = tcg_constant_tl(uiV); \
1305 gen_helper_dfclass(PdV, tcg_env, RssV, imm); \
1306 } while (0)
1307 #define fGEN_TCG_F2_sfmpy(SHORTCODE) \
1308 gen_helper_sfmpy(RdV, tcg_env, RsV, RtV)
1309 #define fGEN_TCG_F2_sffma(SHORTCODE) \
1310 gen_helper_sffma(RxV, tcg_env, RxV, RsV, RtV)
1311 #define fGEN_TCG_F2_sffma_sc(SHORTCODE) \
1312 gen_helper_sffma_sc(RxV, tcg_env, RxV, RsV, RtV, PuV)
1313 #define fGEN_TCG_F2_sffms(SHORTCODE) \
1314 gen_helper_sffms(RxV, tcg_env, RxV, RsV, RtV)
1315 #define fGEN_TCG_F2_sffma_lib(SHORTCODE) \
1316 gen_helper_sffma_lib(RxV, tcg_env, RxV, RsV, RtV)
1317 #define fGEN_TCG_F2_sffms_lib(SHORTCODE) \
1318 gen_helper_sffms_lib(RxV, tcg_env, RxV, RsV, RtV)
1320 #define fGEN_TCG_F2_dfmpyfix(SHORTCODE) \
1321 gen_helper_dfmpyfix(RddV, tcg_env, RssV, RttV)
1322 #define fGEN_TCG_F2_dfmpyhh(SHORTCODE) \
1323 gen_helper_dfmpyhh(RxxV, tcg_env, RxxV, RssV, RttV)
1325 /* Nothing to do for these in qemu; we only need to suppress compiler warnings */
1326 #define fGEN_TCG_Y4_l2fetch(SHORTCODE) \
1327 do { \
1328 RsV = RsV; \
1329 RtV = RtV; \
1330 } while (0)
1331 #define fGEN_TCG_Y5_l2fetch(SHORTCODE) \
1332 do { \
1333 RsV = RsV; \
1334 } while (0)
1335 #define fGEN_TCG_Y2_isync(SHORTCODE) \
1336 do { } while (0)
1337 #define fGEN_TCG_Y2_barrier(SHORTCODE) \
1338 do { } while (0)
1339 #define fGEN_TCG_Y2_syncht(SHORTCODE) \
1340 do { } while (0)
1341 #define fGEN_TCG_Y2_dcfetchbo(SHORTCODE) \
1342 do { \
1343 RsV = RsV; \
1344 uiV = uiV; \
1345 } while (0)
1347 #define fGEN_TCG_L2_loadw_aq(SHORTCODE) SHORTCODE
1348 #define fGEN_TCG_L4_loadd_aq(SHORTCODE) SHORTCODE
1350 /* Nothing to do for these in qemu; we only need to suppress compiler warnings */
1351 #define fGEN_TCG_R6_release_at_vi(SHORTCODE) \
1352 do { \
1353 RsV = RsV; \
1354 } while (0)
1355 #define fGEN_TCG_R6_release_st_vi(SHORTCODE) \
1356 do { \
1357 RsV = RsV; \
1358 } while (0)
1360 #define fGEN_TCG_S2_storew_rl_at_vi(SHORTCODE) SHORTCODE
1361 #define fGEN_TCG_S4_stored_rl_at_vi(SHORTCODE) SHORTCODE
1362 #define fGEN_TCG_S2_storew_rl_st_vi(SHORTCODE) SHORTCODE
1363 #define fGEN_TCG_S4_stored_rl_st_vi(SHORTCODE) SHORTCODE
1365 #define fGEN_TCG_J2_trap0(SHORTCODE) \
1366 do { \
1367 uiV = uiV; \
1368 tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->pkt->pc); \
1369 TCGv excp = tcg_constant_tl(HEX_EXCP_TRAP0); \
1370 gen_helper_raise_exception(tcg_env, excp); \
1371 } while (0)
1372 #endif