/*
 *  PowerPC emulation micro-operations helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "op_mem_access.h"
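/* Note: this file is a template.  It is meant to be included once per MMU
 * access mode with a different MEMSUFFIX (e.g. _raw, _user, _kernel; see the
 * including file for the exact set), and glue() pastes that suffix onto every
 * helper and accessor name, so glue(do_lmw, MEMSUFFIX) expands to do_lmw_raw,
 * do_lmw_user, ...  T0, T1 and T2 are the translator's global temporaries:
 * T0 carries the effective address, T1/T2 carry counts or comparison values.
 */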

/* Multiple word / string load and store */
void glue(do_lmw, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        env->gpr[dst] = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
    }
}
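/* lmw loads GPRs dst..31 from consecutive words starting at the effective
 * address in T0; stmw (below) is the mirror store.  The _64 variants differ
 * only in keeping the full 64-bit effective address.
 */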

#if defined(TARGET_PPC64)
void glue(do_lmw_64, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        env->gpr[dst] = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
    }
}
#endif

void glue(do_stmw, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(st32, MEMSUFFIX)((uint32_t)T0, env->gpr[src]);
    }
}

#if defined(TARGET_PPC64)
void glue(do_stmw_64, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(st32, MEMSUFFIX)((uint64_t)T0, env->gpr[src]);
    }
}
#endif
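/* The _le helpers below are the byte-reversed (little-endian mode) versions:
 * they run the same register-walking loops but go through the ldu32r/st32r
 * accessors, which swap the bytes of each word.
 */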

void glue(do_lmw_le, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        env->gpr[dst] = glue(ldu32r, MEMSUFFIX)((uint32_t)T0);
    }
}

#if defined(TARGET_PPC64)
void glue(do_lmw_le_64, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        env->gpr[dst] = glue(ldu32r, MEMSUFFIX)((uint64_t)T0);
    }
}
#endif

void glue(do_stmw_le, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(st32r, MEMSUFFIX)((uint32_t)T0, env->gpr[src]);
    }
}

#if defined(TARGET_PPC64)
void glue(do_stmw_le_64, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(st32r, MEMSUFFIX)((uint64_t)T0, env->gpr[src]);
    }
}
#endif

void glue(do_lsw, MEMSUFFIX) (int dst)
{
    uint32_t tmp;
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
        if (unlikely(dst == 32))
            dst = 0;
    }
    if (unlikely(T1 != 0)) {
        tmp = 0;
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
            tmp |= glue(ldu8, MEMSUFFIX)((uint32_t)T0) << sh;
        }
        env->gpr[dst] = tmp;
    }
}
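/* String loads: T1 holds the byte count.  Full words go into successive GPRs,
 * wrapping from r31 back to r0; any 1-3 leftover bytes are packed into the
 * most-significant end of one last register (hence sh starting at 24), with
 * the remaining low-order bytes of that register cleared.
 */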

#if defined(TARGET_PPC64)
void glue(do_lsw_64, MEMSUFFIX) (int dst)
{
    uint32_t tmp;
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
        if (unlikely(dst == 32))
            dst = 0;
    }
    if (unlikely(T1 != 0)) {
        tmp = 0;
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
            tmp |= glue(ldu8, MEMSUFFIX)((uint64_t)T0) << sh;
        }
        env->gpr[dst] = tmp;
    }
}
#endif

void glue(do_stsw, MEMSUFFIX) (int src)
{
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        glue(st32, MEMSUFFIX)((uint32_t)T0, env->gpr[src++]);
        if (unlikely(src == 32))
            src = 0;
    }
    if (unlikely(T1 != 0)) {
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
            glue(st8, MEMSUFFIX)((uint32_t)T0, (env->gpr[src] >> sh) & 0xFF);
    }
}
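/* String stores mirror the loads: whole registers go out as words, and a
 * partial trailing register is emitted byte by byte starting from its
 * most-significant byte.
 */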

#if defined(TARGET_PPC64)
void glue(do_stsw_64, MEMSUFFIX) (int src)
{
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        glue(st32, MEMSUFFIX)((uint64_t)T0, env->gpr[src++]);
        if (unlikely(src == 32))
            src = 0;
    }
    if (unlikely(T1 != 0)) {
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
            glue(st8, MEMSUFFIX)((uint64_t)T0, (env->gpr[src] >> sh) & 0xFF);
    }
}
#endif

/* Instruction cache invalidation helper */
void glue(do_icbi, MEMSUFFIX) (void)
{
    uint32_t tmp;

    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    T0 &= ~(env->icache_line_size - 1);
    tmp = glue(ldl, MEMSUFFIX)((uint32_t)T0);
    tb_invalidate_page_range((uint32_t)T0,
                             (uint32_t)(T0 + env->icache_line_size));
}
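/* For the emulator the real effect of icbi is not on a hardware cache but on
 * translated code: tb_invalidate_page_range() throws away any translation
 * blocks overlapping the invalidated line, so stale translated code cannot
 * keep running after the guest rewrites it.  The dummy load exists only to
 * get load-style MMU permission checks and faults.
 */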

#if defined(TARGET_PPC64)
void glue(do_icbi_64, MEMSUFFIX) (void)
{
    uint64_t tmp;

    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    T0 &= ~(env->icache_line_size - 1);
    tmp = glue(ldq, MEMSUFFIX)((uint64_t)T0);
    tb_invalidate_page_range((uint64_t)T0,
                             (uint64_t)(T0 + env->icache_line_size));
}
#endif

void glue(do_dcbz, MEMSUFFIX) (void)
{
    int dcache_line_size = env->dcache_line_size;

    /* XXX: should be 970 specific (?) */
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        dcache_line_size = 32;
    T0 &= ~(uint32_t)(dcache_line_size - 1);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
    glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
    if (dcache_line_size >= 64) {
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x20UL), 0);
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x24UL), 0);
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x28UL), 0);
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x2CUL), 0);
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x30UL), 0);
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x34UL), 0);
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x38UL), 0);
        glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x3CUL), 0);
        if (dcache_line_size >= 128) {
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x40UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x44UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x48UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x4CUL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x50UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x54UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x58UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x5CUL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x60UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x64UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x68UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x6CUL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x70UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x74UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x78UL), 0);
            glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x7CUL), 0);
        }
    }
}

#if defined(TARGET_PPC64)
void glue(do_dcbz_64, MEMSUFFIX) (void)
{
    int dcache_line_size = env->dcache_line_size;

    /* XXX: should be 970 specific (?) */
    if (((env->spr[SPR_970_HID5] >> 6) & 0x3) == 0x2)
        dcache_line_size = 32;
    T0 &= ~(uint64_t)(dcache_line_size - 1);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
    glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
    if (dcache_line_size >= 64) {
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x20UL), 0);
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x24UL), 0);
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x28UL), 0);
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x2CUL), 0);
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x30UL), 0);
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x34UL), 0);
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x38UL), 0);
        glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x3CUL), 0);
        if (dcache_line_size >= 128) {
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x40UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x44UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x48UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x4CUL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x50UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x54UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x58UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x5CUL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x60UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x64UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x68UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x6CUL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x70UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x74UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x78UL), 0);
            glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x7CUL), 0);
        }
    }
}
#endif
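/* dcbz zeroes one whole data cache line: T0 is masked down to a line
 * boundary and the fill is done with 4-byte stl stores, eight of them
 * covering the first 32 bytes unconditionally, with the nested tests on
 * dcache_line_size extending that to 64- or 128-byte lines.  The HID5 check
 * lets a 970 force 32-byte behaviour regardless of the real line size.
 */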

/* PowerPC 601 specific instructions (POWER bridge) */
void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
{
    int i, c, d, reg;

    d = 24;
    reg = dest;
    for (i = 0; i < T1; i++) {
        c = glue(ldu8, MEMSUFFIX)((uint32_t)T0++);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == T2))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    T0 = i;
}
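/* lscbx ("load string and compare byte indexed"): up to T1 bytes are loaded
 * one at a time and packed into successive GPRs from the top byte down,
 * skipping ra and rb so the address registers are never clobbered, and the
 * loop stops early when a byte equals the match value passed in T2.  The
 * byte count is left in T0 (as reconstructed here) for the caller to fold
 * into XER.
 */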

/* XXX: TAGs are not managed */
void glue(do_POWER2_lfq, MEMSUFFIX) (void)
{
    FT0 = glue(ldfq, MEMSUFFIX)((uint32_t)T0);
    FT1 = glue(ldfq, MEMSUFFIX)((uint32_t)(T0 + 4));
}

static always_inline float64 glue(ldfqr, MEMSUFFIX) (target_ulong EA)
{
    union {
        float64 d;
        uint64_t ll;
    } u;

    u.d = glue(ldfq, MEMSUFFIX)(EA);
    u.ll = bswap64(u.ll);

    return u.d;
}

void glue(do_POWER2_lfq_le, MEMSUFFIX) (void)
{
    FT0 = glue(ldfqr, MEMSUFFIX)((uint32_t)(T0 + 4));
    FT1 = glue(ldfqr, MEMSUFFIX)((uint32_t)T0);
}

void glue(do_POWER2_stfq, MEMSUFFIX) (void)
{
    glue(stfq, MEMSUFFIX)((uint32_t)T0, FT0);
    glue(stfq, MEMSUFFIX)((uint32_t)(T0 + 4), FT1);
}

static always_inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, float64 d)
{
    union {
        float64 d;
        uint64_t ll;
    } u;

    u.d = d;
    u.ll = bswap64(u.ll);
    glue(stfq, MEMSUFFIX)(EA, u.d);
}

void glue(do_POWER2_stfq_le, MEMSUFFIX) (void)
{
    glue(stfqr, MEMSUFFIX)((uint32_t)(T0 + 4), FT0);
    glue(stfqr, MEMSUFFIX)((uint32_t)T0, FT1);
}
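/* Reading of the POWER2 quad helpers: lfq/stfq move two consecutive
 * double-precision values through FT0/FT1, and the _le variants feed each
 * access through the byte-swapping ldfqr/stfqr wrappers while also
 * exchanging the order of the two accesses for little-endian mode.
 */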