target-ppc/op_helper_mem.h
/*
 *  PowerPC emulation micro-operations helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Multiple word / string load and store */
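/* Byte-reversed 32-bit load/store helpers, used by the little-endian (_le)
 * variants below to swap word data on the fly. */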
static inline target_ulong glue(ld32r, MEMSUFFIX) (target_ulong EA)
{
    uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
    return ((tmp & 0xFF000000UL) >> 24) | ((tmp & 0x00FF0000UL) >> 8) |
        ((tmp & 0x0000FF00UL) << 8) | ((tmp & 0x000000FFUL) << 24);
}

static inline void glue(st32r, MEMSUFFIX) (target_ulong EA, target_ulong data)
{
    uint32_t tmp =
        ((data & 0xFF000000UL) >> 24) | ((data & 0x00FF0000UL) >> 8) |
        ((data & 0x0000FF00UL) << 8) | ((data & 0x000000FFUL) << 24);
    glue(stl, MEMSUFFIX)(EA, tmp);
}
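
/* lmw/stmw: load/store multiple word. Transfer GPRs rD..r31 to/from
 * consecutive words starting at the effective address held in T0. The _le
 * variants byte-reverse each word for little-endian mode; the _64 variants
 * keep the full 64-bit effective address instead of truncating it to 32 bits. */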
void glue(do_lmw, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        ugpr(dst) = glue(ldl, MEMSUFFIX)((uint32_t)T0);
    }
}

#if defined(TARGET_PPC64)
void glue(do_lmw_64, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        ugpr(dst) = glue(ldl, MEMSUFFIX)((uint64_t)T0);
    }
}
#endif

void glue(do_stmw, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(stl, MEMSUFFIX)((uint32_t)T0, ugpr(src));
    }
}

#if defined(TARGET_PPC64)
void glue(do_stmw_64, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(stl, MEMSUFFIX)((uint64_t)T0, ugpr(src));
    }
}
#endif

void glue(do_lmw_le, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        ugpr(dst) = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
    }
}

#if defined(TARGET_PPC64)
void glue(do_lmw_le_64, MEMSUFFIX) (int dst)
{
    for (; dst < 32; dst++, T0 += 4) {
        ugpr(dst) = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
    }
}
#endif

void glue(do_stmw_le, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(st32r, MEMSUFFIX)((uint32_t)T0, ugpr(src));
    }
}

#if defined(TARGET_PPC64)
void glue(do_stmw_le_64, MEMSUFFIX) (int src)
{
    for (; src < 32; src++, T0 += 4) {
        glue(st32r, MEMSUFFIX)((uint64_t)T0, ugpr(src));
    }
}
#endif
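
/* lswi/lswx and stswi/stswx: string load/store. T0 holds the effective
 * address and T1 the remaining byte count. Whole words are transferred first,
 * register numbers wrap from r31 back to r0, and the trailing bytes of a
 * partial word are handled one at a time, left-justified in the register. */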
void glue(do_lsw, MEMSUFFIX) (int dst)
{
    uint32_t tmp;
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        ugpr(dst++) = glue(ldl, MEMSUFFIX)((uint32_t)T0);
        if (unlikely(dst == 32))
            dst = 0;
    }
    if (unlikely(T1 != 0)) {
        tmp = 0;
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
            tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
        }
        ugpr(dst) = tmp;
    }
}

#if defined(TARGET_PPC64)
void glue(do_lsw_64, MEMSUFFIX) (int dst)
{
    uint32_t tmp;
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        ugpr(dst++) = glue(ldl, MEMSUFFIX)((uint64_t)T0);
        if (unlikely(dst == 32))
            dst = 0;
    }
    if (unlikely(T1 != 0)) {
        tmp = 0;
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
            tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
        }
        ugpr(dst) = tmp;
    }
}
#endif

void glue(do_stsw, MEMSUFFIX) (int src)
{
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        glue(stl, MEMSUFFIX)((uint32_t)T0, ugpr(src++));
        if (unlikely(src == 32))
            src = 0;
    }
    if (unlikely(T1 != 0)) {
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
            glue(stb, MEMSUFFIX)((uint32_t)T0, (ugpr(src) >> sh) & 0xFF);
    }
}

#if defined(TARGET_PPC64)
void glue(do_stsw_64, MEMSUFFIX) (int src)
{
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        glue(stl, MEMSUFFIX)((uint64_t)T0, ugpr(src++));
        if (unlikely(src == 32))
            src = 0;
    }
    if (unlikely(T1 != 0)) {
        for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
            glue(stb, MEMSUFFIX)((uint64_t)T0, (ugpr(src) >> sh) & 0xFF);
    }
}
#endif
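
/* Little-endian variants of the string helpers: whole words go through
 * ld32r/st32r, and the bytes of a partial word are packed starting from the
 * least-significant byte (sh counts up from 0). */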
void glue(do_lsw_le, MEMSUFFIX) (int dst)
{
    uint32_t tmp;
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        ugpr(dst++) = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
        if (unlikely(dst == 32))
            dst = 0;
    }
    if (unlikely(T1 != 0)) {
        tmp = 0;
        for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
            tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
        }
        ugpr(dst) = tmp;
    }
}

#if defined(TARGET_PPC64)
void glue(do_lsw_le_64, MEMSUFFIX) (int dst)
{
    uint32_t tmp;
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        ugpr(dst++) = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
        if (unlikely(dst == 32))
            dst = 0;
    }
    if (unlikely(T1 != 0)) {
        tmp = 0;
        for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
            tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
        }
        ugpr(dst) = tmp;
    }
}
#endif

void glue(do_stsw_le, MEMSUFFIX) (int src)
{
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        glue(st32r, MEMSUFFIX)((uint32_t)T0, ugpr(src++));
        if (unlikely(src == 32))
            src = 0;
    }
    if (unlikely(T1 != 0)) {
        for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
            glue(stb, MEMSUFFIX)((uint32_t)T0, (ugpr(src) >> sh) & 0xFF);
    }
}

#if defined(TARGET_PPC64)
void glue(do_stsw_le_64, MEMSUFFIX) (int src)
{
    int sh;

    for (; T1 > 3; T1 -= 4, T0 += 4) {
        glue(st32r, MEMSUFFIX)((uint64_t)T0, ugpr(src++));
        if (unlikely(src == 32))
            src = 0;
    }
    if (unlikely(T1 != 0)) {
        for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
            glue(stb, MEMSUFFIX)((uint64_t)T0, (ugpr(src) >> sh) & 0xFF);
    }
}
#endif

/* Instruction cache invalidation helper */
void glue(do_icbi, MEMSUFFIX) (void)
{
    uint32_t tmp;
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = glue(ldl, MEMSUFFIX)((uint32_t)T0);
    T0 &= ~(ICACHE_LINE_SIZE - 1);
    tb_invalidate_page_range((uint32_t)T0, (uint32_t)(T0 + ICACHE_LINE_SIZE));
}

#if defined(TARGET_PPC64)
void glue(do_icbi_64, MEMSUFFIX) (void)
{
    uint64_t tmp;
    /* Invalidate one cache line :
     * PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To be sure it will be so,
     * do the load "by hand".
     */
    tmp = glue(ldq, MEMSUFFIX)((uint64_t)T0);
    T0 &= ~(ICACHE_LINE_SIZE - 1);
    tb_invalidate_page_range((uint64_t)T0, (uint64_t)(T0 + ICACHE_LINE_SIZE));
}
#endif

/* PPC 601 specific instructions (POWER bridge) */
// XXX: to be tested
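/* lscbx: load string and compare byte indexed. Loads up to T1 bytes from the
 * address in T0 into successive registers starting at dest, stopping early if
 * a byte equals the match value in T2; the resulting count is left in T0 for
 * the caller. */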
void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
{
    int i, c, d, reg;

    d = 24;
    reg = dest;
    for (i = 0; i < T1; i++) {
        c = glue(ldub, MEMSUFFIX)((uint32_t)T0++);
        /* ra (if not 0) and rb are never modified */
        if (likely(reg != rb && (ra == 0 || reg != ra))) {
            ugpr(reg) = (ugpr(reg) & ~(0xFF << d)) | (c << d);
        }
        if (unlikely(c == T2))
            break;
        if (likely(d != 0)) {
            d -= 8;
        } else {
            d = 24;
            reg++;
            reg = reg & 0x1F;
        }
    }
    T0 = i;
}
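
/* POWER2 lfq/stfq: move the floating-point pair FT0/FT1 to/from memory
 * starting at the address in T0. The _le variants byte-reverse each value
 * through ldfqr/stfqr and exchange the order of the two accesses. */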
/* XXX: TAGs are not managed */
void glue(do_POWER2_lfq, MEMSUFFIX) (void)
{
    FT0 = glue(ldfq, MEMSUFFIX)((uint32_t)T0);
    FT1 = glue(ldfq, MEMSUFFIX)((uint32_t)(T0 + 4));
}

static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
{
    union {
        double d;
        uint64_t u;
    } u;

    u.d = glue(ldfq, MEMSUFFIX)(EA);
    u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
        ((u.u & 0x00FF000000000000ULL) >> 40) |
        ((u.u & 0x0000FF0000000000ULL) >> 24) |
        ((u.u & 0x000000FF00000000ULL) >> 8) |
        ((u.u & 0x00000000FF000000ULL) << 8) |
        ((u.u & 0x0000000000FF0000ULL) << 24) |
        ((u.u & 0x000000000000FF00ULL) << 40) |
        ((u.u & 0x00000000000000FFULL) << 56);

    return u.d;
}

void glue(do_POWER2_lfq_le, MEMSUFFIX) (void)
{
    FT0 = glue(ldfqr, MEMSUFFIX)((uint32_t)(T0 + 4));
    FT1 = glue(ldfqr, MEMSUFFIX)((uint32_t)T0);
}

void glue(do_POWER2_stfq, MEMSUFFIX) (void)
{
    glue(stfq, MEMSUFFIX)((uint32_t)T0, FT0);
    glue(stfq, MEMSUFFIX)((uint32_t)(T0 + 4), FT1);
}

static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
{
    union {
        double d;
        uint64_t u;
    } u;

    u.d = d;
    u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
        ((u.u & 0x00FF000000000000ULL) >> 40) |
        ((u.u & 0x0000FF0000000000ULL) >> 24) |
        ((u.u & 0x000000FF00000000ULL) >> 8) |
        ((u.u & 0x00000000FF000000ULL) << 8) |
        ((u.u & 0x0000000000FF0000ULL) << 24) |
        ((u.u & 0x000000000000FF00ULL) << 40) |
        ((u.u & 0x00000000000000FFULL) << 56);
    glue(stfq, MEMSUFFIX)(EA, u.d);
}

void glue(do_POWER2_stfq_le, MEMSUFFIX) (void)
{
    glue(stfqr, MEMSUFFIX)((uint32_t)(T0 + 4), FT0);
    glue(stfqr, MEMSUFFIX)((uint32_t)T0, FT1);
}

#undef MEMSUFFIX