i386: Remove REGPARM
[qemu.git] / softmmu_template.h
/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"
#include "memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
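
/* A sketch of the usual instantiation pattern (assumed; it mirrors how the
 * per-target op helpers and exec.c include this header): the includer picks
 * an MMUSUFFIX and then includes this file once per access size, with SHIFT
 * selecting DATA_SIZE = 1 << SHIFT:
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 0
 *     #include "softmmu_template.h"   // generates __ldb_mmu()/__stb_mmu()
 *     #define SHIFT 1
 *     #include "softmmu_template.h"   // generates __ldw_mmu()/__stw_mmu()
 *     #define SHIFT 2
 *     #include "softmmu_template.h"   // generates __ldl_mmu()/__stl_mmu()
 *     #define SHIFT 3
 *     #include "softmmu_template.h"   // generates __ldq_mmu()/__stq_mmu()
 *
 * SHIFT is #undef'ed at the bottom of this file, so repeated inclusion with
 * a new SHIFT is safe.
 */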

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
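
/* READ_ACCESS_TYPE is the access_type value handed to tlb_fill() and
   do_unaligned_access(): 0 for a data load, 2 for a code fetch; the store
   helpers below pass 1 (write) literally. */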

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
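    /* A 64-bit access (SHIFT == 3) is wider than a single io_mem_read()
       call here, so it is split into two 32-bit accesses that are combined
       according to the target's endianness. */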
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr, int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for an unaligned or IO access */
    /* XXX: could be done more efficiently with a memory macro, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
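    /* The entry matches if the page-aligned vaddr equals the entry's tag;
       TLB_INVALID_MASK stays in the comparison so an invalidated entry
       (with that bit set) can never match a page-aligned address. */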
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
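            /* Worked example (little-endian, DATA_SIZE == 4, addr & 3 == 1):
               shift == 8, so the three high bytes of res1 land in bits 0..23
               and the low byte of res2 in bits 24..31:
               res = (res1 >> 8) | (res2 << 24). */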
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

void glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE val,
                                         int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
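            /* E.g. a little-endian 4-byte store issues four byte stores:
               addr+0 <- val, addr+1 <- val >> 8, addr+2 <- val >> 16,
               addr+3 <- val >> 24 (the loop runs from i = 3 down to 0). */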
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ