/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"
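
/*
 * This header is a template: it is included once per access size with
 * SHIFT set to 0, 1, 2 or 3, and each inclusion generates the load/store
 * helpers for that size.  Illustrative instantiation (the real including
 * files also define MMUSUFFIX, GETPC_EXT and related macros):
 *
 *   #define SHIFT 0
 *   #include "exec/softmmu_template.h"
 *   #define SHIFT 1
 *   #include "exec/softmmu_template.h"
 */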

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
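
/*
 * io_read forwards a load that hit an I/O page to the memory API: it
 * recovers the MemoryRegion from the IOTLB entry, calls cpu_io_recompile()
 * when an MMIO access is attempted at a point where I/O is not allowed
 * (!can_do_io()), and issues one io_mem_read (two 4-byte reads for 8-byte
 * accesses).  slow_ld, declared here and defined further down, handles
 * the unaligned cases.
 */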
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
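
/*
 * Fast-path load helper called from TCG-generated code.  On a TLB hit to
 * a RAM page the value is loaded directly through the cached host address
 * (TLB addend); an I/O page goes through io_read; an access that crosses
 * a page boundary falls back to slow_ld; a TLB miss calls tlb_fill() and
 * retries.
 */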
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    hwaddr ioaddr;
    uintptr_t retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done more in memory macro in a non portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
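
/*
 * Slow-path load, used by the fast path for accesses that are unaligned
 * or cross a page boundary.  A two-page access is split into two aligned
 * loads of the same size and the halves are merged with shifts according
 * to the target endianness.
 */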
/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    hwaddr ioaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
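
/*
 * Store helpers are only generated for data accesses; when this header is
 * instantiated for code fetches (SOFTMMU_CODE_ACCESS) the write side is
 * omitted.
 */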
#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}
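
/*
 * Fast-path store helper, the write-side counterpart of the load helper
 * above: direct host store through the TLB addend for aligned RAM
 * accesses, io_write for I/O pages, slow_st for accesses that cross a
 * page boundary, and tlb_fill() plus retry on a TLB miss.
 */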
void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, DATA_TYPE val,
                                              int mmu_idx)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    uintptr_t retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
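
/*
 * Slow-path store: an access that crosses a page boundary is broken up
 * into DATA_SIZE single-byte stores (slow_stb), extracting each byte of
 * val according to the target endianness.
 */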
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */
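
/*
 * Undefine the per-instantiation macros so this header can be included
 * again with a different SHIFT.
 */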
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ