[qemu.git] / softmmu_template.h
/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"
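
/* Descriptive note (added): this header is a template.  The including file
   defines SHIFT (log2 of the access size in bytes) and MMUSUFFIX before each
   inclusion, so one copy of the ld/st helpers below is generated per access
   size, with a separate code-fetch flavour when SOFTMMU_CODE_ACCESS is
   defined. */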

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
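
/* Descriptive note (added): SUFFIX/USUFFIX select the helper and raw ld/st
   name variants (ldub_raw, lduw_raw, ...) used on the fast path below.  When
   SOFTMMU_CODE_ACCESS is defined, this instantiation services instruction
   fetches: reads go through the addr_code TLB field and tlb_fill() is told
   the access type is an instruction fetch (2) rather than a data read (0). */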

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
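
/* Descriptive note (added): io_read fetches DATA_SIZE bytes from an I/O
   (non-RAM) page by calling the read callback registered for the memory
   region; a 64-bit access is split into two 32-bit reads because callbacks
   only exist for sizes up to 4 bytes. */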
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
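
/* Descriptive note (added): the __ld helper below is the fast path called
   directly from TCG-generated code.  GETPC() records the host return address
   so that, on a TLB miss, tlb_fill() can locate the guest instruction that
   caused the access and restore precise CPU state before raising a fault. */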

/* handle all cases except unaligned access which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t addend;
    void *retaddr;

    /* test if there is match for unaligned or IO access */
    /* XXX: could done more in memory macro in a non portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
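
/* Descriptive note (added): the slow path below splits an access that
   crosses a page boundary into two aligned accesses, one per page, and
   recombines the two halves with shifts according to the target byte
   order. */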

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
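
/* Descriptive note (added): store helpers are only generated for the
   data-access instantiation; the code-fetch instantiation
   (SOFTMMU_CODE_ACCESS) needs loads only. */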

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);
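
/* Descriptive note (added): io_write stores DATA_SIZE bytes to an I/O
   (non-RAM) page through the write callback registered for the memory
   region; as for reads, a 64-bit store is split into two 32-bit writes in
   target byte order. */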

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
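
/* Descriptive note (added): fast-path store, called directly from
   TCG-generated code.  Handles all cases except an unaligned access that
   spans two pages. */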

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
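
/* Descriptive note (added): the slow store path below falls back to issuing
   DATA_SIZE single-byte stores via slow_stb, so each byte takes its own TLB
   lookup and can land on either of the two pages (or on I/O). */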

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            addend = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(addend, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
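
/* Usage sketch (added, illustrative only; the exact including file and extra
   macros such as GETPC vary between QEMU versions): the includer defines
   MMUSUFFIX and then instantiates the template once per access size, e.g.

       #define MMUSUFFIX _mmu
       #define SHIFT 0
       #include "softmmu_template.h"
       #define SHIFT 1
       #include "softmmu_template.h"
       #define SHIFT 2
       #include "softmmu_template.h"
       #define SHIFT 3
       #include "softmmu_template.h"

   which generates the __ldb_mmu/__stb_mmu ... __ldq_mmu/__stq_mmu helpers. */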