Avoid range comparisons on io index types
[qemu/ar7.git] / softmmu_template.h
blob 726744c81477491f134caf4396d71350457be0bd
/*
 * Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
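
/*
 * Usage sketch (illustrative, not part of this header): each includer
 * defines SHIFT (log2 of the access size) and MMUSUFFIX before including
 * this file once per access size, roughly as exec.c and the target op
 * helpers do.  The exact call sites and the GETPC() definition vary by
 * host and target; this is just the general pattern:
 *
 *   #define MMUSUFFIX _mmu
 *   #define GETPC() (__builtin_return_address(0))
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 */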
#include "qemu-timer.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
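
/* For example, SHIFT == 2 selects DATA_SIZE 4, DATA_TYPE uint32_t and the
   'l' suffix, so with the usual MMUSUFFIX of _mmu the template below
   expands to __ldl_mmu, __stl_mmu and friends. */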

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
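
/* The access type values follow the convention of tlb_fill()'s is_write
   argument used below: 0 is a data load, 1 a data store, 2 a code fetch. */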

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
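    /* Per the commit subject, the io index is tested with explicit
       equality comparisons rather than a range check (e.g. index >
       IO_MEM_NOTDIRTY), presumably so these special handlers need not
       occupy a contiguous range of index values. */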
    if (index != IO_MEM_RAM && index != IO_MEM_ROM
        && index != IO_MEM_UNASSIGNED && index != IO_MEM_NOTDIRTY
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
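    /* Accesses of up to 4 bytes go through a single io_mem_read(); a
       64-bit access (SHIFT == 3) is split into two 4-byte reads whose
       order depends on the target's endianness. */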
#if SHIFT <= 2
    res = io_mem_read(index, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(index, physaddr, 4) << 32;
    res |= io_mem_read(index, physaddr + 4, 4);
#else
    res = io_mem_read(index, physaddr, 4);
    res |= io_mem_read(index, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: more of this could be done in the memory macro, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
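    /* A TLB entry matches only if the page-aligned address equals the
       stored tag; TLB_INVALID_MASK is kept in the comparison so an
       invalidated entry can never compare equal. */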
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
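            /* Fast path: the host address is the guest address plus the
               per-entry addend computed at TLB-fill time. */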
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
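            /* Worked example (little-endian, DATA_SIZE 4, addr % 4 == 2):
               shift is 16, so the two high-order bytes of res1 supply the
               low half of the result and the two low-order bytes of res2
               the high half. */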
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
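    /* Same explicit io index equality test as in io_read above. */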
    if (index != IO_MEM_RAM && index != IO_MEM_ROM
        && index != IO_MEM_UNASSIGNED && index != IO_MEM_NOTDIRTY
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write(index, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(index, physaddr, (val >> 32), 4);
    io_mem_write(index, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(index, physaddr, (uint32_t)val, 4);
    io_mem_write(index, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
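            /* The value is stored one byte at a time through slow_stb;
               e.g. on a big-endian target, byte i of a 4-byte store is
               val >> ((3 - i) * 8). */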
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ