qemu-kvm.git / softmmu_template.h
/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"
#include "memory.h"
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
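/* For example, including this file with SHIFT defined to 2 selects
   DATA_SIZE == 4, SUFFIX/USUFFIX == l and DATA_TYPE == uint32_t, so the
   template below expands to the 32-bit ("long") load/store helpers. */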
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
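/* The access type value passed to tlb_fill()/do_unaligned_access() encodes
   what kind of access faulted: 0 is a data read, 1 a data write (used as a
   literal in the store helpers below) and 2 a code fetch.  Code fetches
   also probe the addr_code TLB field instead of addr_read. */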
#ifndef CONFIG_TCG_PASS_AREG0
#define ENV_PARAM
#define ENV_VAR
#define CPU_PREFIX
#define HELPER_PREFIX __
#else
#define ENV_PARAM CPUArchState *env,
#define ENV_VAR env,
#define CPU_PREFIX cpu_
#define HELPER_PREFIX helper_
#endif
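/* With CONFIG_TCG_PASS_AREG0 the CPU state is passed to the helpers as an
   explicit first argument instead of being taken from a global env.
   Assuming the including file defines MMUSUFFIX as _mmu and SHIFT as 2,
   the generated load helper is either __ldl_mmu(addr, mmu_idx) or
   helper_ldl_mmu(env, addr, mmu_idx). */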
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
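/* Read DATA_SIZE bytes from an I/O region through the memory API.  A single
   io_mem_read() here handles at most 4 bytes, so 64-bit accesses are split
   into two 32-bit reads combined in guest byte order. */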
static inline DATA_TYPE glue(io_read, SUFFIX)(ENV_PARAM
                                              target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
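/* The TLB hit check below compares the page-aligned virtual address against
   the cached entry; TLB_INVALID_MASK is included in the mask so that an
   invalidated entry can never match and always forces a tlb_fill().  On a
   hit, any remaining low flag bits in tlb_addr divert the access to the
   I/O path. */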
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                       target_ulong addr,
                                                       int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    uintptr_t retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done faster with a non-portable memory macro */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
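/* Worked example of the two-page combine implemented below: a little-endian
   4-byte load at addr 0x0ffe recurses on addr1 = 0x0ffc and addr2 = 0x1000,
   computes shift = (0x0ffe & 3) * 8 = 16 and returns
   (res1 >> 16) | (res2 << 16): the two high bytes of the first word become
   the low half of the result, the two low bytes of the second word the
   high half. */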
/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
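/* Everything below generates the store helpers; it is compiled out for
   SOFTMMU_CODE_ACCESS builds, since code fetches only need the load side. */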
#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);

static inline void glue(io_write, SUFFIX)(ENV_PARAM
                                          target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}
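/* GETPC() captures the host return address of the helper call.  It is kept
   in retaddr (and env->mem_io_pc) so that on a fault or TLB refill
   cpu_restore_state() can map the host PC back to the guest instruction
   that performed the access. */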
void glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                            target_ulong addr,
                                                            DATA_TYPE val,
                                                            int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    uintptr_t retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_VAR addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
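/* Byte order of the byte-by-byte store below, e.g. for DATA_SIZE == 2 on a
   big-endian target: i == 1 stores val >> 0 at addr + 1 and i == 0 stores
   val >> 8 at addr, so memory ends up with the high byte first, as the
   guest expects. */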
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef ENV_PARAM
#undef ENV_VAR
#undef CPU_PREFIX
#undef HELPER_PREFIX
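/* Usage sketch (assumed, based on the header comment above): an including
 * file instantiates the template once per access size, e.g. a target's op
 * helpers for the data-side _mmu variants:
 *
 *   #define MMUSUFFIX _mmu
 *
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 *
 * exec.c would do the same with SOFTMMU_CODE_ACCESS defined and MMUSUFFIX
 * set to _cmmu to generate the code-fetch variants.  The #undefs above
 * reset the per-size macros so the next inclusion starts clean; MMUSUFFIX
 * itself is left to the including file. */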