/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/timer.h"
#include "exec/memory.h"
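
/*
 * This header is a multiple-inclusion template: the includer defines
 * SHIFT (log2 of the access size) and MMUSUFFIX, then includes the file
 * once per size.  glue() pastes tokens, so SHIFT == 0 with
 * MMUSUFFIX == _mmu generates helper_ldb_mmu(), helper_stb_mmu() and
 * friends.  An illustrative instantiation sketch (the exact including
 * file and macro values depend on the tree):
 *
 *   #define MMUSUFFIX _mmu
 *   #define SHIFT 0
 *   #include "softmmu_template.h"
 *   #define SHIFT 1
 *   #include "softmmu_template.h"
 *   #define SHIFT 2
 *   #include "softmmu_template.h"
 *   #define SHIFT 3
 *   #include "softmmu_template.h"
 */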

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
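
/*
 * READ_ACCESS_TYPE is the access_type value handed to tlb_fill() and
 * do_unaligned_access(): 0 for data loads, 2 for code fetches; the store
 * helpers below pass 1 explicitly.  Code fetches also probe the TLB
 * through the addr_code field rather than addr_read.
 */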

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
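
/*
 * Perform a load through the I/O path: the iotlb entry is translated
 * back to its MemoryRegion and the access is dispatched to the region's
 * read callback.  Accesses wider than 4 bytes are split into two 4-byte
 * reads and combined in guest-endian order.
 */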
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
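
/*
 * TLB lookup scheme used by the helpers below: each entry stores a
 * page-aligned guest address with flag bits (e.g. TLB_INVALID_MASK)
 * folded into its low bits, so a single masked compare rejects both a
 * wrong page and an invalid entry; any other nonzero low bit marks the
 * page as I/O.  For RAM pages, entry->addend is the constant that turns
 * the guest virtual address into a host pointer.  GETPC_EXT() captures
 * the host return address into the generated code, from which tlb_fill()
 * can recover the guest PC if it has to raise an exception.
 */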

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    DATA_TYPE res;
    target_ulong tlb_addr;
    hwaddr ioaddr;
    uintptr_t retaddr;
    int index;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done faster with a memory macro, in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx,
                                    retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
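
/*
 * Worked example of the two-page merge in the slow path below: a 4-byte
 * little-endian load at offset 2 into an aligned word reads the word at
 * addr1 = addr & ~3 and the following word at addr2 = addr1 + 4.  With
 * shift = 16, (res1 >> 16) | (res2 << 16) assembles bytes
 * addr..addr+3 in the right order.  Each recursive slow_ld call does its
 * own TLB lookup, so a spanning access may trigger two TLB fills.
 */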

/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    target_ulong tlb_addr, addr1, addr2;
    hwaddr ioaddr;
    int index, shift;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS
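
/*
 * Store helpers are only generated for data accesses: a code-access
 * instantiation (SOFTMMU_CODE_ACCESS) needs the load path above and
 * nothing else.
 */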

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}
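
/*
 * Store fast path: same structure as helper_ld above, but probing the
 * addr_write field and passing access_type 1 to tlb_fill() and
 * do_unaligned_access().
 */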
void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, DATA_TYPE val,
                                              int mmu_idx)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    uintptr_t retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
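
/*
 * Unlike slow_ld, the unaligned store slow path does not merge values:
 * it simply issues DATA_SIZE single-byte stores through
 * glue(slow_stb, MMUSUFFIX), the byte helper generated by the SHIFT == 0
 * instantiation of this template, extracting each byte with the shift
 * appropriate to the guest endianness.
 */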

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0) {
                goto do_unaligned_access;
            }
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1)
                   >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (((DATA_SIZE - 1) * 8)
                                                  - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ