/*
 * Generate helpers used by TCG for qemu_ld/st ops and code loads.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/memory.h"
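
/*
 * Note: this file is a template.  The including file defines SHIFT
 * (0, 1, 2 or 3) before each inclusion, so every helper below is
 * instantiated once per access size with the matching SUFFIX and
 * DATA_TYPE expansions.
 */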
#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
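
/*
 * Access-type convention, as used by tlb_fill() and do_unaligned_access()
 * below: 0 is a data load, 1 a data store, 2 an instruction fetch.
 * ADDR_READ selects the TLB comparator matching the access type.
 */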
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        uintptr_t retaddr);
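
/*
 * Load from an I/O region.  iotlb_to_region() recovers the MemoryRegion
 * from the iotlb entry; the masked entry plus the virtual address gives
 * the offset handed to io_mem_read().  retaddr (the host return address)
 * is stashed in mem_io_pc, presumably so the guest PC can be recovered
 * if this access forces cpu_io_recompile().
 */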
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
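
/*
 * Note the TLB compare trick used below: including TLB_INVALID_MASK in
 * the mask on the TLB side only means an invalid entry can never equal
 * (addr & TARGET_PAGE_MASK), so the validity and tag checks cost a
 * single comparison.
 */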
/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    hwaddr ioaddr;
    uintptr_t retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done faster with a non-portable memory macro */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
                   TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx,
                                    retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
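
/*
 * Worked example for the two-page combine below (little-endian,
 * DATA_SIZE == 4, addr == 0xffe): addr1 == 0xffc, addr2 == 0x1000 and
 * shift == 16, so (res1 >> 16) | (res2 << 16) joins the last two bytes
 * of the first page with the first two bytes of the second.
 */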
/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                       target_ulong addr,
                                       int mmu_idx,
                                       uintptr_t retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    hwaddr ioaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
                   TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(env, addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(intptr_t)
                                                (addr + addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS
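
/*
 * Everything from here on is the store side, which is only generated
 * for data accesses: SOFTMMU_CODE_ACCESS inclusions need the load
 * helpers above but never store.
 */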
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr);
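
/*
 * Store to an I/O region; the mirror image of io_read above.  For
 * SHIFT == 3 the 64-bit value is emitted as two 4-byte io_mem_write()
 * calls, high word first on big-endian targets.
 */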
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}
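
/*
 * GETPC_EXT() is evaluated only on the slow paths below: the common
 * case of an aligned store hitting RAM completes without materialising
 * a return address at all.
 */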
void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                              target_ulong addr, DATA_TYPE val,
                                              int mmu_idx)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    uintptr_t retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC_EXT();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
                   TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(env, addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend;
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC_EXT();
                do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC_EXT();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
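
/*
 * A store spanning two pages falls back to DATA_SIZE single-byte
 * stores through slow_stb, with the endian-dependent shift selecting
 * the byte of val that belongs at each address.
 */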
/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   uintptr_t retaddr)
{
    hwaddr ioaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
                   TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) -
                                                  (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(env, addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            uintptr_t addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(intptr_t)
                                         (addr + addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE