/*
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * ops.
 *
 * Included from target op helpers and exec.c.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"

#define DATA_SIZE (1 << SHIFT)
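
/* This template is included once per access size, with SHIFT set to
 * 0, 1, 2 or 3, so DATA_SIZE covers 1, 2, 4 and 8 byte accesses. */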

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);

static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);
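
    /* The sub-page bits of the I/O TLB value selected the MemoryRegion
     * above (via iotlb_to_region()); its page-aligned part is now
     * recombined with the access address before talking to the device. */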
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
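
    /* Accesses to device memory can have side effects.  In icount mode,
     * I/O is only permitted at the end of a translation block; when the
     * access comes from anywhere else, cpu_io_recompile() regenerates the
     * block so that the I/O instruction terminates it. */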
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
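    /* The memory API reads at most 4 bytes at a time here, so 64-bit
     * accesses are split into two 32-bit reads in guest byte order. */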
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for unaligned or I/O access */
    /* XXX: more of this could be done in the memory access macros, in a
     * non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
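    /* Folding TLB_INVALID_MASK into the tag comparison makes an
     * invalidated entry (which has that bit set) always miss, since the
     * page-aligned address on the left can never have it set. */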
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
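        /* The access straddles a page boundary when adding DATA_SIZE - 1
         * to its page offset overflows the page; such accesses take the
         * slow path below. */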
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
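            /* Combine the two aligned loads: 'shift' is the bit offset of
             * the requested address within the aligned DATA_SIZE unit, and
             * the byte order decides which half supplies the low bits. */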
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache. */
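            /* Storing byte by byte costs one TLB lookup per byte, but the
             * byte helper already handles every alignment and page-cross
             * case, so no extra logic is needed here. */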
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE