/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"
#include "memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
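
/*
 * This header is a template instantiated once per access size: SHIFT is
 * defined as 0..3 before each inclusion.  As an illustration (MMUSUFFIX
 * and HELPER_PREFIX depend on the instantiation), SHIFT == 2 with
 * MMUSUFFIX _mmu and HELPER_PREFIX helper_ makes the glue() expansions
 * below produce a uint32_t load helper named helper_ldl_mmu.
 */
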
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

#ifndef CONFIG_TCG_PASS_AREG0
#define ENV_PARAM
#define ENV_VAR
#define CPU_PREFIX
#define HELPER_PREFIX __
#else
#define ENV_PARAM CPUArchState *env,
#define ENV_VAR env,
#define CPU_PREFIX cpu_
#define HELPER_PREFIX helper_
#endif
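
/*
 * With CONFIG_TCG_PASS_AREG0 the CPU state is passed to every helper as
 * an explicit first argument (ENV_PARAM in prototypes, ENV_VAR at call
 * sites); without it the helpers rely on the global env register and
 * both macros expand to nothing.
 */
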
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                        target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);

static inline DATA_TYPE glue(io_read, SUFFIX)(ENV_PARAM
                                              target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
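    /*
     * If this access hits a real device but the current TB was not
     * compiled with I/O allowed at this point, cpu_io_recompile() below
     * regenerates the TB so the access can be handled correctly.
     */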
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
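    /*
     * io_mem_read() handles at most 4 bytes per call, so an 8-byte
     * access (SHIFT == 3) is split into two 32-bit reads combined in
     * target byte order.
     */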
#if SHIFT <= 2
    res = io_mem_read(mr, physaddr, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = io_mem_read(mr, physaddr, 4) << 32;
    res |= io_mem_read(mr, physaddr + 4, 4);
#else
    res = io_mem_read(mr, physaddr, 4);
    res |= io_mem_read(mr, physaddr + 4, 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE
glue(glue(glue(HELPER_PREFIX, ld), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                       target_ulong addr,
                                                       int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done in a memory macro in a non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
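    /*
     * The TLB is a direct-mapped cache indexed by virtual page number:
     * e.g. with 4 KiB pages (TARGET_PAGE_BITS == 12) and a 256-entry
     * TLB, address bits [19:12] select the entry.
     */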
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
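        /*
         * TLB hit.  Including TLB_INVALID_MASK in the comparison mask
         * means an invalidated entry can never match a page-aligned
         * address, so no separate valid bit is needed.
         */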
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr,
                                                         mmu_idx, retaddr);
        } else {
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
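            /*
             * RAM fast path: addend is the host-address minus
             * guest-address offset for this page, so addr + addend is
             * the host virtual address holding the data.
             */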
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr + addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(ENV_VAR addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
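
/*
 * Note on the redo loop above: tlb_fill() either installs a valid TLB
 * entry and returns, after which the lookup is retried, or it raises a
 * guest exception and does not return here.
 */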

/* handle all unaligned cases */
static DATA_TYPE
glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                       target_ulong addr,
                                       int mmu_idx,
                                       void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ENV_VAR ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(ENV_VAR addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
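            /*
             * Worked example: a little-endian 4-byte load at offset 3
             * inside the aligned word gives shift == 24, so the result
             * combines the top byte of res1 (res1 >> 24) with the low
             * three bytes of res2 (res2 << 8).
             */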
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr + addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS
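
/*
 * Code accesses (instruction fetches) only ever read, so the store
 * helpers below are compiled out when this template is instantiated
 * with SOFTMMU_CODE_ACCESS defined.
 */
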
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(ENV_PARAM
                                          target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    MemoryRegion *mr = iotlb_to_region(physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_unassigned
        && mr != &io_mem_notdirty
        && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write(mr, physaddr, (val >> 32), 4);
    io_mem_write(mr, physaddr + 4, (uint32_t)val, 4);
#else
    io_mem_write(mr, physaddr, (uint32_t)val, 4);
    io_mem_write(mr, physaddr + 4, val >> 32, 4);
#endif
#endif /* SHIFT > 2 */
}

void glue(glue(glue(HELPER_PREFIX, st), SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                            target_ulong addr,
                                                            DATA_TYPE val,
                                                            int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
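    /*
     * addr_write keeps flag bits (e.g. TLB_NOTDIRTY for pages whose
     * dirty state is tracked) in its low bits, so the check below also
     * sends such stores through the I/O path.
     */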
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_VAR addr, val,
                                                   mmu_idx, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr + addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(ENV_VAR addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(ENV_PARAM
                                                   target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ENV_VAR ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
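            /*
             * The loop runs from the last byte down, presumably so that
             * a fault on the second page is taken before any byte of
             * the first page has been modified.
             */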
            for (i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(ENV_VAR addr + i,
                                          val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr + addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef ENV_PARAM
#undef ENV_VAR
#undef CPU_PREFIX
#undef HELPER_PREFIX