include/exec/softmmu_template.h
/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)
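
/* Note: SHIFT is defined by the file that includes this template (the
 * #undef list at the end removes it again).  The #if chain below only
 * accepts DATA_SIZE values of 1, 2, 4 or 8 bytes, i.e. SHIFT values 0 to 3.
 */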

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE int64_t
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE int32_t
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
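
/* Example (illustrative): for DATA_SIZE == 2, SUFFIX/LSUFFIX select the
 * "w"/"uw" variants, so the fast paths below end up calling lduw_le_p,
 * lduw_be_p, stw_le_p and stw_be_p, and the helper names carry a "w"
 * (or "uw" for the unsigned loads).
 */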

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif
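
/* Illustrative example (assuming a 64-bit host, TCG_TARGET_REG_BITS == 64):
 * for DATA_SIZE == 4 this picks WORD_TYPE = tcg_target_ulong, USUFFIX = ul
 * and SSUFFIX = sl, so the unsigned load helper returns the 32-bit datum
 * zero-extended to the host register size while the signed variant further
 * below returns it sign-extended.
 */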

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X) bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X) bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X) bswap16(X)
#else
# define BSWAP(X) (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X) (X)
# define TGT_LE(X) BSWAP(X)
#else
# define TGT_BE(X) BSWAP(X)
# define TGT_LE(X) (X)
#endif
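
/* In other words (illustrative): TGT_BE() leaves a value unchanged when the
 * guest is big-endian and byte-swaps otherwise, while TGT_LE() does the
 * opposite.  They are used below to convert between the helper's fixed
 * LE/BE view and the guest byte order used by the io callbacks.
 */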

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
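
/* Example expansion (assuming the including file defines MMUSUFFIX as _mmu,
 * as the data-access instantiations normally do): with DATA_SIZE == 4 these
 * become helper_le_ldul_mmu / helper_be_ldul_mmu for the unsigned loads,
 * helper_le_ldsl_mmu / helper_be_ldsl_mmu for the signed loads and
 * helper_le_stl_mmu / helper_be_stl_mmu for the stores; the DATA_SIZE == 1
 * case maps both endiannesses onto the helper_ret_* byte helpers.
 */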

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif

static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}

#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
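        /* Worked example (illustrative, DATA_SIZE == 4, addr & 3 == 2):
           shift is 16, so the low 16 bits of the result are the top half of
           res1 (the bytes at addr and addr + 1) and the upper 16 bits are
           the low half of res2 (the bytes at addr + 2 and addr + 3).  */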
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
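        /* Worked example (illustrative, DATA_SIZE == 4, addr & 3 == 2):
           shift is 16, so the upper 16 bits of the result are the low half
           of res1 (the bytes at addr and addr + 1) and the low 16 bits are
           the top half of res2 (the bytes at addr + 2 and addr + 3).  */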
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return helper_te_ld_name (env, addr, mmu_idx, GETRA());
}

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
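/* The cast to SDATA_TYPE truncates the loaded value to the data size, and
 * the implicit conversion back to WORD_TYPE widens it again, which is what
 * performs the sign extension here (e.g. for DATA_SIZE == 2 the int16_t
 * cast makes the returned register value the 16-bit datum sign-extended).
 */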
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
# endif
#endif

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
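            /* For illustration: byte i of the little-endian representation,
               i.e. bits [8*i+7 : 8*i] of val, goes to guest address
               addr + i.  */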
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(env, addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
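            /* For illustration: the most significant byte of val (i == 0)
               goes to guest address addr, the least significant byte to
               addr + DATA_SIZE - 1.  */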
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name