4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 * Generate inline load/store functions for all MMU modes (typically
21 * at least _user and _kernel) as well as _data versions, for all data
24 * Used by target op helpers.
26 * The syntax for the accessors is:
28 * load: cpu_ld{sign}{size}_{mmusuffix}(env, ptr)
 * store: cpu_st{sign}{size}_{mmusuffix}(env, ptr, val)
 *
 * sign is:
 * (empty): for 32 and 64 bit sizes
 *   u     : unsigned
 *   s     : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 * mmusuffix is one of the generic suffixes "data" or "code", or
 * (for softmmu configs) a target-specific MMU mode suffix as defined
 * in the target's cpu.h.
 */
50 #if defined(CONFIG_USER_ONLY)
51 /* sparc32plus has 64bit long but 32bit space address
52 * this can make bad result with g2h() and h2g()
54 #if TARGET_VIRT_ADDR_SPACE_BITS <= 32
55 typedef uint32_t abi_ptr
;
56 #define TARGET_ABI_FMT_ptr "%x"
58 typedef uint64_t abi_ptr
;
59 #define TARGET_ABI_FMT_ptr "%"PRIx64
62 /* All direct uses of g2h and h2g need to go away for usermode softmmu. */
63 #define g2h(x) ((void *)((unsigned long)(abi_ptr)(x) + guest_base))
65 #define guest_addr_valid(x) ((x) <= GUEST_ADDR_MAX)
66 #define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base)
68 static inline int guest_range_valid(unsigned long start
, unsigned long len
)
70 return len
- 1 <= GUEST_ADDR_MAX
&& start
<= GUEST_ADDR_MAX
- len
+ 1;
/* Host pointer -> guest address, without any range checking. */
#define h2g_nocheck(x) ({ \
    unsigned long __ret = (unsigned long)(x) - guest_base; \
    (abi_ptr)__ret; \
})

/* Host pointer -> guest address, asserting the result is in range. */
#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})
84 typedef target_ulong abi_ptr
;
85 #define TARGET_ABI_FMT_ptr TARGET_ABI_FMT_lx
88 #if defined(CONFIG_USER_ONLY)
90 extern __thread
uintptr_t helper_retaddr
;
92 /* In user-only mode we provide only the _code and _data accessors. */
94 #define MEMSUFFIX _data
96 #include "exec/cpu_ldst_useronly_template.h"
99 #include "exec/cpu_ldst_useronly_template.h"
102 #include "exec/cpu_ldst_useronly_template.h"
105 #include "exec/cpu_ldst_useronly_template.h"
108 #define MEMSUFFIX _code
111 #include "exec/cpu_ldst_useronly_template.h"
114 #include "exec/cpu_ldst_useronly_template.h"
117 #include "exec/cpu_ldst_useronly_template.h"
120 #include "exec/cpu_ldst_useronly_template.h"
126 /* The memory helpers for tcg-generated code need tcg_target_long etc. */
129 static inline target_ulong
tlb_addr_write(const CPUTLBEntry
*entry
)
131 #if TCG_OVERSIZED_GUEST
132 return entry
->addr_write
;
134 return atomic_read(&entry
->addr_write
);
138 /* Find the TLB index corresponding to the mmu_idx + address pair. */
139 static inline uintptr_t tlb_index(CPUArchState
*env
, uintptr_t mmu_idx
,
142 return (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
145 /* Find the TLB entry corresponding to the mmu_idx + address pair. */
146 static inline CPUTLBEntry
*tlb_entry(CPUArchState
*env
, uintptr_t mmu_idx
,
149 return &env
->tlb_table
[mmu_idx
][tlb_index(env
, mmu_idx
, addr
)];
152 #ifdef MMU_MODE0_SUFFIX
153 #define CPU_MMU_INDEX 0
154 #define MEMSUFFIX MMU_MODE0_SUFFIX
156 #include "exec/cpu_ldst_template.h"
159 #include "exec/cpu_ldst_template.h"
162 #include "exec/cpu_ldst_template.h"
165 #include "exec/cpu_ldst_template.h"
170 #if (NB_MMU_MODES >= 2) && defined(MMU_MODE1_SUFFIX)
171 #define CPU_MMU_INDEX 1
172 #define MEMSUFFIX MMU_MODE1_SUFFIX
174 #include "exec/cpu_ldst_template.h"
177 #include "exec/cpu_ldst_template.h"
180 #include "exec/cpu_ldst_template.h"
183 #include "exec/cpu_ldst_template.h"
188 #if (NB_MMU_MODES >= 3) && defined(MMU_MODE2_SUFFIX)
190 #define CPU_MMU_INDEX 2
191 #define MEMSUFFIX MMU_MODE2_SUFFIX
193 #include "exec/cpu_ldst_template.h"
196 #include "exec/cpu_ldst_template.h"
199 #include "exec/cpu_ldst_template.h"
202 #include "exec/cpu_ldst_template.h"
205 #endif /* (NB_MMU_MODES >= 3) */
207 #if (NB_MMU_MODES >= 4) && defined(MMU_MODE3_SUFFIX)
209 #define CPU_MMU_INDEX 3
210 #define MEMSUFFIX MMU_MODE3_SUFFIX
212 #include "exec/cpu_ldst_template.h"
215 #include "exec/cpu_ldst_template.h"
218 #include "exec/cpu_ldst_template.h"
221 #include "exec/cpu_ldst_template.h"
224 #endif /* (NB_MMU_MODES >= 4) */
226 #if (NB_MMU_MODES >= 5) && defined(MMU_MODE4_SUFFIX)
228 #define CPU_MMU_INDEX 4
229 #define MEMSUFFIX MMU_MODE4_SUFFIX
231 #include "exec/cpu_ldst_template.h"
234 #include "exec/cpu_ldst_template.h"
237 #include "exec/cpu_ldst_template.h"
240 #include "exec/cpu_ldst_template.h"
243 #endif /* (NB_MMU_MODES >= 5) */
245 #if (NB_MMU_MODES >= 6) && defined(MMU_MODE5_SUFFIX)
247 #define CPU_MMU_INDEX 5
248 #define MEMSUFFIX MMU_MODE5_SUFFIX
250 #include "exec/cpu_ldst_template.h"
253 #include "exec/cpu_ldst_template.h"
256 #include "exec/cpu_ldst_template.h"
259 #include "exec/cpu_ldst_template.h"
262 #endif /* (NB_MMU_MODES >= 6) */
264 #if (NB_MMU_MODES >= 7) && defined(MMU_MODE6_SUFFIX)
266 #define CPU_MMU_INDEX 6
267 #define MEMSUFFIX MMU_MODE6_SUFFIX
269 #include "exec/cpu_ldst_template.h"
272 #include "exec/cpu_ldst_template.h"
275 #include "exec/cpu_ldst_template.h"
278 #include "exec/cpu_ldst_template.h"
281 #endif /* (NB_MMU_MODES >= 7) */
283 #if (NB_MMU_MODES >= 8) && defined(MMU_MODE7_SUFFIX)
285 #define CPU_MMU_INDEX 7
286 #define MEMSUFFIX MMU_MODE7_SUFFIX
288 #include "exec/cpu_ldst_template.h"
291 #include "exec/cpu_ldst_template.h"
294 #include "exec/cpu_ldst_template.h"
297 #include "exec/cpu_ldst_template.h"
300 #endif /* (NB_MMU_MODES >= 8) */
302 #if (NB_MMU_MODES >= 9) && defined(MMU_MODE8_SUFFIX)
304 #define CPU_MMU_INDEX 8
305 #define MEMSUFFIX MMU_MODE8_SUFFIX
307 #include "exec/cpu_ldst_template.h"
310 #include "exec/cpu_ldst_template.h"
313 #include "exec/cpu_ldst_template.h"
316 #include "exec/cpu_ldst_template.h"
319 #endif /* (NB_MMU_MODES >= 9) */
321 #if (NB_MMU_MODES >= 10) && defined(MMU_MODE9_SUFFIX)
323 #define CPU_MMU_INDEX 9
324 #define MEMSUFFIX MMU_MODE9_SUFFIX
326 #include "exec/cpu_ldst_template.h"
329 #include "exec/cpu_ldst_template.h"
332 #include "exec/cpu_ldst_template.h"
335 #include "exec/cpu_ldst_template.h"
338 #endif /* (NB_MMU_MODES >= 10) */
340 #if (NB_MMU_MODES >= 11) && defined(MMU_MODE10_SUFFIX)
342 #define CPU_MMU_INDEX 10
343 #define MEMSUFFIX MMU_MODE10_SUFFIX
345 #include "exec/cpu_ldst_template.h"
348 #include "exec/cpu_ldst_template.h"
351 #include "exec/cpu_ldst_template.h"
354 #include "exec/cpu_ldst_template.h"
357 #endif /* (NB_MMU_MODES >= 11) */
359 #if (NB_MMU_MODES >= 12) && defined(MMU_MODE11_SUFFIX)
361 #define CPU_MMU_INDEX 11
362 #define MEMSUFFIX MMU_MODE11_SUFFIX
364 #include "exec/cpu_ldst_template.h"
367 #include "exec/cpu_ldst_template.h"
370 #include "exec/cpu_ldst_template.h"
373 #include "exec/cpu_ldst_template.h"
376 #endif /* (NB_MMU_MODES >= 12) */
378 #if (NB_MMU_MODES > 12)
379 #error "NB_MMU_MODES > 12 is not supported for now"
380 #endif /* (NB_MMU_MODES > 12) */
382 /* these access are slower, they must be as rare as possible */
383 #define CPU_MMU_INDEX (cpu_mmu_index(env, false))
384 #define MEMSUFFIX _data
386 #include "exec/cpu_ldst_template.h"
389 #include "exec/cpu_ldst_template.h"
392 #include "exec/cpu_ldst_template.h"
395 #include "exec/cpu_ldst_template.h"
399 #define CPU_MMU_INDEX (cpu_mmu_index(env, true))
400 #define MEMSUFFIX _code
401 #define SOFTMMU_CODE_ACCESS
404 #include "exec/cpu_ldst_template.h"
407 #include "exec/cpu_ldst_template.h"
410 #include "exec/cpu_ldst_template.h"
413 #include "exec/cpu_ldst_template.h"
417 #undef SOFTMMU_CODE_ACCESS
419 #endif /* defined(CONFIG_USER_ONLY) */
424 * @addr: guest virtual address to look up
425 * @access_type: 0 for read, 1 for write, 2 for execute
426 * @mmu_idx: MMU index to use for lookup
428 * Look up the specified guest virtual index in the TCG softmmu TLB.
429 * If the TLB contains a host virtual address suitable for direct RAM
430 * access, then return it. Otherwise (TLB miss, TLB entry is for an
431 * I/O access, etc) return NULL.
433 * This is the equivalent of the initial fast-path code used by
434 * TCG backends for guest load and store accesses.
436 static inline void *tlb_vaddr_to_host(CPUArchState
*env
, abi_ptr addr
,
437 int access_type
, int mmu_idx
)
439 #if defined(CONFIG_USER_ONLY)
442 CPUTLBEntry
*tlbentry
= tlb_entry(env
, mmu_idx
, addr
);
446 switch (access_type
) {
448 tlb_addr
= tlbentry
->addr_read
;
451 tlb_addr
= tlb_addr_write(tlbentry
);
454 tlb_addr
= tlbentry
->addr_code
;
457 g_assert_not_reached();
460 if (!tlb_hit(tlb_addr
, addr
)) {
461 /* TLB entry is for a different page */
465 if (tlb_addr
& ~TARGET_PAGE_MASK
) {
470 haddr
= addr
+ tlbentry
->addend
;
471 return (void *)haddr
;
472 #endif /* defined(CONFIG_USER_ONLY) */
475 #endif /* CPU_LDST_H */