 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * MMU mode suffixes are defined in target cpu.h.
/* XXX: find something cleaner.
 * Furthermore, this is false for 64-bit targets.
 */
16 #define ldul_user ldl_user
17 #define ldul_kernel ldl_kernel
18 #define ldul_hypv ldl_hypv
19 #define ldul_executive ldl_executive
20 #define ldul_supervisor ldl_supervisor
22 /* The memory helpers for tcg-generated code need tcg_target_long etc. */
26 #define MEMSUFFIX MMU_MODE0_SUFFIX
28 #include "exec/softmmu_header.h"
31 #include "exec/softmmu_header.h"
34 #include "exec/softmmu_header.h"
37 #include "exec/softmmu_header.h"
42 #define MEMSUFFIX MMU_MODE1_SUFFIX
44 #include "exec/softmmu_header.h"
47 #include "exec/softmmu_header.h"
50 #include "exec/softmmu_header.h"
53 #include "exec/softmmu_header.h"
57 #if (NB_MMU_MODES >= 3)
60 #define MEMSUFFIX MMU_MODE2_SUFFIX
62 #include "exec/softmmu_header.h"
65 #include "exec/softmmu_header.h"
68 #include "exec/softmmu_header.h"
71 #include "exec/softmmu_header.h"
74 #endif /* (NB_MMU_MODES >= 3) */
76 #if (NB_MMU_MODES >= 4)
79 #define MEMSUFFIX MMU_MODE3_SUFFIX
81 #include "exec/softmmu_header.h"
84 #include "exec/softmmu_header.h"
87 #include "exec/softmmu_header.h"
90 #include "exec/softmmu_header.h"
93 #endif /* (NB_MMU_MODES >= 4) */
95 #if (NB_MMU_MODES >= 5)
98 #define MEMSUFFIX MMU_MODE4_SUFFIX
100 #include "exec/softmmu_header.h"
103 #include "exec/softmmu_header.h"
106 #include "exec/softmmu_header.h"
109 #include "exec/softmmu_header.h"
112 #endif /* (NB_MMU_MODES >= 5) */
114 #if (NB_MMU_MODES >= 6)
116 #define ACCESS_TYPE 5
117 #define MEMSUFFIX MMU_MODE5_SUFFIX
119 #include "exec/softmmu_header.h"
122 #include "exec/softmmu_header.h"
125 #include "exec/softmmu_header.h"
128 #include "exec/softmmu_header.h"
131 #endif /* (NB_MMU_MODES >= 6) */
133 #if (NB_MMU_MODES > 6)
134 #error "NB_MMU_MODES > 6 is not supported for now"
135 #endif /* (NB_MMU_MODES > 6) */
/* These accesses are slower; they must be as rare as possible. */
138 #define ACCESS_TYPE (NB_MMU_MODES)
139 #define MEMSUFFIX _data
141 #include "exec/softmmu_header.h"
144 #include "exec/softmmu_header.h"
147 #include "exec/softmmu_header.h"
150 #include "exec/softmmu_header.h"
154 #define ldub(p) ldub_data(p)
155 #define ldsb(p) ldsb_data(p)
156 #define lduw(p) lduw_data(p)
157 #define ldsw(p) ldsw_data(p)
158 #define ldl(p) ldl_data(p)
159 #define ldq(p) ldq_data(p)
161 #define stb(p, v) stb_data(p, v)
162 #define stw(p, v) stw_data(p, v)
163 #define stl(p, v) stl_data(p, v)
164 #define stq(p, v) stq_data(p, v)
169 * @addr: guest virtual address to look up
170 * @access_type: 0 for read, 1 for write, 2 for execute
171 * @mmu_idx: MMU index to use for lookup
 * Look up the specified guest virtual address in the TCG softmmu TLB.
174 * If the TLB contains a host virtual address suitable for direct RAM
175 * access, then return it. Otherwise (TLB miss, TLB entry is for an
176 * I/O access, etc) return NULL.
178 * This is the equivalent of the initial fast-path code used by
179 * TCG backends for guest load and store accesses.
181 static inline void *tlb_vaddr_to_host(CPUArchState
*env
, target_ulong addr
,
182 int access_type
, int mmu_idx
)
184 int index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
185 CPUTLBEntry
*tlbentry
= &env
->tlb_table
[mmu_idx
][index
];
186 target_ulong tlb_addr
;
189 switch (access_type
) {
191 tlb_addr
= tlbentry
->addr_read
;
194 tlb_addr
= tlbentry
->addr_write
;
197 tlb_addr
= tlbentry
->addr_code
;
200 g_assert_not_reached();
203 if ((addr
& TARGET_PAGE_MASK
)
204 != (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
205 /* TLB entry is for a different page */
209 if (tlb_addr
& ~TARGET_PAGE_MASK
) {
214 haddr
= addr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
215 return (void *)haddr
;