 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * MMU mode suffixes are defined in target cpu.h.
 */
#if defined(CONFIG_USER_ONLY)
/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */

/* Guest virtual address -> host pointer (flat GUEST_BASE offset mapping). */
#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
/* Every host address maps back into the (larger or equal) guest space. */
#define h2g_valid(x) 1
#else
/* Host pointer maps to a valid guest address: it must fall inside the
 * guest virtual address space and below any reserved region. */
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
    (!RESERVED_VA || (__guest < RESERVED_VA)); \
})
#endif

/* Host pointer -> guest address, without range checking. */
#define h2g_nocheck(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    (abi_ulong)__ret; \
})

/* Host pointer -> guest address, asserting the mapping is valid. */
#define h2g(x) ({ \
    /* Check if given address fits target address space */ \
    assert(h2g_valid(x)); \
    h2g_nocheck(x); \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(intptr_t)(x)
#define laddr(x) (uint8_t *)(intptr_t)(x)
#endif
/* "Raw" accessors: load/store through laddr()/saddr() with no further
 * address translation.  ldfl/ldfq and stfl/stfq are the float variants. */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
82 #if defined(CONFIG_USER_ONLY)
84 /* if user mode, no other memory access functions */
85 #define ldub(p) ldub_raw(p)
86 #define ldsb(p) ldsb_raw(p)
87 #define lduw(p) lduw_raw(p)
88 #define ldsw(p) ldsw_raw(p)
89 #define ldl(p) ldl_raw(p)
90 #define ldq(p) ldq_raw(p)
91 #define ldfl(p) ldfl_raw(p)
92 #define ldfq(p) ldfq_raw(p)
93 #define stb(p, v) stb_raw(p, v)
94 #define stw(p, v) stw_raw(p, v)
95 #define stl(p, v) stl_raw(p, v)
96 #define stq(p, v) stq_raw(p, v)
97 #define stfl(p, v) stfl_raw(p, v)
98 #define stfq(p, v) stfq_raw(p, v)
100 #define cpu_ldub_code(env1, p) ldub_raw(p)
101 #define cpu_ldsb_code(env1, p) ldsb_raw(p)
102 #define cpu_lduw_code(env1, p) lduw_raw(p)
103 #define cpu_ldsw_code(env1, p) ldsw_raw(p)
104 #define cpu_ldl_code(env1, p) ldl_raw(p)
105 #define cpu_ldq_code(env1, p) ldq_raw(p)
/* Data accesses: in user mode all MMU-index variants collapse to the
 * raw accessors, so env is unused. */
#define cpu_ldub_data(env, addr) ldub_raw(addr)
#define cpu_lduw_data(env, addr) lduw_raw(addr)
#define cpu_ldsw_data(env, addr) ldsw_raw(addr)
#define cpu_ldl_data(env, addr) ldl_raw(addr)
#define cpu_ldq_data(env, addr) ldq_raw(addr)

#define cpu_stb_data(env, addr, data) stb_raw(addr, data)
#define cpu_stw_data(env, addr, data) stw_raw(addr, data)
#define cpu_stl_data(env, addr, data) stl_raw(addr, data)
#define cpu_stq_data(env, addr, data) stq_raw(addr, data)

/* Kernel-mode accesses are likewise identical to raw accesses here. */
#define cpu_ldub_kernel(env, addr) ldub_raw(addr)
#define cpu_lduw_kernel(env, addr) lduw_raw(addr)
#define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
#define cpu_ldl_kernel(env, addr) ldl_raw(addr)
#define cpu_ldq_kernel(env, addr) ldq_raw(addr)

#define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
#define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
#define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
#define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
139 /* The memory helpers for tcg-generated code need tcg_target_long etc. */
142 uint8_t helper_ldb_mmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
143 uint16_t helper_ldw_mmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
144 uint32_t helper_ldl_mmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
145 uint64_t helper_ldq_mmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
147 void helper_stb_mmu(CPUArchState
*env
, target_ulong addr
,
148 uint8_t val
, int mmu_idx
);
149 void helper_stw_mmu(CPUArchState
*env
, target_ulong addr
,
150 uint16_t val
, int mmu_idx
);
151 void helper_stl_mmu(CPUArchState
*env
, target_ulong addr
,
152 uint32_t val
, int mmu_idx
);
153 void helper_stq_mmu(CPUArchState
*env
, target_ulong addr
,
154 uint64_t val
, int mmu_idx
);
156 uint8_t helper_ldb_cmmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
157 uint16_t helper_ldw_cmmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
158 uint32_t helper_ldl_cmmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
159 uint64_t helper_ldq_cmmu(CPUArchState
*env
, target_ulong addr
, int mmu_idx
);
161 #define CPU_MMU_INDEX 0
162 #define MEMSUFFIX MMU_MODE0_SUFFIX
164 #include "exec/cpu_ldst_template.h"
167 #include "exec/cpu_ldst_template.h"
170 #include "exec/cpu_ldst_template.h"
173 #include "exec/cpu_ldst_template.h"
177 #define CPU_MMU_INDEX 1
178 #define MEMSUFFIX MMU_MODE1_SUFFIX
180 #include "exec/cpu_ldst_template.h"
183 #include "exec/cpu_ldst_template.h"
186 #include "exec/cpu_ldst_template.h"
189 #include "exec/cpu_ldst_template.h"
/* MMU modes 2-5 are optional; instantiate only when the target
 * declares that many modes. */
#if (NB_MMU_MODES >= 3)

#define CPU_MMU_INDEX 2
#define MEMSUFFIX MMU_MODE2_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 3) */

#if (NB_MMU_MODES >= 4)

#define CPU_MMU_INDEX 3
#define MEMSUFFIX MMU_MODE3_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 4) */

#if (NB_MMU_MODES >= 5)

#define CPU_MMU_INDEX 4
#define MEMSUFFIX MMU_MODE4_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 5) */

#if (NB_MMU_MODES >= 6)

#define CPU_MMU_INDEX 5
#define MEMSUFFIX MMU_MODE5_SUFFIX
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif /* (NB_MMU_MODES >= 6) */

#if (NB_MMU_MODES > 6)
#error "NB_MMU_MODES > 6 is not supported for now"
#endif /* (NB_MMU_MODES > 6) */
273 /* these access are slower, they must be as rare as possible */
274 #define CPU_MMU_INDEX (cpu_mmu_index(env))
275 #define MEMSUFFIX _data
277 #include "exec/cpu_ldst_template.h"
280 #include "exec/cpu_ldst_template.h"
283 #include "exec/cpu_ldst_template.h"
286 #include "exec/cpu_ldst_template.h"
290 #define ldub(p) ldub_data(p)
291 #define ldsb(p) ldsb_data(p)
292 #define lduw(p) lduw_data(p)
293 #define ldsw(p) ldsw_data(p)
294 #define ldl(p) ldl_data(p)
295 #define ldq(p) ldq_data(p)
297 #define stb(p, v) stb_data(p, v)
298 #define stw(p, v) stw_data(p, v)
299 #define stl(p, v) stl_data(p, v)
300 #define stq(p, v) stq_data(p, v)
302 #define CPU_MMU_INDEX (cpu_mmu_index(env))
303 #define MEMSUFFIX _code
304 #define SOFTMMU_CODE_ACCESS
307 #include "exec/cpu_ldst_template.h"
310 #include "exec/cpu_ldst_template.h"
313 #include "exec/cpu_ldst_template.h"
316 #include "exec/cpu_ldst_template.h"
320 #undef SOFTMMU_CODE_ACCESS
325 * @addr: guest virtual address to look up
326 * @access_type: 0 for read, 1 for write, 2 for execute
327 * @mmu_idx: MMU index to use for lookup
329 * Look up the specified guest virtual index in the TCG softmmu TLB.
330 * If the TLB contains a host virtual address suitable for direct RAM
331 * access, then return it. Otherwise (TLB miss, TLB entry is for an
332 * I/O access, etc) return NULL.
334 * This is the equivalent of the initial fast-path code used by
335 * TCG backends for guest load and store accesses.
337 static inline void *tlb_vaddr_to_host(CPUArchState
*env
, target_ulong addr
,
338 int access_type
, int mmu_idx
)
340 int index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
341 CPUTLBEntry
*tlbentry
= &env
->tlb_table
[mmu_idx
][index
];
342 target_ulong tlb_addr
;
345 switch (access_type
) {
347 tlb_addr
= tlbentry
->addr_read
;
350 tlb_addr
= tlbentry
->addr_write
;
353 tlb_addr
= tlbentry
->addr_code
;
356 g_assert_not_reached();
359 if ((addr
& TARGET_PAGE_MASK
)
360 != (tlb_addr
& (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
361 /* TLB entry is for a different page */
365 if (tlb_addr
& ~TARGET_PAGE_MASK
) {
370 haddr
= addr
+ env
->tlb_table
[mmu_idx
][index
].addend
;
371 return (void *)haddr
;
374 #endif /* defined(CONFIG_USER_ONLY) */
376 #endif /* CPU_LDST_H */