/*
 * cpu_ldst.h: Drop unused ld/st*_kernel defines
 * [qemu/ar7.git] / include / exec / cpu_ldst.h
 * blob 64d90876fd9011415023db2486f1f2ea945ef3b9
 */
/*
 *  Software MMU support
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Generate inline load/store functions for all MMU modes (typically
 * at least _user and _kernel) as well as _data versions, for all data
 * sizes.
 *
 * Used by target op helpers.
 *
 * MMU mode suffixes are defined in target cpu.h.
 */
28 #ifndef CPU_LDST_H
29 #define CPU_LDST_H
31 #if defined(CONFIG_USER_ONLY)
32 /* All direct uses of g2h and h2g need to go away for usermode softmmu. */
33 #define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
35 #if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
36 #define h2g_valid(x) 1
37 #else
38 #define h2g_valid(x) ({ \
39 unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
40 (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
41 (!RESERVED_VA || (__guest < RESERVED_VA)); \
43 #endif
45 #define h2g_nocheck(x) ({ \
46 unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
47 (abi_ulong)__ret; \
50 #define h2g(x) ({ \
51 /* Check if given address fits target address space */ \
52 assert(h2g_valid(x)); \
53 h2g_nocheck(x); \
56 #define saddr(x) g2h(x)
57 #define laddr(x) g2h(x)
59 #else /* !CONFIG_USER_ONLY */
60 /* NOTE: we use double casts if pointers and target_ulong have
61 different sizes */
62 #define saddr(x) (uint8_t *)(intptr_t)(x)
63 #define laddr(x) (uint8_t *)(intptr_t)(x)
64 #endif
/* "Raw" accessors: load/store directly through the host address
 * produced by laddr()/saddr(), with no MMU translation or fault
 * handling.  The ld*_p/st*_p primitives come from the byte-swapping
 * helpers in the common headers. */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
82 #if defined(CONFIG_USER_ONLY)
84 /* if user mode, no other memory access functions */
85 #define ldub(p) ldub_raw(p)
86 #define ldsb(p) ldsb_raw(p)
87 #define lduw(p) lduw_raw(p)
88 #define ldsw(p) ldsw_raw(p)
89 #define ldl(p) ldl_raw(p)
90 #define ldq(p) ldq_raw(p)
91 #define ldfl(p) ldfl_raw(p)
92 #define ldfq(p) ldfq_raw(p)
93 #define stb(p, v) stb_raw(p, v)
94 #define stw(p, v) stw_raw(p, v)
95 #define stl(p, v) stl_raw(p, v)
96 #define stq(p, v) stq_raw(p, v)
97 #define stfl(p, v) stfl_raw(p, v)
98 #define stfq(p, v) stfq_raw(p, v)
100 #define cpu_ldub_code(env1, p) ldub_raw(p)
101 #define cpu_ldsb_code(env1, p) ldsb_raw(p)
102 #define cpu_lduw_code(env1, p) lduw_raw(p)
103 #define cpu_ldsw_code(env1, p) ldsw_raw(p)
104 #define cpu_ldl_code(env1, p) ldl_raw(p)
105 #define cpu_ldq_code(env1, p) ldq_raw(p)
107 #define cpu_ldub_data(env, addr) ldub_raw(addr)
108 #define cpu_lduw_data(env, addr) lduw_raw(addr)
109 #define cpu_ldsw_data(env, addr) ldsw_raw(addr)
110 #define cpu_ldl_data(env, addr) ldl_raw(addr)
111 #define cpu_ldq_data(env, addr) ldq_raw(addr)
113 #define cpu_stb_data(env, addr, data) stb_raw(addr, data)
114 #define cpu_stw_data(env, addr, data) stw_raw(addr, data)
115 #define cpu_stl_data(env, addr, data) stl_raw(addr, data)
116 #define cpu_stq_data(env, addr, data) stq_raw(addr, data)
118 #define cpu_ldub_kernel(env, addr) ldub_raw(addr)
119 #define cpu_lduw_kernel(env, addr) lduw_raw(addr)
120 #define cpu_ldsw_kernel(env, addr) ldsw_raw(addr)
121 #define cpu_ldl_kernel(env, addr) ldl_raw(addr)
122 #define cpu_ldq_kernel(env, addr) ldq_raw(addr)
124 #define cpu_stb_kernel(env, addr, data) stb_raw(addr, data)
125 #define cpu_stw_kernel(env, addr, data) stw_raw(addr, data)
126 #define cpu_stl_kernel(env, addr, data) stl_raw(addr, data)
127 #define cpu_stq_kernel(env, addr, data) stq_raw(addr, data)
129 #define cpu_ldub_data(env, addr) ldub_raw(addr)
130 #define cpu_lduw_data(env, addr) lduw_raw(addr)
131 #define cpu_ldl_data(env, addr) ldl_raw(addr)
133 #define cpu_stb_data(env, addr, data) stb_raw(addr, data)
134 #define cpu_stw_data(env, addr, data) stw_raw(addr, data)
135 #define cpu_stl_data(env, addr, data) stl_raw(addr, data)
137 #else
139 /* The memory helpers for tcg-generated code need tcg_target_long etc. */
140 #include "tcg.h"
142 uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
143 uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
144 uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
145 uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
147 void helper_stb_mmu(CPUArchState *env, target_ulong addr,
148 uint8_t val, int mmu_idx);
149 void helper_stw_mmu(CPUArchState *env, target_ulong addr,
150 uint16_t val, int mmu_idx);
151 void helper_stl_mmu(CPUArchState *env, target_ulong addr,
152 uint32_t val, int mmu_idx);
153 void helper_stq_mmu(CPUArchState *env, target_ulong addr,
154 uint64_t val, int mmu_idx);
156 uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
157 uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
158 uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
159 uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
161 #define CPU_MMU_INDEX 0
162 #define MEMSUFFIX MMU_MODE0_SUFFIX
163 #define DATA_SIZE 1
164 #include "exec/cpu_ldst_template.h"
166 #define DATA_SIZE 2
167 #include "exec/cpu_ldst_template.h"
169 #define DATA_SIZE 4
170 #include "exec/cpu_ldst_template.h"
172 #define DATA_SIZE 8
173 #include "exec/cpu_ldst_template.h"
174 #undef CPU_MMU_INDEX
175 #undef MEMSUFFIX
177 #define CPU_MMU_INDEX 1
178 #define MEMSUFFIX MMU_MODE1_SUFFIX
179 #define DATA_SIZE 1
180 #include "exec/cpu_ldst_template.h"
182 #define DATA_SIZE 2
183 #include "exec/cpu_ldst_template.h"
185 #define DATA_SIZE 4
186 #include "exec/cpu_ldst_template.h"
188 #define DATA_SIZE 8
189 #include "exec/cpu_ldst_template.h"
190 #undef CPU_MMU_INDEX
191 #undef MEMSUFFIX
193 #if (NB_MMU_MODES >= 3)
195 #define CPU_MMU_INDEX 2
196 #define MEMSUFFIX MMU_MODE2_SUFFIX
197 #define DATA_SIZE 1
198 #include "exec/cpu_ldst_template.h"
200 #define DATA_SIZE 2
201 #include "exec/cpu_ldst_template.h"
203 #define DATA_SIZE 4
204 #include "exec/cpu_ldst_template.h"
206 #define DATA_SIZE 8
207 #include "exec/cpu_ldst_template.h"
208 #undef CPU_MMU_INDEX
209 #undef MEMSUFFIX
210 #endif /* (NB_MMU_MODES >= 3) */
212 #if (NB_MMU_MODES >= 4)
214 #define CPU_MMU_INDEX 3
215 #define MEMSUFFIX MMU_MODE3_SUFFIX
216 #define DATA_SIZE 1
217 #include "exec/cpu_ldst_template.h"
219 #define DATA_SIZE 2
220 #include "exec/cpu_ldst_template.h"
222 #define DATA_SIZE 4
223 #include "exec/cpu_ldst_template.h"
225 #define DATA_SIZE 8
226 #include "exec/cpu_ldst_template.h"
227 #undef CPU_MMU_INDEX
228 #undef MEMSUFFIX
229 #endif /* (NB_MMU_MODES >= 4) */
231 #if (NB_MMU_MODES >= 5)
233 #define CPU_MMU_INDEX 4
234 #define MEMSUFFIX MMU_MODE4_SUFFIX
235 #define DATA_SIZE 1
236 #include "exec/cpu_ldst_template.h"
238 #define DATA_SIZE 2
239 #include "exec/cpu_ldst_template.h"
241 #define DATA_SIZE 4
242 #include "exec/cpu_ldst_template.h"
244 #define DATA_SIZE 8
245 #include "exec/cpu_ldst_template.h"
246 #undef CPU_MMU_INDEX
247 #undef MEMSUFFIX
248 #endif /* (NB_MMU_MODES >= 5) */
250 #if (NB_MMU_MODES >= 6)
252 #define CPU_MMU_INDEX 5
253 #define MEMSUFFIX MMU_MODE5_SUFFIX
254 #define DATA_SIZE 1
255 #include "exec/cpu_ldst_template.h"
257 #define DATA_SIZE 2
258 #include "exec/cpu_ldst_template.h"
260 #define DATA_SIZE 4
261 #include "exec/cpu_ldst_template.h"
263 #define DATA_SIZE 8
264 #include "exec/cpu_ldst_template.h"
265 #undef CPU_MMU_INDEX
266 #undef MEMSUFFIX
267 #endif /* (NB_MMU_MODES >= 6) */
269 #if (NB_MMU_MODES > 6)
270 #error "NB_MMU_MODES > 6 is not supported for now"
271 #endif /* (NB_MMU_MODES > 6) */
273 /* these access are slower, they must be as rare as possible */
274 #define CPU_MMU_INDEX (cpu_mmu_index(env))
275 #define MEMSUFFIX _data
276 #define DATA_SIZE 1
277 #include "exec/cpu_ldst_template.h"
279 #define DATA_SIZE 2
280 #include "exec/cpu_ldst_template.h"
282 #define DATA_SIZE 4
283 #include "exec/cpu_ldst_template.h"
285 #define DATA_SIZE 8
286 #include "exec/cpu_ldst_template.h"
287 #undef CPU_MMU_INDEX
288 #undef MEMSUFFIX
290 #define ldub(p) ldub_data(p)
291 #define ldsb(p) ldsb_data(p)
292 #define lduw(p) lduw_data(p)
293 #define ldsw(p) ldsw_data(p)
294 #define ldl(p) ldl_data(p)
295 #define ldq(p) ldq_data(p)
297 #define stb(p, v) stb_data(p, v)
298 #define stw(p, v) stw_data(p, v)
299 #define stl(p, v) stl_data(p, v)
300 #define stq(p, v) stq_data(p, v)
302 #define CPU_MMU_INDEX (cpu_mmu_index(env))
303 #define MEMSUFFIX _code
304 #define SOFTMMU_CODE_ACCESS
306 #define DATA_SIZE 1
307 #include "exec/cpu_ldst_template.h"
309 #define DATA_SIZE 2
310 #include "exec/cpu_ldst_template.h"
312 #define DATA_SIZE 4
313 #include "exec/cpu_ldst_template.h"
315 #define DATA_SIZE 8
316 #include "exec/cpu_ldst_template.h"
318 #undef CPU_MMU_INDEX
319 #undef MEMSUFFIX
320 #undef SOFTMMU_CODE_ACCESS
323 * tlb_vaddr_to_host:
324 * @env: CPUArchState
325 * @addr: guest virtual address to look up
326 * @access_type: 0 for read, 1 for write, 2 for execute
327 * @mmu_idx: MMU index to use for lookup
329 * Look up the specified guest virtual index in the TCG softmmu TLB.
330 * If the TLB contains a host virtual address suitable for direct RAM
331 * access, then return it. Otherwise (TLB miss, TLB entry is for an
332 * I/O access, etc) return NULL.
334 * This is the equivalent of the initial fast-path code used by
335 * TCG backends for guest load and store accesses.
337 static inline void *tlb_vaddr_to_host(CPUArchState *env, target_ulong addr,
338 int access_type, int mmu_idx)
340 int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
341 CPUTLBEntry *tlbentry = &env->tlb_table[mmu_idx][index];
342 target_ulong tlb_addr;
343 uintptr_t haddr;
345 switch (access_type) {
346 case 0:
347 tlb_addr = tlbentry->addr_read;
348 break;
349 case 1:
350 tlb_addr = tlbentry->addr_write;
351 break;
352 case 2:
353 tlb_addr = tlbentry->addr_code;
354 break;
355 default:
356 g_assert_not_reached();
359 if ((addr & TARGET_PAGE_MASK)
360 != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
361 /* TLB entry is for a different page */
362 return NULL;
365 if (tlb_addr & ~TARGET_PAGE_MASK) {
366 /* IO access */
367 return NULL;
370 haddr = addr + env->tlb_table[mmu_idx][index].addend;
371 return (void *)haddr;
374 #endif /* defined(CONFIG_USER_ONLY) */
376 #endif /* CPU_LDST_H */