/*
 * Generate inline load/store functions for one MMU mode and data
 * size.
 *
 * Generate a store function as well as signed and unsigned loads. For
 * 32 and 64 bit cases, also generate floating point functions with
 * the same size.
 *
 * Not used directly but included from cpu_ldst.h.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
31 #define DATA_TYPE uint64_t
35 #define DATA_TYPE uint32_t
39 #define DATA_TYPE uint16_t
40 #define DATA_STYPE int16_t
44 #define DATA_TYPE uint8_t
45 #define DATA_STYPE int8_t
47 #error unsupported data size
/* Result type returned by the unsigned load: values narrower than
   32 bits are widened to uint32_t; 64-bit loads return uint64_t.  */
#if DATA_SIZE == 8
#define RES_TYPE uint64_t
#else
#define RES_TYPE uint32_t
#endif
/* Code (instruction-fetch) accesses compare against the addr_code TLB
   tag and use the _cmmu helpers; data accesses use addr_read/_mmu.  */
#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
#endif
64 /* generic load/store macros */
66 static inline RES_TYPE
67 glue(glue(cpu_ld
, USUFFIX
), MEMSUFFIX
)(CPUArchState
*env
, target_ulong ptr
)
75 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
76 mmu_idx
= CPU_MMU_INDEX
;
77 if (unlikely(env
->tlb_table
[mmu_idx
][page_index
].ADDR_READ
!=
78 (addr
& (TARGET_PAGE_MASK
| (DATA_SIZE
- 1))))) {
79 res
= glue(glue(helper_ld
, SUFFIX
), MMUSUFFIX
)(env
, addr
, mmu_idx
);
81 uintptr_t hostaddr
= addr
+ env
->tlb_table
[mmu_idx
][page_index
].addend
;
82 res
= glue(glue(ld
, USUFFIX
), _p
)((uint8_t *)hostaddr
);
89 glue(glue(cpu_lds
, SUFFIX
), MEMSUFFIX
)(CPUArchState
*env
, target_ulong ptr
)
96 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
97 mmu_idx
= CPU_MMU_INDEX
;
98 if (unlikely(env
->tlb_table
[mmu_idx
][page_index
].ADDR_READ
!=
99 (addr
& (TARGET_PAGE_MASK
| (DATA_SIZE
- 1))))) {
100 res
= (DATA_STYPE
)glue(glue(helper_ld
, SUFFIX
),
101 MMUSUFFIX
)(env
, addr
, mmu_idx
);
103 uintptr_t hostaddr
= addr
+ env
->tlb_table
[mmu_idx
][page_index
].addend
;
104 res
= glue(glue(lds
, SUFFIX
), _p
)((uint8_t *)hostaddr
);
110 #ifndef SOFTMMU_CODE_ACCESS
112 /* generic store macro */
115 glue(glue(cpu_st
, SUFFIX
), MEMSUFFIX
)(CPUArchState
*env
, target_ulong ptr
,
123 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
124 mmu_idx
= CPU_MMU_INDEX
;
125 if (unlikely(env
->tlb_table
[mmu_idx
][page_index
].addr_write
!=
126 (addr
& (TARGET_PAGE_MASK
| (DATA_SIZE
- 1))))) {
127 glue(glue(helper_st
, SUFFIX
), MMUSUFFIX
)(env
, addr
, v
, mmu_idx
);
129 uintptr_t hostaddr
= addr
+ env
->tlb_table
[mmu_idx
][page_index
].addend
;
130 glue(glue(st
, SUFFIX
), _p
)((uint8_t *)hostaddr
, v
);
137 static inline float64
glue(cpu_ldfq
, MEMSUFFIX
)(CPUArchState
*env
,
144 u
.i
= glue(cpu_ldq
, MEMSUFFIX
)(env
, ptr
);
148 static inline void glue(cpu_stfq
, MEMSUFFIX
)(CPUArchState
*env
,
149 target_ulong ptr
, float64 v
)
156 glue(cpu_stq
, MEMSUFFIX
)(env
, ptr
, u
.i
);
158 #endif /* DATA_SIZE == 8 */
161 static inline float32
glue(cpu_ldfl
, MEMSUFFIX
)(CPUArchState
*env
,
168 u
.i
= glue(cpu_ldl
, MEMSUFFIX
)(env
, ptr
);
172 static inline void glue(cpu_stfl
, MEMSUFFIX
)(CPUArchState
*env
,
173 target_ulong ptr
, float32 v
)
180 glue(cpu_stl
, MEMSUFFIX
)(env
, ptr
, u
.i
);
182 #endif /* DATA_SIZE == 4 */
184 #endif /* !SOFTMMU_CODE_ACCESS */