/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
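
/* This header is a multiple-inclusion template: the includer defines SHIFT
 * (and MMUSUFFIX) before including it, once per access size.  An illustrative
 * instantiation (the exact includer macros are set up elsewhere in the build):
 *
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *
 * generates the 32-bit load/store handlers.
 */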

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#else
#define READ_ACCESS_TYPE 0
#endif
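
/* Access type values passed to tlb_fill(): 0 for a data load, 1 for a data
   store (see the store handlers below), and 2 for a code fetch, i.e. when
   this template is compiled with SOFTMMU_CODE_ACCESS defined. */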

static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int is_user,
                                                        void *retaddr);
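
/* slow_ld is forward-declared because the fast-path handler below calls it
   for accesses that are unaligned or that span two pages. */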

static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
                                              target_ulong tlb_addr)
{
    DATA_TYPE res;
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}
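
/* Note: the io_mem_read/io_mem_write handler tables only go up to 32-bit
   width (index [2]), so 64-bit accesses (SHIFT > 2) are split into two
   32-bit accesses, ordered according to the target endianness. */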

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                         int is_user)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    unsigned long physaddr;
    void *retaddr;

    /* test if there is a match for unaligned or IO access */
    /* XXX: could be done more efficiently with a memory macro, in a
       non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* slow unaligned access (it spans two pages or IO) */
        do_unaligned_access:
            retaddr = GETPC();
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         is_user, retaddr);
        } else {
            /* unaligned access in the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
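
/* Two details of the fast path above: the TLB entry is matched against
   TARGET_PAGE_MASK | TLB_INVALID_MASK, so an invalidated entry (which has
   TLB_INVALID_MASK set in its address field) can never equal a page-aligned
   address and always falls through to tlb_fill(); and GETPC() records the
   host return address so tlb_fill() can restore the guest CPU state of the
   faulting instruction before raising an exception. */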

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int is_user,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    unsigned long physaddr;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_read[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          is_user, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          is_user, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, is_user, retaddr);
        goto redo;
    }
    return res;
}
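
/* Worked example of the recombination above (little endian, DATA_SIZE == 4):
   for a load at an address with (addr & 3) == 3, shift is 24, so
       res = (res1 >> 24) | (res2 << 8);
   takes the top byte of the aligned word at addr1 (the byte at addr) and
   the three low bytes of the aligned word at addr2. */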

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
                                          DATA_TYPE val,
                                          target_ulong tlb_addr,
                                          void *retaddr)
{
    int index;

    index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    env->mem_write_vaddr = tlb_addr;
    env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}
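
/* mem_write_vaddr and mem_write_pc are saved before invoking the handler so
   that, if the I/O write faults or hits memory holding translated code, the
   offending guest store can be identified and restarted. */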

void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                    DATA_TYPE val,
                                                    int is_user)
{
    unsigned long physaddr;
    target_ulong tlb_addr;
    void *retaddr;
    int index;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_write[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            retaddr = GETPC();
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   is_user, retaddr);
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        retaddr = GETPC();
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}
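
/* Stores always pass access type 1 to tlb_fill(), matching the encoding
   noted next to READ_ACCESS_TYPE above.  REGPARM asks for the leading
   arguments to be passed in registers on hosts that support it, keeping
   calls from generated code cheap. */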

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int is_user,
                                                   void *retaddr)
{
    unsigned long physaddr;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_write[is_user][index].address;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = addr + env->tlb_write[is_user][index].addend;
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
        } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            for (i = 0; i < DATA_SIZE; i++) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          is_user, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          is_user, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
        }
    } else {
        /* the page is not in the TLB: fill it */
        tlb_fill(addr, 1, is_user, retaddr);
        goto redo;
    }
}
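
/* Worked example of the byte loop above (big endian, DATA_SIZE == 2):
   i == 0 stores val >> 8 (the most significant byte) at addr, and
   i == 1 stores val >> 0 at addr + 1. */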

#endif /* !defined(SOFTMMU_CODE_ACCESS) */
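
/* Undefine the per-instantiation macros so this header can be included
   again with a different SHIFT. */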
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE