/*
 *  Physical memory access templates
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2015 Linaro, Inc.
 *  Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

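/*
 * This file is a template, not a standalone translation unit: the file
 * that includes it is expected to define SUFFIX, ARG1_DECL, ARG1,
 * TRANSLATE, RCU_READ_LOCK and RCU_READ_UNLOCK first.  As a sketch
 * (the exact macro bodies used by the real includers may differ), an
 * AddressSpace instantiation looks roughly like:
 *
 *   #define SUFFIX
 *   #define ARG1         as
 *   #define ARG1_DECL    AddressSpace *as
 *   #define TRANSLATE(...)        address_space_translate(as, __VA_ARGS__)
 *   #define RCU_READ_LOCK(...)    rcu_read_lock()
 *   #define RCU_READ_UNLOCK(...)  rcu_read_unlock()
 *   #include "memory_ldst.inc.c"
 *
 * Every helper below follows the same shape: translate the address,
 * then either access guest RAM directly or dispatch to MMIO, and
 * optionally report the MemTxResult to the caller.
 */
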
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

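/*
 * Usage sketch (assuming the AddressSpace instantiation above, so these
 * helpers take an AddressSpace *): read a little-endian 32-bit value
 * from guest physical address gpa and check the transaction result:
 *
 *   MemTxResult res;
 *   uint32_t v = address_space_ldl_le(&address_space_memory, gpa,
 *                                     MEMTXATTRS_UNSPECIFIED, &res);
 *   if (res != MEMTX_OK) {
 *       // the access hit a hole or a device that failed the transaction
 *   }
 *
 * Passing NULL for the result pointer is allowed when the caller does
 * not care about transaction failures.
 */
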
/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

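/*
 * Byte loads need no endianness handling, so unlike the wider accessors
 * there is no _internal/_le/_be family for ldub.  A call looks like:
 *
 *   uint8_t b = address_space_ldub(&address_space_memory, gpa,
 *                                  MEMTXATTRS_UNSPECIFIED, NULL);
 */
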
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

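/*
 * Sketch of the intended use of the _notdirty variant (hypothetical
 * helper, loosely modelled on target page-table walkers): setting an
 * accessed/dirty flag in a guest PTE without forcing retranslation of
 * any guest code that happens to live on the same page:
 *
 *   pte = address_space_ldl(&address_space_memory, pte_addr,
 *                           MEMTXATTRS_UNSPECIFIED, NULL);
 *   pte |= PTE_ACCESSED_FLAG;   // PTE_ACCESSED_FLAG is illustrative
 *   address_space_stl_notdirty(&address_space_memory, pte_addr, pte,
 *                              MEMTXATTRS_UNSPECIFIED, NULL);
 */
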
/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

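/*
 * Note on the MMIO path above: the value is byte-swapped before dispatch
 * whenever the requested endianness differs from the target's native
 * order.  Concretely, on a little-endian target a call such as
 * address_space_stl_be(..., 0x11223344, ...) hands 0x44332211 to
 * memory_region_dispatch_write(), so the device model sees the bytes
 * in big-endian order.
 */
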
void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK

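/*
 * The template is typically instantiated more than once.  For example,
 * exec.c also builds a MemoryRegionCache flavour, roughly (sketch, the
 * exact macro bodies may differ):
 *
 *   #define ARG1_DECL   MemoryRegionCache *cache
 *   #define ARG1        cache
 *   #define SUFFIX      _cached
 *   #define TRANSLATE(addr, ...) \
 *       address_space_translate_cached(cache, addr, __VA_ARGS__)
 *   #include "memory_ldst.inc.c"
 *
 * which yields address_space_ldl_cached(), address_space_stq_cached(),
 * and so on.
 */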