/*
 *  Physical memory access templates
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2015 Linaro, Inc.
 *  Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
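
/*
 * This file is meant to be #included with the template macros below
 * already defined by the includer (exec.c instantiates it once for
 * plain AddressSpace accessors and once, with SUFFIX == _cached, for
 * MemoryRegionCache):
 *
 *   SUFFIX         - appended to every generated function name
 *   ARG1_DECL/ARG1 - declaration and use of the leading argument,
 *                    e.g. "AddressSpace *as" / "as"
 *   TRANSLATE      - translate a physical address to a MemoryRegion
 *   IS_DIRECT      - true if the region can be accessed as host RAM
 *   MAP_RAM        - return a host pointer into the region's RAM
 *   INVALIDATE     - mark stored-to RAM dirty and drop translated code
 *   RCU_READ_LOCK/RCU_READ_UNLOCK - bracket the memory map lookup
 */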
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 4 || !IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
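
/*
 * The remaining fixed-size loads and stores all follow the shape of
 * address_space_ldl_internal above: translate under the RCU read lock,
 * take the MMIO dispatch path when the region cannot be accessed
 * directly (byte-swapping when the requested endianness differs from
 * the target's), or read/write through a host pointer in the RAM case.
 */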

uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

uint32_t glue(ldl_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_ldl, SUFFIX)(ARG1, addr,
                                           MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t glue(ldl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_ldl_le, SUFFIX)(ARG1, addr,
                                              MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t glue(ldl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_ldl_be, SUFFIX)(ARG1, addr,
                                              MEMTXATTRS_UNSPECIFIED, NULL);
}
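
/*
 * Illustrative use of the generated accessors, assuming the plain
 * AddressSpace instantiation (empty SUFFIX, ARG1_DECL expanding to
 * "AddressSpace *as"); this sketch is not part of the file:
 *
 *     MemTxResult r;
 *     uint32_t v = address_space_ldl_le(as, addr,
 *                                       MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         ... handle the failed transaction ...
 *     }
 *     uint32_t v2 = ldl_le_phys(as, addr);   // same load, errors ignored
 */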

/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 8 || !IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_ldq, SUFFIX)(ARG1, addr,
                                           MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t glue(ldq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_ldq_le, SUFFIX)(ARG1, addr,
                                              MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_ldq_be, SUFFIX)(ARG1, addr,
                                              MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (!IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
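
/*
 * Byte accesses have no endianness, so address_space_ldub above (and
 * address_space_stb below) need no _internal helper or _le/_be variants.
 */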

uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_ldub, SUFFIX)(ARG1, addr,
                                            MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false);
    if (l < 2 || !IS_DIRECT(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_lduw, SUFFIX)(ARG1, addr,
                                            MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
                                               MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
{
    return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
                                               MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 4 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = MAP_RAM(mr, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
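
/*
 * Typical users of the _notdirty store are target page-table walkers
 * setting accessed/dirty bits in PTEs: masking DIRTY_MEMORY_CODE out of
 * the dirty mask means translated code on the page is not invalidated
 * by the update.  (Which callers do this is target-specific.)
 */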

void glue(stl_phys_notdirty, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stl_notdirty, SUFFIX)(ARG1, addr, val,
                                             MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 4 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        INVALIDATE(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
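
/*
 * Unlike the _notdirty variant above, the RAM path of a normal store
 * ends with INVALIDATE (invalidate_and_set_dirty() in the exec.c
 * instantiation), so the page is marked dirty and any translated code
 * derived from it is discarded.
 */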

void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stl, SUFFIX)(ARG1, addr, val,
                                    MEMTXATTRS_UNSPECIFIED, NULL);
}

void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stl_le, SUFFIX)(ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}

void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stl_be, SUFFIX)(ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (!IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        stb_p(ptr, val);
        INVALIDATE(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stb, SUFFIX)(ARG1, addr, val,
                                    MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 2 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        INVALIDATE(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stw, SUFFIX)(ARG1, addr, val,
                                    MEMTXATTRS_UNSPECIFIED, NULL);
}

void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}

void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
{
    glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}

static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true);
    if (l < 8 || !IS_DIRECT(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
    } else {
        /* RAM case */
        ptr = MAP_RAM(mr, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        INVALIDATE(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
    glue(address_space_stq, SUFFIX)(ARG1, addr, val,
                                    MEMTXATTRS_UNSPECIFIED, NULL);
}

void glue(stq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
    glue(address_space_stq_le, SUFFIX)(ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}

void glue(stq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
{
    glue(address_space_stq_be, SUFFIX)(ARG1, addr, val,
                                       MEMTXATTRS_UNSPECIFIED, NULL);
}

#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef IS_DIRECT
#undef MAP_RAM
#undef INVALIDATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK