/*
 *  Physical memory access templates
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2015 Linaro, Inc.
 *  Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

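/*
 * This file is a template, not a standalone translation unit.  The including
 * file must define the macros used below before #including it:
 *
 *   SUFFIX          - pasted onto each function name by glue()
 *   ARG1_DECL, ARG1 - declaration and name of the leading argument
 *   TRANSLATE       - routine that translates addr into a MemoryRegion
 *   RCU_READ_LOCK, RCU_READ_UNLOCK - RCU critical-section hooks
 *
 * A minimal sketch of one instantiation (the macro bodies shown here are
 * assumptions for illustration; the real definitions live in the including
 * file and may differ):
 *
 *   #define SUFFIX
 *   #define ARG1_DECL            AddressSpace *as
 *   #define ARG1                 as
 *   #define TRANSLATE(...)       address_space_translate(as, __VA_ARGS__)
 *   #define RCU_READ_LOCK(...)   rcu_read_lock()
 *   #define RCU_READ_UNLOCK(...) rcu_read_unlock()
 *   #include "memory_ldst.c.inc"      (i.e. this file)
 *
 * which generates address_space_ldl(), address_space_stl(), etc., each taking
 * an AddressSpace * as its first parameter.
 */
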
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
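/* (For illustration: a typical user of this path is a target MMU walker
 * updating accessed/dirty bits in guest page-table entries; routing the
 * write here avoids invalidating translated code on that page.) */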
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK