/*
 *  Physical memory access templates
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2015 Linaro, Inc.
 *  Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
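
/*
 * This file is a template, not a standalone translation unit: the including
 * file must define ARG1_DECL, ARG1, SUFFIX, TRANSLATE, RCU_READ_LOCK and
 * RCU_READ_UNLOCK before #including it.  As a sketch of the plain
 * AddressSpace instantiation (modeled on QEMU's physmem.c; the exact
 * spelling may differ between versions):
 *
 *     #define ARG1_DECL            AddressSpace *as
 *     #define ARG1                 as
 *     #define SUFFIX
 *     #define TRANSLATE(...)       address_space_translate(as, __VA_ARGS__)
 *     #define RCU_READ_LOCK(...)   rcu_read_lock()
 *     #define RCU_READ_UNLOCK(...) rcu_read_unlock()
 *     #include "memory_ldst.c.inc"
 *
 * With these bindings, glue(address_space_ldl, SUFFIX) below expands to
 * address_space_ldl(AddressSpace *as, hwaddr addr, ...).
 */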
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}
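
/*
 * Usage sketch for the three public load variants above (assuming the plain
 * AddressSpace instantiation described at the top of this file, so SUFFIX is
 * empty and ARG1 is an AddressSpace pointer):
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, addr,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         // the transaction failed; v is not meaningful
 *     }
 *
 * A NULL result pointer is also accepted, in which case the transaction
 * status is discarded.
 */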

/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}
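
/*
 * Byte accesses are simpler than the wider loads above: a single byte has no
 * endianness and cannot straddle a region boundary, so there is no endian
 * parameter, no alignment requirement, and no check of the translated length.
 */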
uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
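
/*
 * Clearing DIRTY_MEMORY_CODE above is what makes this a "notdirty" store:
 * translated code blocks covering the page are not invalidated, which is
 * safe only for data that is never executed, such as page table entries
 * whose dirty bits are tracked through these writes.  The bits left in
 * dirty_log_mask (e.g. DIRTY_MEMORY_MIGRATION and DIRTY_MEMORY_VGA, when
 * logging is enabled) are still set, so migration and display code still
 * observe the write.
 */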

/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}
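
/*
 * The template parameters are #undef'd below so that a translation unit can
 * include this file more than once with different bindings.  As a sketch of
 * a second instantiation (modeled on the MemoryRegionCache slow path in
 * QEMU's physmem.c; names may vary between versions):
 *
 *     #define ARG1_DECL            MemoryRegionCache *cache
 *     #define ARG1                 cache
 *     #define SUFFIX               _cached_slow
 *     #define TRANSLATE(...)       address_space_translate_cached(cache, __VA_ARGS__)
 *     #define RCU_READ_LOCK()      ((void)0)
 *     #define RCU_READ_UNLOCK()    ((void)0)
 *     #include "memory_ldst.c.inc"
 */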
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK