/*
 *  Physical memory access templates
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2015 Linaro, Inc.
 *  Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

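/*
 * This file is a template: it is #included multiple times with different
 * definitions of SUFFIX, ARG1_DECL, ARG1, TRANSLATE, RCU_READ_LOCK and
 * RCU_READ_UNLOCK, so that glue() stamps out one accessor family per
 * instantiation.  A minimal sketch of one such instantiation (modelled on
 * how QEMU's physmem.c includes this file; the macro bodies there may
 * differ in detail):
 *
 *   #define ARG1_DECL             AddressSpace *as
 *   #define ARG1                  as
 *   #define SUFFIX
 *   #define TRANSLATE(...)        address_space_translate(as, __VA_ARGS__)
 *   #define RCU_READ_LOCK(...)    rcu_read_lock()
 *   #define RCU_READ_UNLOCK(...)  rcu_read_unlock()
 *   #include "memory_ldst.c.inc"
 *
 * With an empty SUFFIX, this generates address_space_ldl(),
 * address_space_stq_le() and so on.
 */
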
/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 4, mr, false);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

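/*
 * Example (sketch): reading a 32-bit little-endian descriptor field from
 * guest memory.  This assumes the empty-SUFFIX instantiation shown above;
 * desc_addr is illustrative.
 *
 *   MemTxResult res;
 *   uint32_t flags = address_space_ldl_le(&address_space_memory, desc_addr,
 *                                         MEMTXATTRS_UNSPECIFIED, &res);
 */
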
/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 8, mr, false);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

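/*
 * Callers that care about bus errors pass a MemTxResult pointer; passing
 * NULL discards the status.  A sketch under the same empty-SUFFIX
 * assumption (paddr is illustrative):
 *
 *   MemTxResult res;
 *   uint64_t v = address_space_ldq_le(&address_space_memory, paddr,
 *                                     MEMTXATTRS_UNSPECIFIED, &res);
 *   if (res != MEMTX_OK) {
 *       qemu_log_mask(LOG_GUEST_ERROR, "DMA read failed\n");
 *   }
 */
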
uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 1, mr, false);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        fuzz_dma_read_cb(addr, 2, mr, false);
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty,
   and the code inside it is not invalidated.  This is useful when the
   dirty bits are used to track modified PTEs. */
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

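/*
 * Example (sketch, names illustrative): a softmmu page-table walker
 * setting an accessed bit in a guest PTE.  Using the _notdirty store
 * avoids invalidating translated code on the PTE's page, while the
 * remaining dirty bits (e.g. migration) still record the update:
 *
 *   pte |= PTE_ACCESSED;
 *   address_space_stl_notdirty(&address_space_memory, pte_addr, pte,
 *                              MEMTXATTRS_UNSPECIFIED, NULL);
 */
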
/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

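/*
 * Example (sketch): a device model writing a 64-bit completion record
 * back to guest memory.  Names are illustrative and assume the
 * empty-SUFFIX instantiation:
 *
 *   address_space_stq_le(&address_space_memory, ring_addr, status,
 *                        MEMTXATTRS_UNSPECIFIED, NULL);
 *
 * Unlike the _notdirty variant above, these stores mark the page dirty
 * via invalidate_and_set_dirty(), which also drops any TCG translations
 * covering the written range.
 */
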
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK