s390x: drop inclusion of sysemu/kvm.h from some files
[qemu/ar7.git] / memory_ldst.inc.c
blob5dbff9cef86ffd1c0bb2a7a3ea51554ccfdcce08
/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 /* warning: addr must be aligned */
23 static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
24 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
25 enum device_endian endian)
27 uint8_t *ptr;
28 uint64_t val;
29 MemoryRegion *mr;
30 hwaddr l = 4;
31 hwaddr addr1;
32 MemTxResult r;
33 bool release_lock = false;
35 RCU_READ_LOCK();
36 mr = TRANSLATE(addr, &addr1, &l, false);
37 if (l < 4 || !IS_DIRECT(mr, false)) {
38 release_lock |= prepare_mmio_access(mr);
40 /* I/O case */
41 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
42 #if defined(TARGET_WORDS_BIGENDIAN)
43 if (endian == DEVICE_LITTLE_ENDIAN) {
44 val = bswap32(val);
46 #else
47 if (endian == DEVICE_BIG_ENDIAN) {
48 val = bswap32(val);
50 #endif
51 } else {
52 /* RAM case */
53 ptr = MAP_RAM(mr, addr1);
54 switch (endian) {
55 case DEVICE_LITTLE_ENDIAN:
56 val = ldl_le_p(ptr);
57 break;
58 case DEVICE_BIG_ENDIAN:
59 val = ldl_be_p(ptr);
60 break;
61 default:
62 val = ldl_p(ptr);
63 break;
65 r = MEMTX_OK;
67 if (result) {
68 *result = r;
70 if (release_lock) {
71 qemu_mutex_unlock_iothread();
73 RCU_READ_UNLOCK();
74 return val;
77 uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
78 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
80 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
81 DEVICE_NATIVE_ENDIAN);
84 uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
85 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
87 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
88 DEVICE_LITTLE_ENDIAN);
91 uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
92 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
94 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
95 DEVICE_BIG_ENDIAN);
98 uint32_t glue(ldl_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
100 return glue(address_space_ldl, SUFFIX)(ARG1, addr,
101 MEMTXATTRS_UNSPECIFIED, NULL);
104 uint32_t glue(ldl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
106 return glue(address_space_ldl_le, SUFFIX)(ARG1, addr,
107 MEMTXATTRS_UNSPECIFIED, NULL);
110 uint32_t glue(ldl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
112 return glue(address_space_ldl_be, SUFFIX)(ARG1, addr,
113 MEMTXATTRS_UNSPECIFIED, NULL);
116 /* warning: addr must be aligned */
117 static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
118 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
119 enum device_endian endian)
121 uint8_t *ptr;
122 uint64_t val;
123 MemoryRegion *mr;
124 hwaddr l = 8;
125 hwaddr addr1;
126 MemTxResult r;
127 bool release_lock = false;
129 RCU_READ_LOCK();
130 mr = TRANSLATE(addr, &addr1, &l, false);
131 if (l < 8 || !IS_DIRECT(mr, false)) {
132 release_lock |= prepare_mmio_access(mr);
134 /* I/O case */
135 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
136 #if defined(TARGET_WORDS_BIGENDIAN)
137 if (endian == DEVICE_LITTLE_ENDIAN) {
138 val = bswap64(val);
140 #else
141 if (endian == DEVICE_BIG_ENDIAN) {
142 val = bswap64(val);
144 #endif
145 } else {
146 /* RAM case */
147 ptr = MAP_RAM(mr, addr1);
148 switch (endian) {
149 case DEVICE_LITTLE_ENDIAN:
150 val = ldq_le_p(ptr);
151 break;
152 case DEVICE_BIG_ENDIAN:
153 val = ldq_be_p(ptr);
154 break;
155 default:
156 val = ldq_p(ptr);
157 break;
159 r = MEMTX_OK;
161 if (result) {
162 *result = r;
164 if (release_lock) {
165 qemu_mutex_unlock_iothread();
167 RCU_READ_UNLOCK();
168 return val;
171 uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
172 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
174 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
175 DEVICE_NATIVE_ENDIAN);
178 uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
179 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
181 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
182 DEVICE_LITTLE_ENDIAN);
185 uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
186 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
188 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
189 DEVICE_BIG_ENDIAN);
192 uint64_t glue(ldq_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
194 return glue(address_space_ldq, SUFFIX)(ARG1, addr,
195 MEMTXATTRS_UNSPECIFIED, NULL);
198 uint64_t glue(ldq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
200 return glue(address_space_ldq_le, SUFFIX)(ARG1, addr,
201 MEMTXATTRS_UNSPECIFIED, NULL);
204 uint64_t glue(ldq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
206 return glue(address_space_ldq_be, SUFFIX)(ARG1, addr,
207 MEMTXATTRS_UNSPECIFIED, NULL);
210 uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
211 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
213 uint8_t *ptr;
214 uint64_t val;
215 MemoryRegion *mr;
216 hwaddr l = 1;
217 hwaddr addr1;
218 MemTxResult r;
219 bool release_lock = false;
221 RCU_READ_LOCK();
222 mr = TRANSLATE(addr, &addr1, &l, false);
223 if (!IS_DIRECT(mr, false)) {
224 release_lock |= prepare_mmio_access(mr);
226 /* I/O case */
227 r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
228 } else {
229 /* RAM case */
230 ptr = MAP_RAM(mr, addr1);
231 val = ldub_p(ptr);
232 r = MEMTX_OK;
234 if (result) {
235 *result = r;
237 if (release_lock) {
238 qemu_mutex_unlock_iothread();
240 RCU_READ_UNLOCK();
241 return val;
244 uint32_t glue(ldub_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
246 return glue(address_space_ldub, SUFFIX)(ARG1, addr,
247 MEMTXATTRS_UNSPECIFIED, NULL);
250 /* warning: addr must be aligned */
251 static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
252 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
253 enum device_endian endian)
255 uint8_t *ptr;
256 uint64_t val;
257 MemoryRegion *mr;
258 hwaddr l = 2;
259 hwaddr addr1;
260 MemTxResult r;
261 bool release_lock = false;
263 RCU_READ_LOCK();
264 mr = TRANSLATE(addr, &addr1, &l, false);
265 if (l < 2 || !IS_DIRECT(mr, false)) {
266 release_lock |= prepare_mmio_access(mr);
268 /* I/O case */
269 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
270 #if defined(TARGET_WORDS_BIGENDIAN)
271 if (endian == DEVICE_LITTLE_ENDIAN) {
272 val = bswap16(val);
274 #else
275 if (endian == DEVICE_BIG_ENDIAN) {
276 val = bswap16(val);
278 #endif
279 } else {
280 /* RAM case */
281 ptr = MAP_RAM(mr, addr1);
282 switch (endian) {
283 case DEVICE_LITTLE_ENDIAN:
284 val = lduw_le_p(ptr);
285 break;
286 case DEVICE_BIG_ENDIAN:
287 val = lduw_be_p(ptr);
288 break;
289 default:
290 val = lduw_p(ptr);
291 break;
293 r = MEMTX_OK;
295 if (result) {
296 *result = r;
298 if (release_lock) {
299 qemu_mutex_unlock_iothread();
301 RCU_READ_UNLOCK();
302 return val;
305 uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
306 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
308 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
309 DEVICE_NATIVE_ENDIAN);
312 uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
313 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
315 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
316 DEVICE_LITTLE_ENDIAN);
319 uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
320 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
322 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
323 DEVICE_BIG_ENDIAN);
326 uint32_t glue(lduw_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
328 return glue(address_space_lduw, SUFFIX)(ARG1, addr,
329 MEMTXATTRS_UNSPECIFIED, NULL);
332 uint32_t glue(lduw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
334 return glue(address_space_lduw_le, SUFFIX)(ARG1, addr,
335 MEMTXATTRS_UNSPECIFIED, NULL);
338 uint32_t glue(lduw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr)
340 return glue(address_space_lduw_be, SUFFIX)(ARG1, addr,
341 MEMTXATTRS_UNSPECIFIED, NULL);
344 /* warning: addr must be aligned. The ram page is not masked as dirty
345 and the code inside is not invalidated. It is useful if the dirty
346 bits are used to track modified PTEs */
347 void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
348 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
350 uint8_t *ptr;
351 MemoryRegion *mr;
352 hwaddr l = 4;
353 hwaddr addr1;
354 MemTxResult r;
355 uint8_t dirty_log_mask;
356 bool release_lock = false;
358 RCU_READ_LOCK();
359 mr = TRANSLATE(addr, &addr1, &l, true);
360 if (l < 4 || !IS_DIRECT(mr, true)) {
361 release_lock |= prepare_mmio_access(mr);
363 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
364 } else {
365 ptr = MAP_RAM(mr, addr1);
366 stl_p(ptr, val);
368 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
369 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
370 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
371 4, dirty_log_mask);
372 r = MEMTX_OK;
374 if (result) {
375 *result = r;
377 if (release_lock) {
378 qemu_mutex_unlock_iothread();
380 RCU_READ_UNLOCK();
383 void glue(stl_phys_notdirty, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
385 glue(address_space_stl_notdirty, SUFFIX)(ARG1, addr, val,
386 MEMTXATTRS_UNSPECIFIED, NULL);
389 /* warning: addr must be aligned */
390 static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
391 hwaddr addr, uint32_t val, MemTxAttrs attrs,
392 MemTxResult *result, enum device_endian endian)
394 uint8_t *ptr;
395 MemoryRegion *mr;
396 hwaddr l = 4;
397 hwaddr addr1;
398 MemTxResult r;
399 bool release_lock = false;
401 RCU_READ_LOCK();
402 mr = TRANSLATE(addr, &addr1, &l, true);
403 if (l < 4 || !IS_DIRECT(mr, true)) {
404 release_lock |= prepare_mmio_access(mr);
406 #if defined(TARGET_WORDS_BIGENDIAN)
407 if (endian == DEVICE_LITTLE_ENDIAN) {
408 val = bswap32(val);
410 #else
411 if (endian == DEVICE_BIG_ENDIAN) {
412 val = bswap32(val);
414 #endif
415 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
416 } else {
417 /* RAM case */
418 ptr = MAP_RAM(mr, addr1);
419 switch (endian) {
420 case DEVICE_LITTLE_ENDIAN:
421 stl_le_p(ptr, val);
422 break;
423 case DEVICE_BIG_ENDIAN:
424 stl_be_p(ptr, val);
425 break;
426 default:
427 stl_p(ptr, val);
428 break;
430 INVALIDATE(mr, addr1, 4);
431 r = MEMTX_OK;
433 if (result) {
434 *result = r;
436 if (release_lock) {
437 qemu_mutex_unlock_iothread();
439 RCU_READ_UNLOCK();
442 void glue(address_space_stl, SUFFIX)(ARG1_DECL,
443 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
445 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
446 result, DEVICE_NATIVE_ENDIAN);
449 void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
450 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
452 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
453 result, DEVICE_LITTLE_ENDIAN);
456 void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
457 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
459 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
460 result, DEVICE_BIG_ENDIAN);
463 void glue(stl_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
465 glue(address_space_stl, SUFFIX)(ARG1, addr, val,
466 MEMTXATTRS_UNSPECIFIED, NULL);
469 void glue(stl_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
471 glue(address_space_stl_le, SUFFIX)(ARG1, addr, val,
472 MEMTXATTRS_UNSPECIFIED, NULL);
475 void glue(stl_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
477 glue(address_space_stl_be, SUFFIX)(ARG1, addr, val,
478 MEMTXATTRS_UNSPECIFIED, NULL);
481 void glue(address_space_stb, SUFFIX)(ARG1_DECL,
482 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
484 uint8_t *ptr;
485 MemoryRegion *mr;
486 hwaddr l = 1;
487 hwaddr addr1;
488 MemTxResult r;
489 bool release_lock = false;
491 RCU_READ_LOCK();
492 mr = TRANSLATE(addr, &addr1, &l, true);
493 if (!IS_DIRECT(mr, true)) {
494 release_lock |= prepare_mmio_access(mr);
495 r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
496 } else {
497 /* RAM case */
498 ptr = MAP_RAM(mr, addr1);
499 stb_p(ptr, val);
500 INVALIDATE(mr, addr1, 1);
501 r = MEMTX_OK;
503 if (result) {
504 *result = r;
506 if (release_lock) {
507 qemu_mutex_unlock_iothread();
509 RCU_READ_UNLOCK();
512 void glue(stb_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
514 glue(address_space_stb, SUFFIX)(ARG1, addr, val,
515 MEMTXATTRS_UNSPECIFIED, NULL);
518 /* warning: addr must be aligned */
519 static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
520 hwaddr addr, uint32_t val, MemTxAttrs attrs,
521 MemTxResult *result, enum device_endian endian)
523 uint8_t *ptr;
524 MemoryRegion *mr;
525 hwaddr l = 2;
526 hwaddr addr1;
527 MemTxResult r;
528 bool release_lock = false;
530 RCU_READ_LOCK();
531 mr = TRANSLATE(addr, &addr1, &l, true);
532 if (l < 2 || !IS_DIRECT(mr, true)) {
533 release_lock |= prepare_mmio_access(mr);
535 #if defined(TARGET_WORDS_BIGENDIAN)
536 if (endian == DEVICE_LITTLE_ENDIAN) {
537 val = bswap16(val);
539 #else
540 if (endian == DEVICE_BIG_ENDIAN) {
541 val = bswap16(val);
543 #endif
544 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
545 } else {
546 /* RAM case */
547 ptr = MAP_RAM(mr, addr1);
548 switch (endian) {
549 case DEVICE_LITTLE_ENDIAN:
550 stw_le_p(ptr, val);
551 break;
552 case DEVICE_BIG_ENDIAN:
553 stw_be_p(ptr, val);
554 break;
555 default:
556 stw_p(ptr, val);
557 break;
559 INVALIDATE(mr, addr1, 2);
560 r = MEMTX_OK;
562 if (result) {
563 *result = r;
565 if (release_lock) {
566 qemu_mutex_unlock_iothread();
568 RCU_READ_UNLOCK();
571 void glue(address_space_stw, SUFFIX)(ARG1_DECL,
572 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
574 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
575 DEVICE_NATIVE_ENDIAN);
578 void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
579 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
581 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
582 DEVICE_LITTLE_ENDIAN);
585 void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
586 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
588 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
589 DEVICE_BIG_ENDIAN);
592 void glue(stw_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
594 glue(address_space_stw, SUFFIX)(ARG1, addr, val,
595 MEMTXATTRS_UNSPECIFIED, NULL);
598 void glue(stw_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
600 glue(address_space_stw_le, SUFFIX)(ARG1, addr, val,
601 MEMTXATTRS_UNSPECIFIED, NULL);
604 void glue(stw_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint32_t val)
606 glue(address_space_stw_be, SUFFIX)(ARG1, addr, val,
607 MEMTXATTRS_UNSPECIFIED, NULL);
610 static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
611 hwaddr addr, uint64_t val, MemTxAttrs attrs,
612 MemTxResult *result, enum device_endian endian)
614 uint8_t *ptr;
615 MemoryRegion *mr;
616 hwaddr l = 8;
617 hwaddr addr1;
618 MemTxResult r;
619 bool release_lock = false;
621 RCU_READ_LOCK();
622 mr = TRANSLATE(addr, &addr1, &l, true);
623 if (l < 8 || !IS_DIRECT(mr, true)) {
624 release_lock |= prepare_mmio_access(mr);
626 #if defined(TARGET_WORDS_BIGENDIAN)
627 if (endian == DEVICE_LITTLE_ENDIAN) {
628 val = bswap64(val);
630 #else
631 if (endian == DEVICE_BIG_ENDIAN) {
632 val = bswap64(val);
634 #endif
635 r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
636 } else {
637 /* RAM case */
638 ptr = MAP_RAM(mr, addr1);
639 switch (endian) {
640 case DEVICE_LITTLE_ENDIAN:
641 stq_le_p(ptr, val);
642 break;
643 case DEVICE_BIG_ENDIAN:
644 stq_be_p(ptr, val);
645 break;
646 default:
647 stq_p(ptr, val);
648 break;
650 INVALIDATE(mr, addr1, 8);
651 r = MEMTX_OK;
653 if (result) {
654 *result = r;
656 if (release_lock) {
657 qemu_mutex_unlock_iothread();
659 RCU_READ_UNLOCK();
662 void glue(address_space_stq, SUFFIX)(ARG1_DECL,
663 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
665 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
666 DEVICE_NATIVE_ENDIAN);
669 void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
670 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
672 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
673 DEVICE_LITTLE_ENDIAN);
676 void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
677 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
679 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
680 DEVICE_BIG_ENDIAN);
683 void glue(stq_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
685 glue(address_space_stq, SUFFIX)(ARG1, addr, val,
686 MEMTXATTRS_UNSPECIFIED, NULL);
689 void glue(stq_le_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
691 glue(address_space_stq_le, SUFFIX)(ARG1, addr, val,
692 MEMTXATTRS_UNSPECIFIED, NULL);
695 void glue(stq_be_phys, SUFFIX)(ARG1_DECL, hwaddr addr, uint64_t val)
697 glue(address_space_stq_be, SUFFIX)(ARG1, addr, val,
698 MEMTXATTRS_UNSPECIFIED, NULL);
/* Consume the per-inclusion configuration macros so this template can be
 * included again with a different address-space flavour.
 */
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef IS_DIRECT
#undef MAP_RAM
#undef INVALIDATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK