target/ppc: Use atomic cmpxchg for STQCX
[qemu.git] / memory_ldst.inc.c
blobacf865b900d7dfbff79380841d82ae3998126951
/*
 * Physical memory access templates
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2015 Linaro, Inc.
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 /* warning: addr must be aligned */
23 static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
24 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
25 enum device_endian endian)
27 uint8_t *ptr;
28 uint64_t val;
29 MemoryRegion *mr;
30 hwaddr l = 4;
31 hwaddr addr1;
32 MemTxResult r;
33 bool release_lock = false;
35 RCU_READ_LOCK();
36 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
37 if (l < 4 || !memory_access_is_direct(mr, false)) {
38 release_lock |= prepare_mmio_access(mr);
40 /* I/O case */
41 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
42 #if defined(TARGET_WORDS_BIGENDIAN)
43 if (endian == DEVICE_LITTLE_ENDIAN) {
44 val = bswap32(val);
46 #else
47 if (endian == DEVICE_BIG_ENDIAN) {
48 val = bswap32(val);
50 #endif
51 } else {
52 /* RAM case */
53 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
54 switch (endian) {
55 case DEVICE_LITTLE_ENDIAN:
56 val = ldl_le_p(ptr);
57 break;
58 case DEVICE_BIG_ENDIAN:
59 val = ldl_be_p(ptr);
60 break;
61 default:
62 val = ldl_p(ptr);
63 break;
65 r = MEMTX_OK;
67 if (result) {
68 *result = r;
70 if (release_lock) {
71 qemu_mutex_unlock_iothread();
73 RCU_READ_UNLOCK();
74 return val;
77 uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
78 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
80 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
81 DEVICE_NATIVE_ENDIAN);
84 uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
85 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
87 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
88 DEVICE_LITTLE_ENDIAN);
91 uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
92 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
94 return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
95 DEVICE_BIG_ENDIAN);
98 /* warning: addr must be aligned */
99 static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
100 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
101 enum device_endian endian)
103 uint8_t *ptr;
104 uint64_t val;
105 MemoryRegion *mr;
106 hwaddr l = 8;
107 hwaddr addr1;
108 MemTxResult r;
109 bool release_lock = false;
111 RCU_READ_LOCK();
112 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
113 if (l < 8 || !memory_access_is_direct(mr, false)) {
114 release_lock |= prepare_mmio_access(mr);
116 /* I/O case */
117 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
118 #if defined(TARGET_WORDS_BIGENDIAN)
119 if (endian == DEVICE_LITTLE_ENDIAN) {
120 val = bswap64(val);
122 #else
123 if (endian == DEVICE_BIG_ENDIAN) {
124 val = bswap64(val);
126 #endif
127 } else {
128 /* RAM case */
129 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
130 switch (endian) {
131 case DEVICE_LITTLE_ENDIAN:
132 val = ldq_le_p(ptr);
133 break;
134 case DEVICE_BIG_ENDIAN:
135 val = ldq_be_p(ptr);
136 break;
137 default:
138 val = ldq_p(ptr);
139 break;
141 r = MEMTX_OK;
143 if (result) {
144 *result = r;
146 if (release_lock) {
147 qemu_mutex_unlock_iothread();
149 RCU_READ_UNLOCK();
150 return val;
153 uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
154 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
156 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
157 DEVICE_NATIVE_ENDIAN);
160 uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
161 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
163 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
164 DEVICE_LITTLE_ENDIAN);
167 uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
168 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
170 return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
171 DEVICE_BIG_ENDIAN);
174 uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
175 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
177 uint8_t *ptr;
178 uint64_t val;
179 MemoryRegion *mr;
180 hwaddr l = 1;
181 hwaddr addr1;
182 MemTxResult r;
183 bool release_lock = false;
185 RCU_READ_LOCK();
186 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
187 if (!memory_access_is_direct(mr, false)) {
188 release_lock |= prepare_mmio_access(mr);
190 /* I/O case */
191 r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
192 } else {
193 /* RAM case */
194 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
195 val = ldub_p(ptr);
196 r = MEMTX_OK;
198 if (result) {
199 *result = r;
201 if (release_lock) {
202 qemu_mutex_unlock_iothread();
204 RCU_READ_UNLOCK();
205 return val;
208 /* warning: addr must be aligned */
209 static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
210 hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
211 enum device_endian endian)
213 uint8_t *ptr;
214 uint64_t val;
215 MemoryRegion *mr;
216 hwaddr l = 2;
217 hwaddr addr1;
218 MemTxResult r;
219 bool release_lock = false;
221 RCU_READ_LOCK();
222 mr = TRANSLATE(addr, &addr1, &l, false, attrs);
223 if (l < 2 || !memory_access_is_direct(mr, false)) {
224 release_lock |= prepare_mmio_access(mr);
226 /* I/O case */
227 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
228 #if defined(TARGET_WORDS_BIGENDIAN)
229 if (endian == DEVICE_LITTLE_ENDIAN) {
230 val = bswap16(val);
232 #else
233 if (endian == DEVICE_BIG_ENDIAN) {
234 val = bswap16(val);
236 #endif
237 } else {
238 /* RAM case */
239 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
240 switch (endian) {
241 case DEVICE_LITTLE_ENDIAN:
242 val = lduw_le_p(ptr);
243 break;
244 case DEVICE_BIG_ENDIAN:
245 val = lduw_be_p(ptr);
246 break;
247 default:
248 val = lduw_p(ptr);
249 break;
251 r = MEMTX_OK;
253 if (result) {
254 *result = r;
256 if (release_lock) {
257 qemu_mutex_unlock_iothread();
259 RCU_READ_UNLOCK();
260 return val;
263 uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
264 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
266 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
267 DEVICE_NATIVE_ENDIAN);
270 uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
271 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
273 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
274 DEVICE_LITTLE_ENDIAN);
277 uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
278 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
280 return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
281 DEVICE_BIG_ENDIAN);
284 /* warning: addr must be aligned. The ram page is not masked as dirty
285 and the code inside is not invalidated. It is useful if the dirty
286 bits are used to track modified PTEs */
287 void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
288 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
290 uint8_t *ptr;
291 MemoryRegion *mr;
292 hwaddr l = 4;
293 hwaddr addr1;
294 MemTxResult r;
295 uint8_t dirty_log_mask;
296 bool release_lock = false;
298 RCU_READ_LOCK();
299 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
300 if (l < 4 || !memory_access_is_direct(mr, true)) {
301 release_lock |= prepare_mmio_access(mr);
303 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
304 } else {
305 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
306 stl_p(ptr, val);
308 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
309 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
310 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
311 4, dirty_log_mask);
312 r = MEMTX_OK;
314 if (result) {
315 *result = r;
317 if (release_lock) {
318 qemu_mutex_unlock_iothread();
320 RCU_READ_UNLOCK();
323 /* warning: addr must be aligned */
324 static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
325 hwaddr addr, uint32_t val, MemTxAttrs attrs,
326 MemTxResult *result, enum device_endian endian)
328 uint8_t *ptr;
329 MemoryRegion *mr;
330 hwaddr l = 4;
331 hwaddr addr1;
332 MemTxResult r;
333 bool release_lock = false;
335 RCU_READ_LOCK();
336 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
337 if (l < 4 || !memory_access_is_direct(mr, true)) {
338 release_lock |= prepare_mmio_access(mr);
340 #if defined(TARGET_WORDS_BIGENDIAN)
341 if (endian == DEVICE_LITTLE_ENDIAN) {
342 val = bswap32(val);
344 #else
345 if (endian == DEVICE_BIG_ENDIAN) {
346 val = bswap32(val);
348 #endif
349 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
350 } else {
351 /* RAM case */
352 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
353 switch (endian) {
354 case DEVICE_LITTLE_ENDIAN:
355 stl_le_p(ptr, val);
356 break;
357 case DEVICE_BIG_ENDIAN:
358 stl_be_p(ptr, val);
359 break;
360 default:
361 stl_p(ptr, val);
362 break;
364 invalidate_and_set_dirty(mr, addr1, 4);
365 r = MEMTX_OK;
367 if (result) {
368 *result = r;
370 if (release_lock) {
371 qemu_mutex_unlock_iothread();
373 RCU_READ_UNLOCK();
376 void glue(address_space_stl, SUFFIX)(ARG1_DECL,
377 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
379 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
380 result, DEVICE_NATIVE_ENDIAN);
383 void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
384 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
386 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
387 result, DEVICE_LITTLE_ENDIAN);
390 void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
391 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
393 glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
394 result, DEVICE_BIG_ENDIAN);
397 void glue(address_space_stb, SUFFIX)(ARG1_DECL,
398 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
400 uint8_t *ptr;
401 MemoryRegion *mr;
402 hwaddr l = 1;
403 hwaddr addr1;
404 MemTxResult r;
405 bool release_lock = false;
407 RCU_READ_LOCK();
408 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
409 if (!memory_access_is_direct(mr, true)) {
410 release_lock |= prepare_mmio_access(mr);
411 r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
412 } else {
413 /* RAM case */
414 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
415 stb_p(ptr, val);
416 invalidate_and_set_dirty(mr, addr1, 1);
417 r = MEMTX_OK;
419 if (result) {
420 *result = r;
422 if (release_lock) {
423 qemu_mutex_unlock_iothread();
425 RCU_READ_UNLOCK();
428 /* warning: addr must be aligned */
429 static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
430 hwaddr addr, uint32_t val, MemTxAttrs attrs,
431 MemTxResult *result, enum device_endian endian)
433 uint8_t *ptr;
434 MemoryRegion *mr;
435 hwaddr l = 2;
436 hwaddr addr1;
437 MemTxResult r;
438 bool release_lock = false;
440 RCU_READ_LOCK();
441 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
442 if (l < 2 || !memory_access_is_direct(mr, true)) {
443 release_lock |= prepare_mmio_access(mr);
445 #if defined(TARGET_WORDS_BIGENDIAN)
446 if (endian == DEVICE_LITTLE_ENDIAN) {
447 val = bswap16(val);
449 #else
450 if (endian == DEVICE_BIG_ENDIAN) {
451 val = bswap16(val);
453 #endif
454 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
455 } else {
456 /* RAM case */
457 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
458 switch (endian) {
459 case DEVICE_LITTLE_ENDIAN:
460 stw_le_p(ptr, val);
461 break;
462 case DEVICE_BIG_ENDIAN:
463 stw_be_p(ptr, val);
464 break;
465 default:
466 stw_p(ptr, val);
467 break;
469 invalidate_and_set_dirty(mr, addr1, 2);
470 r = MEMTX_OK;
472 if (result) {
473 *result = r;
475 if (release_lock) {
476 qemu_mutex_unlock_iothread();
478 RCU_READ_UNLOCK();
481 void glue(address_space_stw, SUFFIX)(ARG1_DECL,
482 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
484 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
485 DEVICE_NATIVE_ENDIAN);
488 void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
489 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
491 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
492 DEVICE_LITTLE_ENDIAN);
495 void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
496 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
498 glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
499 DEVICE_BIG_ENDIAN);
502 static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
503 hwaddr addr, uint64_t val, MemTxAttrs attrs,
504 MemTxResult *result, enum device_endian endian)
506 uint8_t *ptr;
507 MemoryRegion *mr;
508 hwaddr l = 8;
509 hwaddr addr1;
510 MemTxResult r;
511 bool release_lock = false;
513 RCU_READ_LOCK();
514 mr = TRANSLATE(addr, &addr1, &l, true, attrs);
515 if (l < 8 || !memory_access_is_direct(mr, true)) {
516 release_lock |= prepare_mmio_access(mr);
518 #if defined(TARGET_WORDS_BIGENDIAN)
519 if (endian == DEVICE_LITTLE_ENDIAN) {
520 val = bswap64(val);
522 #else
523 if (endian == DEVICE_BIG_ENDIAN) {
524 val = bswap64(val);
526 #endif
527 r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
528 } else {
529 /* RAM case */
530 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
531 switch (endian) {
532 case DEVICE_LITTLE_ENDIAN:
533 stq_le_p(ptr, val);
534 break;
535 case DEVICE_BIG_ENDIAN:
536 stq_be_p(ptr, val);
537 break;
538 default:
539 stq_p(ptr, val);
540 break;
542 invalidate_and_set_dirty(mr, addr1, 8);
543 r = MEMTX_OK;
545 if (result) {
546 *result = r;
548 if (release_lock) {
549 qemu_mutex_unlock_iothread();
551 RCU_READ_UNLOCK();
554 void glue(address_space_stq, SUFFIX)(ARG1_DECL,
555 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
557 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
558 DEVICE_NATIVE_ENDIAN);
561 void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
562 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
564 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
565 DEVICE_LITTLE_ENDIAN);
568 void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
569 hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
571 glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
572 DEVICE_BIG_ENDIAN);
575 #undef ARG1_DECL
576 #undef ARG1
577 #undef SUFFIX
578 #undef TRANSLATE
579 #undef RCU_READ_LOCK
580 #undef RCU_READ_UNLOCK