memory: Access MemoryRegion with endianness
[qemu/ar7.git] / memory_ldst.inc.c
blob: 809a7e8389fcd3add34bb01a8e824a229199df95
/*
 *  Physical memory access templates
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2015 Linaro, Inc.
 *  Copyright (c) 2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
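
/*
 * This file is a template, not a standalone translation unit: the file
 * that includes it defines SUFFIX, ARG1_DECL, ARG1, TRANSLATE,
 * RCU_READ_LOCK and RCU_READ_UNLOCK first, and each inclusion stamps out
 * one family of load/store helpers.  As a rough sketch (the exact defines
 * live in the including file, e.g. exec.c), an instantiation looks like:
 *
 *   #define ARG1_DECL            AddressSpace *as
 *   #define ARG1                 as
 *   #define SUFFIX
 *   #define TRANSLATE(...)       address_space_translate(as, __VA_ARGS__)
 *   #define RCU_READ_LOCK(...)   rcu_read_lock()
 *   #define RCU_READ_UNLOCK(...) rcu_read_unlock()
 *   #include "memory_ldst.inc.c"
 *
 * so that glue(address_space_ldl, SUFFIX) expands to address_space_ldl().
 */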

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
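    /*
     * If the translation yielded fewer than 4 contiguous bytes, or the
     * target region is not plain RAM, take the slow MMIO dispatch path.
     */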
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);
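
        /*
         * MMIO handlers may need the big QEMU lock; prepare_mmio_access()
         * takes it when it is not already held and returns true if it
         * must be dropped again at the end of this access.
         */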
        /* I/O case */
        /* TODO: Merge bswap32 into memory_region_dispatch_read.  */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_32 | devend_memop(endian), attrs);
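        /*
         * devend_memop() folds the device's declared endianness into the
         * MemOp; the swaps below then byte-swap the result whenever the
         * device's endianness differs from the target's
         * (DEVICE_NATIVE_ENDIAN never swaps).
         */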
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}
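
/*
 * Endian-specific entry points.  A typical call site (illustrative; the
 * system address space and unspecified attributes are just one choice)
 * might be:
 *
 *   MemTxResult res;
 *   uint32_t v = address_space_ldl_le(&address_space_memory, addr,
 *                                     MEMTXATTRS_UNSPECIFIED, &res);
 */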
uint32_t glue(address_space_ldl, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_ldl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_ldl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldl_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        /* TODO: Merge bswap64 into memory_region_dispatch_read.  */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_64 | devend_memop(endian), attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint64_t glue(address_space_ldq, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_NATIVE_ENDIAN);
}

uint64_t glue(address_space_ldq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_LITTLE_ENDIAN);
}

uint64_t glue(address_space_ldq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_ldq_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                    DEVICE_BIG_ENDIAN);
}
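
/*
 * Single-byte loads have no endianness to worry about, so there is no
 * _internal helper and no _le/_be variant pair below.
 */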
uint32_t glue(address_space_ldub, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result,
    enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, false, attrs);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        /* TODO: Merge bswap16 into memory_region_dispatch_read.  */
        r = memory_region_dispatch_read(mr, addr1, &val,
                                        MO_16 | devend_memop(endian), attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
    return val;
}

uint32_t glue(address_space_lduw, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_NATIVE_ENDIAN);
}

uint32_t glue(address_space_lduw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_LITTLE_ENDIAN);
}

uint32_t glue(address_space_lduw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    return glue(address_space_lduw_internal, SUFFIX)(ARG1, addr, attrs, result,
                                                     DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
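/*
 * (For instance, a target's page-table walker can update accessed/dirty
 * bits in guest PTEs without invalidating translated code on the same
 * page; note how DIRTY_MEMORY_CODE is masked out of dirty_log_mask
 * below.)
 */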
void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, MO_32, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}
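
/*
 * Unlike the _notdirty variant above, the regular store helpers below
 * call invalidate_and_set_dirty(), which marks the written range dirty
 * for migration/display tracking and invalidates any translated code
 * that overlaps it.
 */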

/* warning: addr must be aligned */
static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        /* TODO: Merge bswap32 into memory_region_dispatch_write.  */
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_32 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stl, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stl_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stl_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stl_internal, SUFFIX)(ARG1, addr, val, attrs,
                                             result, DEVICE_BIG_ENDIAN);
}

void glue(address_space_stb, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, MO_8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

/* warning: addr must be aligned */
static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        /* TODO: Merge bswap16 into memory_region_dispatch_write.  */
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_16 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stw, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stw_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stw_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stw_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}

static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs,
    MemTxResult *result, enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    RCU_READ_LOCK();
    mr = TRANSLATE(addr, &addr1, &l, true, attrs);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
        /* TODO: Merge bswap64 into memory_region_dispatch_write.  */
        r = memory_region_dispatch_write(mr, addr1, val,
                                         MO_64 | devend_memop(endian), attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    RCU_READ_UNLOCK();
}

void glue(address_space_stq, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_NATIVE_ENDIAN);
}

void glue(address_space_stq_le, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_LITTLE_ENDIAN);
}

void glue(address_space_stq_be, SUFFIX)(ARG1_DECL,
    hwaddr addr, uint64_t val, MemTxAttrs attrs, MemTxResult *result)
{
    glue(address_space_stq_internal, SUFFIX)(ARG1, addr, val, attrs, result,
                                             DEVICE_BIG_ENDIAN);
}
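
/*
 * Undefine the template parameters so the including file can instantiate
 * this template again with a different configuration (e.g. for the
 * MemoryRegionCache-based variants).
 */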
#undef ARG1_DECL
#undef ARG1
#undef SUFFIX
#undef TRANSLATE
#undef RCU_READ_LOCK
#undef RCU_READ_UNLOCK