/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "qemu/bitops.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
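        /*
         * A minimal sketch of the compensation described above, assuming
         * that adding GETPC_ADJ here is how the adjustment is expressed;
         * the exact statement may differ in the full source.
         */
        *pc += GETPC_ADJ;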
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access.  If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
    default:
        g_assert_not_reached();
    }
}
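
/*
 * Illustrative sketch only (not part of the original file): one way a host
 * SIGSEGV handler could combine adjust_signal_pc() and
 * handle_sigsegv_accerr_write().  The function name and the way host_pc,
 * guest_addr and is_write are extracted from the signal context are
 * hypothetical and host-specific; passing maperr as true is likewise an
 * assumption made only for the sake of the example.
 */
static void example_handle_host_sigsegv(CPUState *cpu, sigset_t *old_set,
                                        uintptr_t host_pc, abi_ptr guest_addr,
                                        bool is_write)
{
    /* Convert the signal-frame pc into a value the unwinder understands. */
    MMUAccessType access_type = adjust_signal_pc(&host_pc, is_write);

    /* Was this a write to a page we protected for cached translations? */
    if (access_type == MMU_DATA_STORE
        && handle_sigsegv_accerr_write(cpu, old_set, host_pc, guest_addr)) {
        /* Fault handled; returning from the handler retries the access. */
        return;
    }

    /* Otherwise the fault belongs to the guest. */
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, true, host_pc);
}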
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
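
/*
 * For illustration only (not part of the original file): with a 64-bit
 * guest, the output of page_dump() looks roughly like the following,
 * using hypothetical addresses:
 *
 *     start            end              size             prot
 *     0000000000400000-0000000000452000 0000000000052000 r-x
 *     0000000000651000-0000000000653000 0000000000002000 rw-
 */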
int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    p = pageflags_find(address, address);
    return p ? p->flags : 0;
}
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            pageflags_create(last + 1, p_last, p->flags);
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
    }

    return inval_tb;
}
/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    prev = pageflags_find(start - 1, start - 1);
    if (prev) {
        if (prev->flags == flags) {
            interval_tree_remove(&prev->itree, &pageflags_root);
        } else {
            prev = NULL;
        }
    }

    next = pageflags_find(last + 1, last + 1);
    if (next) {
        if (next->flags == flags) {
            interval_tree_remove(&next->itree, &pageflags_root);
        } else {
            next = NULL;
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
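
/*
 * Worked example (added for clarity, using only the definitions above):
 * an anonymous mapping sets PAGE_ANON together with PAGE_RESET.  A later
 * mprotect() recomputes the flags as (old & ~clear_flags) | set_flags with
 * clear_flags excluding PAGE_STICKY, so PAGE_ANON, PAGE_PASSTHROUGH and any
 * PAGE_TARGET_STICKY bits survive the protection change, while a fresh
 * mapping (PAGE_RESET) starts from a clean slate.
 */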
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    }

    pageflags_create(start, last, set_flags);

 done:
    return inval_tb;
}
/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;
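
    /*
     * Worked example (added for clarity): with 4 KiB target pages,
     * TARGET_PAGE_MASK is ~0xfff, so start 0x12345 is rounded down to
     * 0x12000 and last 0x12345 is rounded up to 0x12fff; the range is
     * thus widened to whole-page boundaries before the flags are applied.
     * The page size is an assumption for the example; only the rounding
     * pattern is taken from the code above.
     */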
    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}
bool page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    bool ret;

    if (len == 0) {
        return true;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return false; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = false; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = false; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & ~PAGE_WRITE) {
            ret = false; /* page doesn't match */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = false; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = false;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = true;
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = true;
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
bool page_check_range_empty(target_ulong start, target_ulong last)
{
    assert(last >= start);
    assert_memory_lock();
    return pageflags_find(start, last) == NULL;
}
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align)
{
    target_ulong len_m1, align_m1;

    assert(max <= GUEST_ADDR_MAX);
    assert(is_power_of_2(align));
    assert_memory_lock();

    len_m1 = len - 1;
    align_m1 = align - 1;

    /* Iteratively narrow the search region. */
    while (1) {
        PageFlagsNode *p;

        /* Align min and double-check there's enough space remaining. */
        min = (min + align_m1) & ~align_m1;
        if (len_m1 > max - min) {
            return -1;
        }

        p = pageflags_find(min, min + len_m1);
        if (p == NULL) {
            /* Found! */
            return min;
        }
        if (max <= p->itree.last) {
            /* Existing allocation fills the remainder of the search region. */
            return -1;
        }

        /* Skip across existing allocation. */
        min = p->itree.last + 1;
    }
}
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}
/*
 * Called from signal handler: invalidate the code and unprotect the
 * page.  Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited.  (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;
    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}
static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
                return TLB_MMIO;
            }
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
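
    /*
     * Note added for clarity: -(addr | TARGET_PAGE_MASK) computes the number
     * of bytes from addr to the end of its page.  For example, with 4 KiB
     * pages (TARGET_PAGE_MASK == ~0xfff) and an addr whose page offset is
     * 0xffe, the expression yields 2, so the assertion requires the access
     * to fit within the current page.  The page size here is only an
     * assumption for the example.
     */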
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
    return flags;
}
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert((flags & ~TLB_MMIO) == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}
#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
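/*
 * Worked example (added for clarity, using the figures in the comment
 * above): with 64 pages per node, the roughly 40 bytes of per-node
 * bookkeeping are amortised over 64 * 128 = 8192 bytes of data, i.e.
 * about 0.5% overhead, which is where the "< 1%" figure comes from.
 */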
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;
void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}
void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
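
/*
 * Worked example (added for clarity; the page size and TPD_PAGES == 64 are
 * the assumptions used above): with 4 KiB pages a node covers 64 * 4 KiB =
 * 256 KiB, so for address 0x40005123 the page is 0x40005000, the region is
 * 0x40000000, and (page - region) >> TARGET_PAGE_BITS == 5 selects the
 * sixth per-page data slot in that node.
 */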
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The softmmu versions of these helpers are in cputlb.c. */
static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
                            MemOp mop, uintptr_t ra, MMUAccessType type)
{
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }
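
    /*
     * Example added for clarity: for a MemOp requiring 4-byte alignment,
     * a_bits is 2 and the mask (1 << a_bits) - 1 is 3, so an access at
     * address 0x1002 fails the check (0x1002 & 3 == 2) and raises SIGBUS
     * for the guest.  The concrete numbers are illustrative only.
     */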
    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}

#include "ldst_atomicity.c.inc"
static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld1_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_2(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld2_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_4(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld4_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_8(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return do_ld8_mmu(env, addr, get_memop(oi), ra);
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_16(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       MemOpIdx oi, uintptr_t ra)
{
    return do_ld16_mmu(env, addr, get_memop(oi), ra);
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}
static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
}

void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap16(val);
    }
    store_atom_2(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    store_atom_4(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    store_atom_8(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                        MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap128(val);
    }
    store_atom_16(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}

void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
                  Int128 val, MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

#include "ldst_common.c.inc"
/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 */
static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }
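
    /*
     * Example added for clarity: an 8-byte atomic requires addr % 8 == 0
     * here even when the guest ISA would tolerate a smaller alignment;
     * e.g. addr 0x1004 gives 0x1004 & 7 == 4 and the operation is rejected.
     * The concrete numbers are illustrative only.
     */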
    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}

#include "atomic_common.c.inc"
/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
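
/*
 * Expansion example (added for clarity): with DATA_SIZE 4 the template
 * defines SUFFIX as 'l', so ATOMIC_NAME(cmpxchg) expands to something like
 * cpu_atomic_cmpxchgl_mmu (or cpu_atomic_cmpxchgl_le_mmu / _be_mmu for the
 * endian-specific variants).  The exact SUFFIX/END values come from
 * atomic_template.h and are assumptions here.
 */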
#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"