/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "qemu/main-loop.h"
#include "exec/translate-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal-common.h"
#include "internal-target.h"
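/*
 * Shared state for the SIGSEGV machinery below: a value of 0 means a
 * fault may have come from generated code, 1 means a host read performed
 * on behalf of the translator, and any other value is the host return
 * address of a helper doing a guest memory access (see adjust_signal_pc).
 */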
__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL
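/*
 * User-only interrupt delivery: there is no interrupt controller, so just
 * record the request and poke icount_decr so that cpu_exec() notices the
 * pending work at the next translation-block boundary.
 */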
void cpu_interrupt(CPUState *cpu, int mask)
{
    g_assert(bql_locked());
    cpu->interrupt_request |= mask;
    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}
/*
 * Adjust the pc to pass to cpu_restore_state; return the memop type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here.)
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;
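/*
 * Each PageFlagsNode covers an inclusive guest range [itree.start,
 * itree.last] sharing one set of PAGE_* flags.  Nodes are freed via RCU
 * (g_free_rcu below), which is what keeps lockless lookups safe.
 */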
static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}
static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx "-" TARGET_FMT_lx " " TARGET_FMT_lx " %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}
/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}
/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}
/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
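/*
 * Worked example with hypothetical addresses: if [0x1000,0x1fff] and
 * [0x3000,0x3fff] both already carry flags F, then creating [0x2000,0x2fff]
 * with F collapses all three into the single node [0x1000,0x3fff].
 */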
/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)
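/*
 * E.g. an mprotect() over an anonymous mapping may change R/W/X but should
 * not lose PAGE_ANON; only a fresh mapping (PAGE_RESET) drops sticky bits.
 */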
/* A subroutine of page_set_flags: add flags to [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}
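/*
 * For example, a fresh anonymous read-write target mmap() arrives here as
 * (roughly) page_set_flags(start, last, PAGE_READ | PAGE_WRITE | PAGE_VALID
 * | PAGE_ANON | PAGE_RESET): target data is reset, stale flags are unset,
 * and PAGE_WRITE_ORG is recorded alongside PAGE_WRITE.
 */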
bool page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    bool ret;

    if (len == 0) {
        return true;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return false;  /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = false;  /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = false;  /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & ~PAGE_WRITE) {
            ret = false;  /* page doesn't match */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = false;  /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = false;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = true;  /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = true;  /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
bool page_check_range_empty(target_ulong start, target_ulong last)
{
    assert(last >= start);
    assert_memory_lock();
    return pageflags_find(start, last) == NULL;
}
target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align)
{
    target_ulong len_m1, align_m1;

    assert(min <= max);
    assert(max <= GUEST_ADDR_MAX);
    assert(len != 0);
    assert(is_power_of_2(align));
    assert_memory_lock();

    len_m1 = len - 1;
    align_m1 = align - 1;

    /* Iteratively narrow the search region. */
    while (1) {
        PageFlagsNode *p;

        /* Align min and double-check there's enough space remaining. */
        min = (min + align_m1) & ~align_m1;
        if (min > max) {
            return -1;
        }
        if (len_m1 > max - min) {
            return -1;
        }

        p = pageflags_find(min, min + len_m1);
        if (p == NULL) {
            /* Found! */
            return min;
        }
        if (max <= p->itree.last) {
            /* Existing allocation fills the remainder of the search region. */
            return -1;
        }

        /* Skip across existing allocation. */
        min = p->itree.last + 1;
    }
}
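/*
 * Write-protect the host page(s) backing a guest page that holds translated
 * code, so that a later self-modifying write faults and is routed through
 * page_unprotect() below.
 */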
void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int host_page_size = qemu_real_host_page_size();
    int prot;

    assert_memory_lock();

    if (host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & -host_page_size;
        last = start + host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), last - start + 1,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}
/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        int host_page_size = qemu_real_host_page_size();
        target_ulong start, len, i;
        int prot;

        if (host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & -host_page_size;
            len = host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_RWX);
    }
    mmap_unlock();

    /* If current TB was invalidated return to main loop */
    return current_tb_invalidated ? 2 : 1;
}
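/*
 * Common guts of the probe_access_* entry points: check the page flags for
 * the given access type.  Returns 0 on success, TLB_MMIO when plugin memory
 * callbacks require the slow path, TLB_INVALID_MASK for a failed nonfault
 * probe; otherwise raises SIGSEGV and does not return.
 */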
static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
                return TLB_MMIO;
            }
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
    return flags;
}
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert((flags & ~TLB_MMIO) == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}
#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or 40%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;
void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
               p_len * TARGET_PAGE_DATA_SIZE);
    }
}
void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region, p_ofs;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_malloc0(sizeof(TargetPageDataNode)
                          + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    p_ofs = (page - region) >> TARGET_PAGE_BITS;
    return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */
/* The system-mode versions of these helpers are in cputlb.c. */
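/*
 * Validate alignment and convert the guest address to a host pointer.
 * On success, helper_retaddr is left set so that a SIGSEGV raised by the
 * subsequent access unwinds correctly; callers must therefore pair this
 * with clear_helper_retaddr() once the access is complete.
 */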
static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
                            MemOp mop, uintptr_t ra, MMUAccessType type)
{
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(cpu, addr, type, ra);
    }

    ret = g2h(cpu, addr);
    set_helper_retaddr(ra);
    return ret;
}
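/*
 * ldst_atomicity.c.inc supplies the load_atom_* / store_atom_* helpers used
 * below, which implement the atomicity guarantees encoded in the MemOp.
 */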
#include "ldst_atomicity.c.inc"
static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                          uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint8_t ret;

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}
static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint16_t ret;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
    ret = load_atom_2(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}
static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint32_t ret;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
    ret = load_atom_4(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}
static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
                           uintptr_t ra, MMUAccessType access_type)
{
    void *haddr;
    uint64_t ret;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
    ret = load_atom_8(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}
static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    Int128 ret;
    MemOp mop = get_memop(oi);

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_16(cpu, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap128(ret);
    }
    return ret;
}
static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
}
static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap16(val);
    }
    store_atom_2(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}
static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    store_atom_4(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}
static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
                       MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    store_atom_8(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}
static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
                        MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    MemOp mop = get_memop(oi);

    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap128(val);
    }
    store_atom_16(cpu, ra, haddr, mop, val);
    clear_helper_retaddr();
}
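/*
 * For the translator reads below, set_helper_retaddr(1) marks the access as
 * a translation-time fetch: adjust_signal_pc() then reports MMU_INST_FETCH
 * without invoking the unwinder.
 */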
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}
#include "ldst_common.c.inc"
/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 */
static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment. */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
    }

    /* Enforce qemu required alignment. */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(cpu, retaddr);
    }

    ret = g2h(cpu, addr);
    set_helper_retaddr(retaddr);
    return ret;
}
#include "atomic_common.c.inc"
/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif