softmmu/watchpoint.c
/*
 * CPU watchpoints
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "hw/core/tcg-cpu-ops.h"
#include "hw/core/cpu.h"

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;
    vaddr in_page;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }
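
    /*
     * Worked example for the page-containment check below, assuming
     * 4 KiB target pages: TARGET_PAGE_MASK has every bit above the page
     * offset set, so for addr == 0x1234 the value (addr | TARGET_PAGE_MASK)
     * is all-ones except for the low offset bits, and negating it yields
     * 0xdcc == 0x1000 - 0x234, i.e. the number of bytes from addr up to
     * the end of its page.  If the watched range fits within that many
     * bytes, flushing the one page's TLB entry is enough; otherwise the
     * whole TLB is flushed.
     */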
    in_page = -(addr | TARGET_PAGE_MASK);
    if (len <= in_page) {
        tlb_flush_page(cpu, addr);
    } else {
        tlb_flush(cpu);
    }

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
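
/*
 * Usage sketch (illustrative; the flag combination is an assumption
 * based on the BP_* flags used above, not a guarantee about any
 * particular caller): a debugger front end watching writes to a
 * 4-byte variable might do
 *
 *     CPUWatchpoint *wp;
 *     int err = cpu_watchpoint_insert(cpu, addr, 4,
 *                                     BP_GDB | BP_MEM_WRITE, &wp);
 *     if (err) {
 *         return err;
 *     }
 *
 * and later drop it again with cpu_watchpoint_remove(cpu, addr, 4,
 * BP_GDB | BP_MEM_WRITE) or cpu_watchpoint_remove_by_ref(cpu, wp).
 */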

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
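
/*
 * Usage sketch (illustrative; which subsystems call this is an
 * assumption inferred from the BP_GDB handling above): a debugger
 * detach path can clear every GDB-injected watchpoint in one call with
 *
 *     cpu_watchpoint_remove_all(cpu, BP_GDB);
 *
 * while target code that manages architectural debug registers can
 * pass BP_CPU to drop only the CPU-internal ones, since a watchpoint
 * is removed whenever any of its flag bits appear in the mask.
 */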

#ifdef CONFIG_TCG

/*
 * Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool watchpoint_address_matches(CPUWatchpoint *wp,
                                              vaddr addr, vaddr len)
{
    /*
     * We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
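    /*
     * Worked example (assuming a 64-bit vaddr): a watchpoint covering the
     * last page, wp->vaddr == 0xfffffffffffff000 with wp->len == 0x1000,
     * gives wpend == 0xffffffffffffffff, whereas wp->vaddr + wp->len would
     * wrap to 0 and break the comparison.  With inclusive end points, the
     * ranges [addr, addrend] and [wp->vaddr, wpend] overlap exactly when
     * neither starts beyond the other's end, which is what the expression
     * below tests.
     */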
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

/* Return flags for watchpoints that match addr + prot.  */
int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
{
    CPUWatchpoint *wp;
    int ret = 0;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (watchpoint_address_matches(wp, addr, len)) {
            ret |= wp->flags;
        }
    }
    return ret;
}
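
/*
 * Example of the OR-combining behaviour above (illustrative): if one
 * watchpoint with BP_MEM_READ and another with BP_MEM_WRITE both overlap
 * [addr, addr + len), the result carries both bits, so a caller such as
 * the TCG TLB code can decide in a single query whether reads, writes or
 * both need the slow path for that range.
 */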

/* Generate a debug exception if a watchpoint has been hit.  */
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    assert(tcg_enabled());
    if (cpu->watchpoint_hit) {
        /*
         * We re-entered the check after replacing the TB.
         * Now raise the debug interrupt so that it will
         * trigger after the current instruction.
         */
        qemu_mutex_lock_iothread();
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        qemu_mutex_unlock_iothread();
        return;
    }

    if (cc->tcg_ops->adjust_watchpoint_address) {
        /* this is currently used only by ARM BE32 */
        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
    }

    assert((flags & ~BP_MEM_ACCESS) == 0);
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        int hit_flags = wp->flags & flags;

        if (hit_flags && watchpoint_address_matches(wp, addr, len)) {
            if (replay_running_debug()) {
                /*
                 * replay_breakpoint reads icount.
                 * Force recompile to succeed, because icount may
                 * be read only at the end of the block.
                 */
                if (!cpu->can_do_io) {
                    /* Force execution of one insn next time.  */
                    cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
                                        | curr_cflags(cpu);
                    cpu_loop_exit_restore(cpu, ra);
                }
                /*
                 * Don't process the watchpoints when we are
                 * in a reverse debugging operation.
                 */
                replay_breakpoint();
                return;
            }

            wp->flags |= hit_flags << BP_HIT_SHIFT;
            wp->hitaddr = MAX(addr, wp->vaddr);
            wp->hitattrs = attrs;
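
            /*
             * At this point the matching BP_MEM_* bits have been shifted
             * by BP_HIT_SHIFT into the hit position of wp->flags (the same
             * bits that the BP_WATCHPOINT_HIT clearing below undoes), and
             * wp->hitaddr / wp->hitattrs record the access so that target
             * hooks and the debugger can inspect what fired.
             */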

            if (wp->flags & BP_CPU
                && cc->tcg_ops->debug_check_watchpoint
                && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
                wp->flags &= ~BP_WATCHPOINT_HIT;
                continue;
            }
            cpu->watchpoint_hit = wp;

            mmap_lock();
            /* This call also restores vCPU state */
            tb_check_watchpoint(cpu, ra);
            if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                cpu->exception_index = EXCP_DEBUG;
                mmap_unlock();
                cpu_loop_exit(cpu);
            } else {
                /* Force execution of one insn next time.  */
                cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
                                    | curr_cflags(cpu);
                mmap_unlock();
                cpu_loop_exit_noexc(cpu);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
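
/*
 * Caller sketch (illustrative; the surrounding helper is an assumption,
 * not part of this file): a softmmu store slow path that has detected a
 * watchpointed page could hand the access to this function as
 *
 *     cpu_check_watchpoint(cpu, addr, size, MEMTXATTRS_UNSPECIFIED,
 *                          BP_MEM_WRITE, retaddr);
 *
 * passing BP_MEM_READ instead for loads; the function then either
 * returns (no watchpoint actually fired, or the target hook vetoed it)
 * or leaves the current TB via one of the cpu_loop_exit*() calls above.
 */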

#endif /* CONFIG_TCG */