accel/tcg/tcg-accel-ops.c
/*
 * QEMU TCG vCPU common functionality
 *
 * Functionality common to all TCG vCPU variants: mttcg, rr and icount.
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "exec/hwaddr.h"
#include "exec/gdbstub.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"
/* common functionality among all TCG variants */
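/*
 * Compute the per-vCPU part of the TB compile flags (cpu->tcg_cflags):
 * the CPU's cluster number plus CF_PARALLEL and CF_USE_ICOUNT as
 * appropriate for the current execution mode.
 */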
void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
{
    uint32_t cflags;

    /*
     * Include the cluster number in the hash we use to look up TBs.
     * This is important because a TB that is valid for one cluster at
     * a given physical address and set of CPU flags is not necessarily
     * valid for another:
     * the two clusters may have different views of physical memory, or
     * may have different CPU features (eg FPU present or absent).
     */
    cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;

    cflags |= parallel ? CF_PARALLEL : 0;
    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
    cpu->tcg_cflags = cflags;
}
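/* Report, on the vCPU thread exit path, that this vCPU has been destroyed. */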
void tcg_cpus_destroy(CPUState *cpu)
{
    cpu_thread_signal_destroyed(cpu);
}
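/*
 * Run guest code on @cpu via cpu_exec() and return its exit code,
 * accounting the time spent when CONFIG_PROFILER is enabled.
 */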
int tcg_cpus_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
    assert(tcg_enabled());
#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    cpu_exec_start(cpu);
    ret = cpu_exec(cpu);
    cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
    return ret;
}
/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
    g_assert(qemu_mutex_iothread_locked());

    cpu->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    } else {
        /* We are the vCPU thread itself: make the current TB exit promptly. */
        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
    }
}
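/* The TCG accelerator always supports the gdbstub's guest debug facilities. */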
static bool tcg_supports_guest_debug(void)
{
    return true;
}
/* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
{
    static const int xlat[] = {
        [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
        [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
        [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };

    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cputype = xlat[gdbtype];

    if (cc->gdb_stop_before_watchpoint) {
        cputype |= BP_STOP_BEFORE_ACCESS;
    }
    return cputype;
}
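/*
 * gdbstub hook: insert a breakpoint or watchpoint of @type at @addr
 * (with @len for watchpoints) on every vCPU, stopping at the first error.
 */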
static int tcg_insert_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_insert(cpu, addr, BP_GDB, NULL);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_insert(cpu, addr, len,
                                        xlat_gdb_type(cpu, type), NULL);
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}
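/* gdbstub hook: the inverse of tcg_insert_breakpoint(), again for every vCPU. */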
static int tcg_remove_breakpoint(CPUState *cs, int type, vaddr addr, vaddr len)
{
    CPUState *cpu;
    int err = 0;

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        CPU_FOREACH(cpu) {
            err = cpu_breakpoint_remove(cpu, addr, BP_GDB);
            if (err) {
                break;
            }
        }
        return err;
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        CPU_FOREACH(cpu) {
            err = cpu_watchpoint_remove(cpu, addr, len,
                                        xlat_gdb_type(cpu, type));
            if (err) {
                break;
            }
        }
        return err;
    default:
        return -ENOSYS;
    }
}
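/* Drop every GDB-owned breakpoint and watchpoint on @cpu. */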
static inline void tcg_remove_all_breakpoints(CPUState *cpu)
{
    cpu_breakpoint_remove_all(cpu, BP_GDB);
    cpu_watchpoint_remove_all(cpu, BP_GDB);
}
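/*
 * Fill in the AccelOpsClass hooks for the chosen TCG flavour: MTTCG
 * (one thread per vCPU) or round-robin (all vCPUs on one thread), with
 * icount-specific interrupt and clock handling when icount is enabled.
 */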
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    if (qemu_tcg_mttcg_enabled()) {
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
        ops->handle_interrupt = tcg_handle_interrupt;
    } else {
        ops->create_vcpu_thread = rr_start_vcpu_thread;
        ops->kick_vcpu_thread = rr_kick_vcpu_thread;

        if (icount_enabled()) {
            ops->handle_interrupt = icount_handle_interrupt;
            ops->get_virtual_clock = icount_get;
            ops->get_elapsed_ticks = icount_get;
        } else {
            ops->handle_interrupt = tcg_handle_interrupt;
        }
    }

    ops->supports_guest_debug = tcg_supports_guest_debug;
    ops->insert_breakpoint = tcg_insert_breakpoint;
    ops->remove_breakpoint = tcg_remove_breakpoint;
    ops->remove_all_breakpoints = tcg_remove_all_breakpoints;
}
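/*
 * QOM boilerplate: register an abstract "tcg" accel-ops type whose
 * class_init installs tcg_accel_ops_init() as the ops initialiser.
 */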
static void tcg_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->ops_init = tcg_accel_ops_init;
}
static const TypeInfo tcg_accel_ops_type = {
    .name = ACCEL_OPS_NAME("tcg"),

    .parent = TYPE_ACCEL_OPS,
    .class_init = tcg_accel_ops_class_init,
    .abstract = true,
};
module_obj(ACCEL_OPS_NAME("tcg"));
static void tcg_accel_ops_register_types(void)
{
    type_register_static(&tcg_accel_ops_type);
}
type_init(tcg_accel_ops_register_types);