[qemu/ar7.git] / qom/cpu.c
/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qom/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "exec/log.h"
#include "exec/cpu-common.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "trace-root.h"

CPUInterruptHandler cpu_interrupt_handler;

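/* Return the CPU whose architecture-specific ID (as reported by the
 * CPUClass::get_arch_id hook) equals @id, or NULL if no CPU matches.
 */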
CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}

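/* Create and realize a CPU from a "model[,feature=value,...]" string:
 * look up the model-specific class, parse the optional feature list and
 * then instantiate and realize the object.  Returns NULL on failure.
 */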
CPUState *cpu_generic_init(const char *typename, const char *cpu_model)
{
    CPUState *cpu = NULL;
    ObjectClass *oc;
    CPUClass *cc;
    Error *err = NULL;
    gchar **model_pieces;

    model_pieces = g_strsplit(cpu_model, ",", 2);

    oc = cpu_class_by_name(typename, model_pieces[0]);
    if (oc == NULL) {
        g_strfreev(model_pieces);
        return NULL;
    }

    cc = CPU_CLASS(oc);
    /* TODO: all callers of cpu_generic_init() need to be converted to
     * call parse_features() only once, before calling cpu_generic_init().
     */
    cc->parse_features(object_class_get_name(oc), model_pieces[1], &err);
    g_strfreev(model_pieces);
    if (err != NULL) {
        goto out;
    }

    cpu = CPU(object_new(object_class_get_name(oc)));
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err != NULL) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        return NULL;
    }

    return cpu;
}

bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}

static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}

/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to.  cpu_interrupt assumes it is held.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}

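/* Ask @cpu to leave its execution loop as soon as possible; the
 * icount_decr write makes a TCG vCPU break out of the current
 * translation block promptly.
 */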
void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    atomic_set(&cpu->icount_decr.u16.high, -1);
}

int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                            CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                            CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, QEMU watchpoint match can be considered
     * as an architectural match.
     */
    return true;
}

bool target_words_bigendian(void);
static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}

static void cpu_common_noop(CPUState *cpu)
{
}

static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}

GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    GuestPanicInformation *res = NULL;

    if (cc->get_crash_info) {
        res = cc->get_crash_info(cpu);
    }
    return res;
}

void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, cpu_fprintf, flags);
    }
}

void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_statistics) {
        cc->dump_statistics(cpu, f, cpu_fprintf, flags);
    }
}

void cpu_reset(CPUState *cpu)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (klass->reset != NULL) {
        (*klass->reset)(cpu);
    }

    trace_guest_cpu_reset(cpu);
}

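/* Default CPUClass::reset handler: clear the interrupt, halt and
 * exception state shared by all targets, and flush the TB jump cache
 * and softmmu TLB when running under TCG.
 */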
static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->mem_io_vaddr = 0;
    cpu->icount_extra = 0;
    cpu->icount_decr.u32 = 0;
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;

    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}

static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}

ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));

    return cc->class_by_name(cpu_model);
}

static ObjectClass *cpu_common_class_by_name(const char *cpu_model)
{
    return NULL;
}

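/* Default CPUClass::parse_features hook: split @features on commas and
 * register each "key=value" pair as a global property for @typename.
 * The static flag ensures the globals are registered only once.
 */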
static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    char *val;
    static bool cpu_globals_initialized;

    /* TODO: all callers of ->parse_features() need to be changed to
     * call it only once, so we can remove this check (or change it
     * to assert(!cpu_globals_initialized)).
     * Current callers of ->parse_features() are:
     * - cpu_generic_init()
     */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            prop->errp = &error_fatal;
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}

static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}

static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    cpu_exec_unrealizefn(cpu);
}

static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}

static void cpu_common_finalize(Object *obj)
{
}

static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}

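/* Default handler for cpu_interrupt(): raise the requested bits in
 * interrupt_request and kick the vCPU unless the caller is already
 * running on its thread.
 */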
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;

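/* Install the default implementations of the CPUClass hooks and the
 * common DeviceClass callbacks; per-target CPU classes override these
 * as needed.
 */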
static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->class_by_name = cpu_common_class_by_name;
    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    dc->props = cpu_common_props;
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}

static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)