/*
 * QEMU CPU model
 *
 * Copyright (c) 2012-2014 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "sysemu/hw_accel.h"
#include "qemu/notify.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "sysemu/tcg.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "trace-root.h"
#include "qemu/plugin.h"

CPUInterruptHandler cpu_interrupt_handler;
CPUState *cpu_by_arch_id(int64_t id)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->get_arch_id(cpu) == id) {
            return cpu;
        }
    }
    return NULL;
}

bool cpu_exists(int64_t id)
{
    return !!cpu_by_arch_id(id);
}
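
/*
 * Illustrative usage, not code from this file: board code typically
 * instantiates its vCPUs from the machine's configured CPU type, e.g.
 *
 *     CPUState *cs = cpu_create(machine->cpu_type);
 *
 * cpu_create() exits QEMU on a realize failure, so callers do not need
 * their own error handling.
 */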
CPUState *cpu_create(const char *typename)
{
    Error *err = NULL;
    CPUState *cpu = CPU(object_new(typename));
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);
    if (err != NULL) {
        error_report_err(err);
        object_unref(OBJECT(cpu));
        exit(EXIT_FAILURE);
    }
    return cpu;
}
bool cpu_paging_enabled(const CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_paging_enabled(cpu);
}

static bool cpu_common_get_paging_enabled(const CPUState *cpu)
{
    return false;
}

void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->get_memory_mapping(cpu, list, errp);
}

static void cpu_common_get_memory_mapping(CPUState *cpu,
                                          MemoryMappingList *list,
                                          Error **errp)
{
    error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
}
/* Resetting the IRQ comes from across the code base so we take the
 * BQL here if we need to.  cpu_interrupt assumes it is held.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask)
{
    bool need_lock = !qemu_mutex_iothread_locked();

    if (need_lock) {
        qemu_mutex_lock_iothread();
    }
    cpu->interrupt_request &= ~mask;
    if (need_lock) {
        qemu_mutex_unlock_iothread();
    }
}
void cpu_exit(CPUState *cpu)
{
    atomic_set(&cpu->exit_request, 1);
    /* Ensure cpu_exec will see the exit request after TCG has exited. */
    smp_wmb();
    atomic_set(&cpu->icount_decr_ptr->u16.high, -1);
}
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf32_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf32_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf32_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}

int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_qemunote)(f, cpu, opaque);
}

static int cpu_common_write_elf64_qemunote(WriteCoreDumpFunction f,
                                           CPUState *cpu, void *opaque)
{
    return 0;
}

int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return (*cc->write_elf64_note)(f, cpu, cpuid, opaque);
}

static int cpu_common_write_elf64_note(WriteCoreDumpFunction f,
                                       CPUState *cpu, int cpuid,
                                       void *opaque)
{
    return -1;
}
static int cpu_common_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
{
    return 0;
}

static bool cpu_common_debug_check_watchpoint(CPUState *cpu, CPUWatchpoint *wp)
{
    /* If no extra check is required, QEMU watchpoint match can be considered
     * as an architectural match.
     */
    return true;
}

static bool cpu_common_virtio_is_big_endian(CPUState *cpu)
{
    return target_words_bigendian();
}

static void cpu_common_noop(CPUState *cpu)
{
}

static bool cpu_common_exec_interrupt(CPUState *cpu, int int_req)
{
    return false;
}
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    GuestPanicInformation *res = NULL;

    if (cc->get_crash_info) {
        res = cc->get_crash_info(cpu);
    }
    return res;
}

void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_state) {
        cpu_synchronize_state(cpu);
        cc->dump_state(cpu, f, flags);
    }
}

void cpu_dump_statistics(CPUState *cpu, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->dump_statistics) {
        cc->dump_statistics(cpu, flags);
    }
}
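
/*
 * Sketch of the usual override pattern (hypothetical "foo" names, not
 * code from this file): a target CPU's class_init saves the parent
 * reset handler and installs its own, which chains back to it:
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *         FooCPUClass *fcc = FOO_CPU_CLASS(oc);
 *
 *         cpu_class_set_parent_reset(cc, foo_cpu_reset, &fcc->parent_reset);
 *     }
 *
 *     static void foo_cpu_reset(CPUState *cs)
 *     {
 *         FooCPUClass *fcc = FOO_CPU_GET_CLASS(cs);
 *
 *         fcc->parent_reset(cs);
 *         ... target-specific reset of architectural state ...
 *     }
 */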
void cpu_class_set_parent_reset(CPUClass *cc,
                                void (*child_reset)(CPUState *cpu),
                                void (**parent_reset)(CPUState *cpu))
{
    *parent_reset = cc->reset;
    cc->reset = child_reset;
}

void cpu_reset(CPUState *cpu)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (klass->reset != NULL) {
        (*klass->reset)(cpu);
    }

    trace_guest_cpu_reset(cpu);
}
static void cpu_common_reset(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
        log_cpu_state(cpu, cc->reset_dump_flags);
    }

    cpu->interrupt_request = 0;
    cpu->halted = 0;
    cpu->mem_io_pc = 0;
    cpu->icount_extra = 0;
    atomic_set(&cpu->icount_decr_ptr->u32, 0);
    cpu->can_do_io = 1;
    cpu->exception_index = -1;
    cpu->crash_occurred = false;
    cpu->cflags_next_tb = -1;

    if (tcg_enabled()) {
        cpu_tb_jmp_cache_clear(cpu);

        tcg_flush_softmmu_tlb(cpu);
    }
}

static bool cpu_common_has_work(CPUState *cs)
{
    return false;
}
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
{
    CPUClass *cc = CPU_CLASS(object_class_by_name(typename));

    assert(cpu_model && cc->class_by_name);
    return cc->class_by_name(cpu_model);
}
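
/*
 * Worked example (the type and property names below are made up): parsing
 * the feature string "pmu=off,sve=on" for CPU type "foo-cpu" registers
 * globals roughly equivalent to
 *
 *     -global foo-cpu.pmu=off -global foo-cpu.sve=on
 *
 * which qdev then applies to every CPU of that type as it is realized.
 */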
static void cpu_common_parse_features(const char *typename, char *features,
                                      Error **errp)
{
    char *val;
    static bool cpu_globals_initialized;
    /* Single "key=value" string being parsed */
    char *featurestr = features ? strtok(features, ",") : NULL;

    /* should be called only once, catch invalid users */
    assert(!cpu_globals_initialized);
    cpu_globals_initialized = true;

    while (featurestr) {
        val = strchr(featurestr, '=');
        if (val) {
            GlobalProperty *prop = g_new0(typeof(*prop), 1);
            *val = 0;
            val++;
            prop->driver = typename;
            prop->property = g_strdup(featurestr);
            prop->value = g_strdup(val);
            qdev_prop_register_global(prop);
        } else {
            error_setg(errp, "Expected key=value format, found %s.",
                       featurestr);
            return;
        }
        featurestr = strtok(NULL, ",");
    }
}
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);
    Object *machine = qdev_get_machine();

    /* qdev_get_machine() can return something that's not TYPE_MACHINE
     * if this is one of the user-only emulators; in that case there's
     * no need to check the ignore_memory_transaction_failures board flag.
     */
    if (object_dynamic_cast(machine, TYPE_MACHINE)) {
        ObjectClass *oc = object_get_class(machine);
        MachineClass *mc = MACHINE_CLASS(oc);

        if (mc) {
            cpu->ignore_memory_transaction_failures =
                mc->ignore_memory_transaction_failures;
        }
    }

    if (dev->hotplugged) {
        cpu_synchronize_post_init(cpu);
        cpu_resume(cpu);
    }

    /* NOTE: latest generic point where the cpu is fully realized */
    trace_init_vcpu(cpu);
}
static void cpu_common_unrealizefn(DeviceState *dev, Error **errp)
{
    CPUState *cpu = CPU(dev);

    /* NOTE: latest generic point before the cpu is fully unrealized */
    trace_fini_vcpu(cpu);
    qemu_plugin_vcpu_exit_hook(cpu);
    cpu_exec_unrealizefn(cpu);
}
static void cpu_common_initfn(Object *obj)
{
    CPUState *cpu = CPU(obj);
    CPUClass *cc = CPU_GET_CLASS(obj);

    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu->cluster_index = UNASSIGNED_CLUSTER_INDEX;
    cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
    /* *-user doesn't have configurable SMP topology */
    /* the default value is changed by qemu_init_vcpu() for softmmu */
    cpu->nr_cores = 1;
    cpu->nr_threads = 1;

    qemu_mutex_init(&cpu->work_mutex);
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);

    cpu_exec_initfn(cpu);
}

static void cpu_common_finalize(Object *obj)
{
    CPUState *cpu = CPU(obj);

    qemu_mutex_destroy(&cpu->work_mutex);
}
static int64_t cpu_common_get_arch_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
{
    return addr;
}

static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

CPUInterruptHandler cpu_interrupt_handler = generic_handle_interrupt;
static void cpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    CPUClass *k = CPU_CLASS(klass);

    k->parse_features = cpu_common_parse_features;
    k->reset = cpu_common_reset;
    k->get_arch_id = cpu_common_get_arch_id;
    k->has_work = cpu_common_has_work;
    k->get_paging_enabled = cpu_common_get_paging_enabled;
    k->get_memory_mapping = cpu_common_get_memory_mapping;
    k->write_elf32_qemunote = cpu_common_write_elf32_qemunote;
    k->write_elf32_note = cpu_common_write_elf32_note;
    k->write_elf64_qemunote = cpu_common_write_elf64_qemunote;
    k->write_elf64_note = cpu_common_write_elf64_note;
    k->gdb_read_register = cpu_common_gdb_read_register;
    k->gdb_write_register = cpu_common_gdb_write_register;
    k->virtio_is_big_endian = cpu_common_virtio_is_big_endian;
    k->debug_excp_handler = cpu_common_noop;
    k->debug_check_watchpoint = cpu_common_debug_check_watchpoint;
    k->cpu_exec_enter = cpu_common_noop;
    k->cpu_exec_exit = cpu_common_noop;
    k->cpu_exec_interrupt = cpu_common_exec_interrupt;
    k->adjust_watchpoint_address = cpu_adjust_watchpoint_address;
    set_bit(DEVICE_CATEGORY_CPU, dc->categories);
    dc->realize = cpu_common_realizefn;
    dc->unrealize = cpu_common_unrealizefn;
    device_class_set_props(dc, cpu_common_props);
    /*
     * Reason: CPUs still need special care by board code: wiring up
     * IRQs, adding reset handlers, halting non-first CPUs, ...
     */
    dc->user_creatable = false;
}
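
/*
 * TYPE_CPU is abstract; each target registers a concrete subclass of it.
 * An illustrative sketch (hypothetical names, not code from this file):
 *
 *     static const TypeInfo foo_cpu_type_info = {
 *         .name = TYPE_FOO_CPU,
 *         .parent = TYPE_CPU,
 *         .instance_size = sizeof(FooCPU),
 *         .instance_init = foo_cpu_initfn,
 *         .class_size = sizeof(FooCPUClass),
 *         .class_init = foo_cpu_class_init,
 *     };
 */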
static const TypeInfo cpu_type_info = {
    .name = TYPE_CPU,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(CPUState),
    .instance_init = cpu_common_initfn,
    .instance_finalize = cpu_common_finalize,
    .abstract = true,
    .class_size = sizeof(CPUClass),
    .class_init = cpu_class_init,
};

static void cpu_register_types(void)
{
    type_register_static(&cpu_type_info);
}

type_init(cpu_register_types)