From 08e73c48b053566bfe0c994f154f73991cd0ff0e Mon Sep 17 00:00:00 2001
From: Pranith Kumar
Date: Thu, 23 Feb 2017 18:29:15 +0000
Subject: [PATCH] tcg: handle EXCP_ATOMIC exception for system emulation
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

The patch enables handling atomic code in the guest. This would
preferably be done in cpu_handle_exception(), but the current
assumptions regarding when we can execute atomic sections cause a
deadlock.

The current mechanism discards the flags which were set in atomic
execution. We ensure they are properly saved by calling the
cc->cpu_exec_enter/exit() functions around the loop.

As we are running cpu_exec_step_atomic() from the outermost loop we
need to avoid an abort() when single-stepping over atomic code, since
the debug exception longjmp will point to the sigsetjmp in
cpu_exec(). We do this by setting a new jmp_env so that it jumps back
here on an exception.

Signed-off-by: Pranith Kumar
[AJB: tweak title, merge with new patches, add mmap_lock]
Signed-off-by: Alex Bennée
Reviewed-by: Richard Henderson
CC: Paolo Bonzini
---
 cpu-exec.c | 43 +++++++++++++++++++++++++++++++------------
 cpus.c     |  9 +++++++++
 2 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index 2edd26e823..1a5ad4889d 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -228,24 +228,43 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
 
 static void cpu_exec_step(CPUState *cpu)
 {
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
 
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
-    tb_lock();
-    tb = tb_gen_code(cpu, pc, cs_base, flags,
-                     1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
-    tb->orig_tb = NULL;
-    tb_unlock();
-    /* execute the generated code */
-    trace_exec_tb_nocache(tb, pc);
-    cpu_tb_exec(cpu, tb);
-    tb_lock();
-    tb_phys_invalidate(tb, -1);
-    tb_free(tb);
-    tb_unlock();
+    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
+        mmap_lock();
+        tb_lock();
+        tb = tb_gen_code(cpu, pc, cs_base, flags,
+                         1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
+        tb->orig_tb = NULL;
+        tb_unlock();
+        mmap_unlock();
+
+        cc->cpu_exec_enter(cpu);
+        /* execute the generated code */
+        trace_exec_tb_nocache(tb, pc);
+        cpu_tb_exec(cpu, tb);
+        cc->cpu_exec_exit(cpu);
+
+        tb_lock();
+        tb_phys_invalidate(tb, -1);
+        tb_free(tb);
+        tb_unlock();
+    } else {
+        /* We may have exited due to another problem here, so we need
+         * to reset any tb_locks we may have taken but didn't release.
+         * The mmap_lock is dropped by tb_gen_code if it runs out of
+         * memory.
+         */
+#ifndef CONFIG_SOFTMMU
+        tcg_debug_assert(!have_mmap_lock());
+#endif
+        tb_lock_reset();
+    }
 }
 
 void cpu_exec_step_atomic(CPUState *cpu)
diff --git a/cpus.c b/cpus.c
index bfee326d30..8200ac6b75 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1348,6 +1348,11 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
                 if (r == EXCP_DEBUG) {
                     cpu_handle_guest_debug(cpu);
                     break;
+                } else if (r == EXCP_ATOMIC) {
+                    qemu_mutex_unlock_iothread();
+                    cpu_exec_step_atomic(cpu);
+                    qemu_mutex_lock_iothread();
+                    break;
                 }
             } else if (cpu->stop) {
                 if (cpu->unplug) {
@@ -1458,6 +1463,10 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
                  */
                 g_assert(cpu->halted);
                 break;
+            case EXCP_ATOMIC:
+                qemu_mutex_unlock_iothread();
+                cpu_exec_step_atomic(cpu);
+                qemu_mutex_lock_iothread();
             default:
                 /* Ignore everything else? */
                 break;
-- 
2.11.4.GIT