From a5c293a87f919351d7e6380f81066a19dee3d092 Mon Sep 17 00:00:00 2001
From: sonic
Date: Thu, 22 Dec 2011 06:18:19 +0000
Subject: [PATCH] Support red zone on x86-64

git-svn-id: https://svn.aros.org/svn/aros/trunk/AROS@43164 fb15a70f-31f2-0310-bbcc-cdcc74a49acc
---
 arch/all-mingw32/kernel/host_intern.h           |  1 +
 arch/all-mingw32/kernel/host_intr.c             | 12 +++++++-
 arch/all-mingw32/kernel/leaveinterrupt_x86_64.s | 41 ++++++++++++++++++++-----
 3 files changed, 45 insertions(+), 9 deletions(-)
 rewrite arch/all-mingw32/kernel/leaveinterrupt_x86_64.s (74%)

diff --git a/arch/all-mingw32/kernel/host_intern.h b/arch/all-mingw32/kernel/host_intern.h
index 35eb5b6e56..12b4b9c9ff 100644
--- a/arch/all-mingw32/kernel/host_intern.h
+++ b/arch/all-mingw32/kernel/host_intern.h
@@ -13,3 +13,4 @@ struct LeaveInterruptContext
 extern HANDLE conin, conout;
 
 void core_LeaveInterrupt(void);
+extern unsigned char core_LeaveInt_End;
diff --git a/arch/all-mingw32/kernel/host_intr.c b/arch/all-mingw32/kernel/host_intr.c
index 2f194f9f6d..0d5d7a1ad2 100644
--- a/arch/all-mingw32/kernel/host_intr.c
+++ b/arch/all-mingw32/kernel/host_intr.c
@@ -119,6 +119,16 @@ LONG WINAPI exceptionHandler(EXCEPTION_POINTERS *exptr)
     return EXCEPTION_CONTINUE_EXECUTION;
 }
 
+#ifdef __x86_64__
+/*
+ * Magic: on x86-64 we must not preempt within a certain code range. Not good,
+ * but I can't offer anything better. See leaveinterrupt_x86_64.s.
+ */
+#define INT_SAFE(ctx) ((ctx.Rip < (DWORD64)core_LeaveInterrupt) || (ctx.Rip >= (DWORD64)&core_LeaveInt_End))
+#else
+#define INT_SAFE(ctx) TRUE
+#endif
+
 DWORD WINAPI TaskSwitcher()
 {
     DWORD obj;
@@ -144,7 +154,7 @@ DWORD WINAPI TaskSwitcher()
         }
 
         /* Process interrupts if we are allowed to */
-        if (Ints_Enabled)
+        if (Ints_Enabled && INT_SAFE(MainCtx))
         {
             Supervisor = 1;
             /*
diff --git a/arch/all-mingw32/kernel/leaveinterrupt_x86_64.s b/arch/all-mingw32/kernel/leaveinterrupt_x86_64.s
dissimilarity index 74%
index 23b87cdf53..2f5dc56db1 100644
--- a/arch/all-mingw32/kernel/leaveinterrupt_x86_64.s
+++ b/arch/all-mingw32/kernel/leaveinterrupt_x86_64.s
@@ -1,8 +1,33 @@
-	.globl core_LeaveInterrupt
-
-core_LeaveInterrupt:
-	pushq 0(%rax)		# Push real return address
-	pushq 8(%rax)		# Push real rax contents
-	movl $1, Ints_Enabled	# Now enable interrupts
-	popq %rax		# Restore rax and leave
-	ret
+#
+# Copyright © 2010-2011, The AROS Development Team. All rights reserved.
+# $Id$
+#
+# Desc: Exit from emulated interrupt with enabling, x86-64 version
+# Lang: English
+#
+
+# Theory of operation:
+# x86-64 has a red zone of 128 bytes below rsp. This makes it impossible to use
+# push/pop instructions here, unlike on i386, because doing so would destroy
+# red zone data.
+# Here we skip the red zone and use -128(%rsp) as our intermediate storage. However,
+# there's an important problem with this. x86 is not ARM, and we can't execute
+# "addq $128, %rsp; jmpq *-128(%rsp)" atomically. This means we can be preempted right
+# before the final jump. This is dangerous, because the next time we get back here, our
+# own address will be in 0(%rax), overwriting -128(%rsp) and causing an infinite loop.
+# To work around this issue, the interrupt thread checks the rip value, and if rip
+# points at this code, interrupts are considered disabled.
+# We still have to use the stack for temporary storage because we first need to restore
+# rax and only then jump to the return address.
+
+	.globl core_LeaveInterrupt
+	.globl core_LeaveInt_End
+
+core_LeaveInterrupt:
+	movq %rbx, -128(%rsp)	# Save rbx
+	movq 0(%rax), %rbx	# Get real return address into rbx
+	xchg %rbx, -128(%rsp)	# Store the return address and restore rbx
+	movq 8(%rax), %rax	# Restore real rax contents
+	movl $1, Ints_Enabled	# Now enable interrupts
+	jmpq *-128(%rsp)	# And jump to the real return address
+core_LeaveInt_End:
-- 
2.11.4.GIT
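
A note on the new stub's calling convention, for readers outside the AROS tree: core_LeaveInterrupt expects %rax to point at a two-slot save area, with the real return address at 0(%rax) and the interrupted rax value at 8(%rax). The sketch below shows how a dispatcher might arm the stub before resuming the main thread. The struct layout is dictated by what the assembly reads, but the field names, the leave_ctx storage, and the DeliverViaStub() helper are illustrative assumptions, not the actual AROS code:

#include <windows.h>

extern void core_LeaveInterrupt(void);

/* Layout dictated by the assembly: 0(%rax) = return address, 8(%rax) = rax */
struct LeaveInterruptContext
{
    DWORD64 pc;     /* real return address, read by "movq 0(%rax), %rbx" */
    DWORD64 rax;    /* real rax contents, read by "movq 8(%rax), %rax"   */
};

static struct LeaveInterruptContext leave_ctx;  /* illustrative storage */

/* Redirect the suspended main thread through the exit stub */
static void DeliverViaStub(CONTEXT *ctx)
{
    leave_ctx.pc  = ctx->Rip;                 /* where to resume          */
    leave_ctx.rax = ctx->Rax;                 /* rax value at that point  */

    ctx->Rax = (DWORD64)&leave_ctx;           /* stub dereferences %rax   */
    ctx->Rip = (DWORD64)core_LeaveInterrupt;  /* enter the stub           */
    /*
     * The caller then applies SetThreadContext() and ResumeThread().
     * Interrupts stay logically disabled until the stub executes
     * "movl $1, Ints_Enabled" just before the final jmpq.
     */
}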
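
The INT_SAFE() macro added to host_intr.c is the other half of the workaround: the task switcher refuses to deliver an interrupt while the main thread's rip is inside the [core_LeaveInterrupt, core_LeaveInt_End) window, because the stub cannot be restarted once it has begun consuming the save area. Here is a minimal self-contained sketch of the same test, assuming the Win32 thread-context API; InterruptSafe() and hMainThread are illustrative names, and the patch itself applies the comparison directly to the already-captured MainCtx instead:

#include <windows.h>

extern void core_LeaveInterrupt(void);
extern unsigned char core_LeaveInt_End;

/*
 * TRUE when it is safe to deliver an emulated interrupt. The main
 * thread must already be suspended; otherwise the context captured
 * below is a snapshot of a moving target.
 */
static BOOL InterruptSafe(HANDLE hMainThread)
{
    CONTEXT ctx;

    ctx.ContextFlags = CONTEXT_CONTROL;   /* Rip is in the control group */
    if (!GetThreadContext(hMainThread, &ctx))
        return FALSE;

    /* Unsafe only inside [core_LeaveInterrupt, core_LeaveInt_End) */
    return (ctx.Rip <  (DWORD64)core_LeaveInterrupt) ||
           (ctx.Rip >= (DWORD64)&core_LeaveInt_End);
}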