 * very-low-level utilities for runtime support
 * This software is part of the SBCL system. See the README file for
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.

#define LANGUAGE_ASSEMBLY

#include "genesis/closure.h"
#include "genesis/funcallable-instance.h"
#include "genesis/fdefn.h"
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"
#include "genesis/thread.h"
/* Minimize conditionalization for different OS naming schemes.
 * (As of sbcl-0.8.10, this seems no longer to be much of an issue,
 * since everyone has converged on ELF. If this generality really
 * turns out not to matter, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
 * (Except Win32, which is unlikely ever to be ELF, sorry. -- AB 2005-12-08)
#if defined __linux__ || defined LISP_FEATURE_FREEBSD || defined __NetBSD__ || defined __OpenBSD__ || \
    defined __sun || defined __DragonFly__
#define GNAME(var) var
#define GNAME(var) _##var

/* Get the right type of alignment. Linux, FreeBSD and NetBSD (but not OpenBSD)
 * want alignment in bytes.
 * (As in the GNAME() definitions above, as of sbcl-0.8.10, this seems
 * no longer to be much of an issue, since everyone has converged on
 * the same value. If this generality really turns out not to
 * matter any more, perhaps it's just clutter we could get
 * rid of? -- WHN 2004-04-18)
#if defined(__linux__) || defined(LISP_FEATURE_FREEBSD) || defined(__NetBSD__) || defined(__OpenBSD__) || \
    defined(__sun) || defined(LISP_FEATURE_WIN32) || defined(__DragonFly__)
#define align_16byte    16
#define align_page      4096
#define align_16byte    4
 * The assembler used for win32 doesn't like .type or .size directives,
 * so we want to omit them conditionally. So let's wrap them in macros
 * that are defined to be no-ops on win32. Hopefully this still works on
#if !defined(LISP_FEATURE_WIN32) && !defined(LISP_FEATURE_DARWIN)
#define TYPE(name) .type name,@function
#define SIZE(name) .size name,.-name
/* Helper macros for access to thread-local slots for both OS types:
 * ------------------------------------------------------------------------
 * ================== __________
 * | Win32 %FS base | ----> | | 0
 * ================== | | 1
 * TLS slots start here> |XXXXXXXX| e10 = TEB_STATIC_TLS_SLOTS_OFFSET
 * TLS ends here> ,- |XXXXXXXX| e4f = TEB_STATIC_TLS_SLOTS_OFFSET+63
 * | ---------- "os_address" ----.
 * | big blob of SBCL-specific thread-local data |
 * | |----------------------------------------| <--'
 * | | CONTROL, BINDING, ALIEN STACK |
 * ================== | |----------------------------------------|
 * | Linux %FS base | -->| | FFI stack pointer |
 * ================== | | (extra page for mprotect) |
 * \ |----------------------------------------|
 * (union p_t_d) -----> \-> | struct thread { | dynamic_values[0] |
 * [tls data begins] | } | ... | <-
 * [declared end of p_t_d] |----------------------------------------| . |
 * . | [TLS_SIZE-1] | <-|
 * [tls data actually ends] |----------------------------------------| |
 * . |----------------------------------------| |
 * . | struct nonpointer_thread_data { } | |
 * . ------------------------------------------ |
 * [blob actually ends] |
 * ______________________ /
 * | struct symbol { | /
 * | fixnum tls_index; // fixnum value relative to union /
 * | } | (< TLS_SIZE = 4096)
 * ---------------------|
#ifdef LISP_FEATURE_WIN32
# define TEB_STATIC_TLS_SLOTS_OFFSET 0xE10
# define TEB_SBCL_THREAD_BASE_OFFSET (TEB_STATIC_TLS_SLOTS_OFFSET+(63*4))
# define SBCL_THREAD_BASE_EA %fs:TEB_SBCL_THREAD_BASE_OFFSET
# define MAYBE_FS(addr) addr
# define LoadTlSymbolValueAddress(symbol,reg) ;          \
        movl    SBCL_THREAD_BASE_EA, reg ;               \
        addl    (symbol+SYMBOL_TLS_INDEX_OFFSET), reg ;
# define LoadCurrentThreadSlot(offset,reg);              \
        movl    SBCL_THREAD_BASE_EA, reg ;               \
        movl    offset(reg), reg ;
#elif defined(LISP_FEATURE_LINUX) || defined(LISP_FEATURE_SUNOS) || defined(LISP_FEATURE_FREEBSD) || \
      defined(LISP_FEATURE_DRAGONFLY)
/* %fs:0 refers to the current thread. Useful! Less usefully,
 * Linux/x86 isn't capable of reporting a faulting si_addr on a
 * segment as defined above (whereas faults on the segment that %gs
 * usually points to are reported just fine...).
 * But we have the thread's address stored in the THIS slot,
 * so that within the thread
 *   movl %fs:THIS_OFFSET,x
 * stores the absolute address of %fs:0 into x. */
# define SBCL_THREAD_BASE_EA %fs:THREAD_THIS_OFFSET
# define MAYBE_FS(addr) addr
/* perhaps there's an OS out there that actually supports %fs without
 * jumping through hoops, so just in case, here's a default definition: */
# define SBCL_THREAD_BASE_EA $0
# define MAYBE_FS(addr) %fs:addr
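
/* Illustration only: in C-like terms (the helper name below is hypothetical,
 * not one of the runtime's actual functions), the macros above locate a
 * symbol's thread-local value by adding the symbol's raw tls_index word,
 * which is already a byte offset into the thread's dynamic_values area,
 * to the thread base read via %fs:
 *
 *   static inline char *
 *   tls_value_address(char *thread_base, unsigned long tls_index_word)
 *   {
 *       // equivalent of LoadTlSymbolValueAddress(symbol,reg):
 *       //   reg = thread_base; reg += symbol->tls_index;
 *       return thread_base + tls_index_word;
 *   }
 */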
/* gas can't parse 4096LU; redefine */
#if BACKEND_PAGE_BYTES == 4096
# undef BACKEND_PAGE_BYTES
# define BACKEND_PAGE_BYTES 4096
#elif BACKEND_PAGE_BYTES == 32768
# undef BACKEND_PAGE_BYTES
# define BACKEND_PAGE_BYTES 32768
# error BACKEND_PAGE_BYTES mismatch

/* OAOOM because we don't have the C headers here */
#define THREAD_CSP_PAGE_SIZE BACKEND_PAGE_BYTES

/* the CSP page sits right before the thread */
#define THREAD_SAVED_CSP_OFFSET (-THREAD_CSP_PAGE_SIZE)
 * x86/darwin (as of MacOS X 10.4.5) doesn't reliably fire signal
 * handlers (SIGTRAP or Mach exception handlers) for 0xCC, so we have
 * to use ud2 instead. ud2 is an undefined opcode, #x0b0f, or
 * 0F 0B in little-endian byte order, that causes SIGILL to fire. We check
 * for this instruction in the SIGILL handler and, if we see it, we
 * advance the EIP by two bytes to skip over the ud2 instruction and
 * call sigtrap_handler. */
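
/* A minimal sketch of the check described above, assuming a POSIX-style
 * handler with Linux-style ucontext access (the real handler differs per
 * platform and lives in the runtime's C sources):
 *
 *   #include <signal.h>
 *   #include <ucontext.h>
 *
 *   static void sigill_handler(int sig, siginfo_t *info, void *void_context)
 *   {
 *       ucontext_t *ctx = void_context;
 *       unsigned char *pc =
 *           (unsigned char *)ctx->uc_mcontext.gregs[REG_EIP];
 *       if (pc[0] == 0x0F && pc[1] == 0x0B) {     // the ud2 opcode bytes
 *           ctx->uc_mcontext.gregs[REG_EIP] += 2; // skip over the ud2
 *           sigtrap_handler(sig, info, void_context);
 *       }
 *   }
 */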
#if defined(LISP_FEATURE_UD2_BREAKPOINTS)

        .globl  GNAME(all_threads)

 * A call to call_into_c preserves esi, edi, and ebp.
 * (The C function will preserve ebx, esi, edi, and ebp across its
 * function call, but we trash ebx ourselves by using it to save the
 * return Lisp address.)
 * Return values are in eax and maybe edx for quads, or st(0) for
 * This should work for Lisp calls C calls Lisp calls C ...
 * FIXME & OAOOM: This duplicates call-out in src/compiler/x86/c-call.lisp,
 * so if you tweak this, change that too!
 * Note on sections specific to LISP_FEATURE_SB_SAFEPOINT:
 *
 * The code below is essential to safepoint-based garbage collection,
 * and several details need to be considered for correct implementation.
 *
 * The stack spilling approach:
 *   On SB-SAFEPOINT platforms, the CALL-OUT vop is defined to spill all
 *   live Lisp TNs to the stack to provide information for conservative
 *   GC cooperatively (avoiding the need to retrieve register values
 *   from POSIX signal contexts or Windows GetThreadContext()).
 *
 * Finding the SP at all:
 *   The main remaining value needed by GC is the stack pointer (SP) at
 *   the moment of entering the foreign function. For this purpose, a
 *   thread-local field for the SP is used. Two stores to that field
 *   are done for each C call, one to save the SP before calling out
 *   and one to undo that store afterwards.
 *
 * Stores as synchronization points:
 *   These two stores delimit the C call: While the SP is set, our
 *   thread is known not to run Lisp code: During GC, memory protection
 *   ensures that no thread proceeds across stores.
 *
 * The return PC issue:
 *   (Note that CALL-OUT has, in principle, two versions: Inline
 *   assembly in the VOP -or- alternatively the out-of-line version you
 *   are currently reading. In reality, safepoint builds currently
 *   lack the inline code entirely.)
 *
 *   Both versions need to take special care with the return PC:
 *   - In the inline version of the code (if it existed), the two stores
 *     would be done directly in the CALL-OUT vop. In that theoretical
 *     implementation, there is a time interval between return of the
 *     actual C call and a second SP store during which the return
 *     address might not be on the stack anymore.
 *   - In this out-of-line version, the stores are done during
 *     call_into_c's frame, but an equivalent problem arises: In order
 *     to present the stack of arguments as our foreign function expects
 *     them, call_into_c has to pop the Lisp return address into a
 *     register first; this register has to be preserved by GENCGC
 *     separately: our return address is not on the stack anymore.
 *   In both cases, stack scanning alone is not sufficient to pin
 *   the return address, and we communicate it to GC explicitly
 *   in addition to the SP.
 *
 * Note on look-alike accessor macros with vastly different behaviour:
 *   THREAD_PC_AROUND_FOREIGN_CALL_OFFSET is an "ordinary" field of the
 *   struct thread, whereas THREAD_SAVED_CSP_OFFSET is a synchronization
 *   point on a potentially write-protected page.
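
/* In rough C-like pseudocode (illustration only; the field names below are
 * simplified stand-ins for the THREAD_..._OFFSET slots used in the assembly):
 *
 *   th->saved_csp              = sp;   // "in foreign code" marker; GC syncs on this
 *   th->pc_around_foreign_call = pc;   // pin the Lisp return address for GC
 *   result = (*foreign_function)(args);
 *   th->saved_csp              = 0;    // back to running Lisp code
 *   th->pc_around_foreign_call = 0;    // unpin the return PC
 */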
        .align  align_16byte,0x90
        .globl  GNAME(call_into_c)
        TYPE(GNAME(call_into_c))
        /* Save the return Lisp address in ebx. */
        /* Setup the NPX for C */
        /* The VOP says regarding CLD: "Clear out DF: Darwin, Windows,
         * and Solaris at least require this, and it should not hurt
         * others either." call_into_c didn't have it, but better safe than
#ifdef LISP_FEATURE_SB_SAFEPOINT
        /* enter safe region: store SP and return PC */
        movl    SBCL_THREAD_BASE_EA,%edi
        movl    %esp,MAYBE_FS(THREAD_SAVED_CSP_OFFSET(%edi))
        movl    %ebx,MAYBE_FS(THREAD_PC_AROUND_FOREIGN_CALL_OFFSET(%edi))

        /* foreign call, preserving ESI, EDI, and EBX */
        call    *%eax             # normal callout using Lisp stack
        /* return values now in eax/edx OR st(0) */

#ifdef LISP_FEATURE_SB_SAFEPOINT
        /* leave region: clear the SP! (Also unpin the return PC.) */
        movl    %ecx,MAYBE_FS(THREAD_SAVED_CSP_OFFSET(%edi))
        movl    %ecx,MAYBE_FS(THREAD_PC_AROUND_FOREIGN_CALL_OFFSET(%edi))

        movl    %eax,%ecx         # remember integer return value

        /* Check for a return FP value. */
        /* The return value is in eax, or eax,edx? */
        /* Set up the NPX stack for Lisp. */
        fldz                      # Ensure no regs are empty.

        /* Restore the return value. */
        movl    %ecx,%eax         # maybe return value

        /* The return result is in st(0). */
        /* Set up the NPX stack for Lisp, placing the result in st(0). */
        fldz                      # Ensure no regs are empty.
        fxch    %st(7)            # Move the result back to st(0).

        /* We don't need to restore eax, because the result is in st(0). */

        /* Return. FIXME: It would be nice to restructure this to use RET. */
        SIZE(GNAME(call_into_c))

        .globl  GNAME(call_into_lisp_first_time)
        TYPE(GNAME(call_into_lisp_first_time))

/* We don't worry too much about saving registers
 * here, because we never expect to return from the initial call to lisp
        .align  align_16byte,0x90
GNAME(call_into_lisp_first_time):
        pushl   %ebp              # Save old frame pointer.
        movl    %esp,%ebp         # Establish new frame.
#ifndef LISP_FEATURE_WIN32
        movl    GNAME(all_threads),%eax
        /* pthread machinery takes care of this for other threads */
        movl    THREAD_CONTROL_STACK_END_OFFSET(%eax),%esp
        /* Win32 -really- doesn't like you switching stacks out from under it. */
        movl    GNAME(all_threads),%eax
        .globl  GNAME(call_into_lisp)
        TYPE(GNAME(call_into_lisp))

/* The C conventions require that ebx, esi, edi, and ebp be preserved
 * across function calls. */

        .align  align_16byte,0x90
GNAME(call_into_lisp):
        pushl   %ebp              # Save old frame pointer.
        movl    %esp,%ebp         # Establish new frame.

        /* Save the NPX state */
        fwait                     # Catch any pending NPX exceptions.
        subl    $108,%esp         # Make room for the NPX state.
        fnsave  (%esp)            # save and reset NPX

        movl    (%esp),%eax       # Load NPX control word.
        andl    $0xfffff2ff,%eax  # Set rounding mode to nearest.
        orl     $0x00000200,%eax  # Set precision to double precision (53-bit mantissa).
        fldcw   (%esp)            # Recover modes.

        fldz                      # Ensure no FP regs are empty.

        /* Save C regs: ebx esi edi. */

        /* Clear descriptor regs. */
        xorl    %eax,%eax         # lexenv
        xorl    %ebx,%ebx         # available
        xorl    %ecx,%ecx         # arg count
        xorl    %edx,%edx         # first arg
        xorl    %edi,%edi         # second arg
        xorl    %esi,%esi         # third arg

        /* no longer in function call */
        movl    %esp,%ebx         # remember current stack
        pushl   %ebx              # Save entry stack on (maybe) new stack.

        /* Establish Lisp args. */
        movl    8(%ebp),%eax      # lexenv?
        movl    12(%ebp),%ebx     # address of arg vec
        movl    16(%ebp),%ecx     # num args
        shll    $2,%ecx           # Make num args into fixnum.
        movl    (%ebx),%edx       # arg0
        movl    4(%ebx),%edi      # arg1
        movl    8(%ebx),%esi      # arg2

        /* Registers eax, ecx, edx, edi, and esi are now live. */

#ifdef LISP_FEATURE_WIN32
        /* Establish an SEH frame. */
#ifdef LISP_FEATURE_SB_THREAD
        /* Save binding stack pointer */
        movl    SBCL_THREAD_BASE_EA, %eax
        movl    THREAD_BINDING_STACK_POINTER_OFFSET(%eax), %eax
        pushl   BINDING_STACK_POINTER + SYMBOL_VALUE_OFFSET
        pushl   $GNAME(exception_handler_wrapper)

        /* Alloc new frame. */
        push    %ebp              # Dummy for return address
        push    %ebp              # fp in save location S1
        mov     %esp,%ebp         # The current sp marks start of new frame.
        sub     $4,%esp           # Ensure 3 slots are allocated, two above.

        call    *CLOSURE_FUN_OFFSET(%eax)

        /* If the function returned multiple values, it will return to
           this point. Lose them. */

        /* A single-value function returns here. */

#ifdef LISP_FEATURE_WIN32
        /* Remove our SEH frame. */

        /* Restore the stack, in case there was a stack change. */

        /* Restore C regs: ebx esi edi. */

        /* Restore the NPX state. */

        movl    %edx,%eax         # c-val
        SIZE(GNAME(call_into_lisp))
/* support for saving and restoring the NPX state from C */

        .globl  GNAME(fpu_save)
        TYPE(GNAME(fpu_save))
        fnsave  (%eax)            # Save the NPX state. (resets NPX)
        SIZE(GNAME(fpu_save))

        .globl  GNAME(fpu_restore)
        TYPE(GNAME(fpu_restore))
        frstor  (%eax)            # Restore the NPX state.
        SIZE(GNAME(fpu_restore))

 * the undefined-function trampoline
        .align  align_16byte,0x90
        .globl  GNAME(undefined_tramp)
        TYPE(GNAME(undefined_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(undefined_tramp):
        pop     4(%ebp)           # Save return PC for backtrace.
        .byte   UNDEFINED_FUN_ERROR
        .byte   sc_DescriptorReg  # eax in the Descriptor-reg SC
        SIZE(GNAME(undefined_tramp))

/* KLUDGE: FIND-ESCAPED-FRAME (SYS:SRC;CODE;DEBUG-INT.LISP) needs
 * to know the name of the function immediately following the
 * undefined-function trampoline. */

 * the closure trampoline
        .align  align_16byte,0x90
        .globl  GNAME(closure_tramp)
        TYPE(GNAME(closure_tramp))
        .byte   0, 0, 0, SIMPLE_FUN_HEADER_WIDETAG
GNAME(closure_tramp):
        movl    FDEFN_FUN_OFFSET(%eax),%eax
        /* FIXME: The '*' after "jmp" in the next line is from PVE's
         * patch posted to the CMU CL mailing list Oct 6, 1999. It looks
         * reasonable, and it certainly seems as though if CMU CL needs it,
         * SBCL needs it too, but I haven't actually verified that it's
         * right. It would be good to find a way to force the flow of
         * control through here to test it. */
        jmp     *CLOSURE_FUN_OFFSET(%eax)
        SIZE(GNAME(closure_tramp))

        .align  align_16byte,0x90
        .globl  GNAME(funcallable_instance_tramp)
        TYPE(GNAME(funcallable_instance_tramp))
GNAME(funcallable_instance_tramp):
        movl    FUNCALLABLE_INSTANCE_FUNCTION_OFFSET(%eax),%eax
        /* KLUDGE: on this platform, whatever kind of function is in %eax
         * now, the first word of it contains the address to jump to. */
        jmp     *CLOSURE_FUN_OFFSET(%eax)
        SIZE(GNAME(funcallable_instance_tramp))
 * fun-end breakpoint magic

/* For an explanation of the magic involved in function-end
 * breakpoints, see the implementation in ppc-assem.S. */

        .globl  GNAME(fun_end_breakpoint_guts)
GNAME(fun_end_breakpoint_guts):
        /* Multiple Value return */
        jc      multiple_value_return
        /* Single value return: The eventual return will now use the
           multiple values return convention but with a return values
        movl    %esp,%ebx         # Set up ebx, the ofp.
        subl    $4,%esp           # Allocate one stack slot for the return value
        movl    $(1 << N_FIXNUM_TAG_BITS),%ecx  # Set up ecx for one return value.
        movl    $(NIL),%edi       # default second value
        movl    $(NIL),%esi       # default third value
multiple_value_return:

        .globl  GNAME(fun_end_breakpoint_trap)
GNAME(fun_end_breakpoint_trap):
        .byte   trap_FunEndBreakpoint
        hlt                       # We should never return here.

        .globl  GNAME(fun_end_breakpoint_end)
GNAME(fun_end_breakpoint_end):

        .globl  GNAME(do_pending_interrupt)
        TYPE(GNAME(do_pending_interrupt))
        .align  align_16byte,0x90
GNAME(do_pending_interrupt):
        .byte   trap_PendingInterrupt
        SIZE(GNAME(do_pending_interrupt))

/* Allocate bytes and return the start of the allocated space
 * in the specified destination register.
 *
 * In the general case the size will be in the destination register.
 *
 * All registers must be preserved except the destination.
 * The C conventions will preserve ebx, esi, edi, and ebp.
 * So only eax, ecx, and edx need special care here.
 *
 * ALLOC factors out the logic of calling alloc(): stack alignment, etc.
 *
 * DEFINE_ALLOC_TO_FOO defines an allocation routine.
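
/* Viewed from C (a sketch for illustration only; the actual entry points are
 * the assembly routines generated below, and alloc() is the runtime's C
 * allocator), a routine generated by DEFINE_ALLOC_TO_FOO(name,size) behaves
 * roughly like:
 *
 *   lispobj *alloc_8_to_eax_equivalent(void)
 *   {
 *       // every register other than the destination is saved and restored;
 *       // ALLOC() aligns the stack where required (Darwin), pushes the size,
 *       // and calls alloc(); the result lands in the destination register.
 *       return alloc(8);
 *   }
 */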
#ifdef LISP_FEATURE_DARWIN
#define ALLOC(size)                                             \
        pushl   %ebp;             /* Save EBP */                \
        movl    %esp,%ebp;        /* Save ESP to EBP */         \
        pushl   $0;               /* Reserve space for arg */   \
        andl    $0xfffffff0,%esp; /* Align stack to 16 bytes */ \
        movl    size, (%esp);     /* Argument to alloc */       \
        movl    %ebp,%esp;        /* Restore ESP from EBP */    \
        popl    %ebp;             /* Restore EBP */
#define ALLOC(size)                                             \
        pushl   size;             /* Argument to alloc */       \
        addl    $4,%esp;          /* Pop argument */

#define DEFINE_ALLOC_TO_EAX(name,size)                          \
        .globl  GNAME(name);                                    \
        .align  align_16byte,0x90;                              \
        pushl   %ecx;             /* Save ECX and EDX */        \
        popl    %edx;             /* Restore ECX and EDX */     \

#define DEFINE_ALLOC_TO_ECX(name,size)                          \
        .globl  GNAME(name);                                    \
        .align  align_16byte,0x90;                              \
        pushl   %eax;             /* Save EAX and EDX */        \
        movl    %eax,%ecx;        /* Result to destination */   \

#define DEFINE_ALLOC_TO_EDX(name,size)                          \
        .globl  GNAME(name);                                    \
        .align  align_16byte,0x90;                              \
        pushl   %eax;             /* Save EAX and ECX */        \
        movl    %eax,%edx;        /* Result to destination */   \

#define DEFINE_ALLOC_TO_REG(name,reg,size)                      \
        .globl  GNAME(name);                                    \
        .align  align_16byte,0x90;                              \
        pushl   %eax;             /* Save EAX, ECX, and EDX */  \
        movl    %eax,reg;         /* Result to destination */   \
DEFINE_ALLOC_TO_EAX(alloc_to_eax,%eax)
DEFINE_ALLOC_TO_EAX(alloc_8_to_eax,$8)
DEFINE_ALLOC_TO_EAX(alloc_16_to_eax,$16)

DEFINE_ALLOC_TO_ECX(alloc_to_ecx,%ecx)
DEFINE_ALLOC_TO_ECX(alloc_8_to_ecx,$8)
DEFINE_ALLOC_TO_ECX(alloc_16_to_ecx,$16)

DEFINE_ALLOC_TO_EDX(alloc_to_edx,%edx)
DEFINE_ALLOC_TO_EDX(alloc_8_to_edx,$8)
DEFINE_ALLOC_TO_EDX(alloc_16_to_edx,$16)

DEFINE_ALLOC_TO_REG(alloc_to_ebx,%ebx,%ebx)
DEFINE_ALLOC_TO_REG(alloc_8_to_ebx,%ebx,$8)
DEFINE_ALLOC_TO_REG(alloc_16_to_ebx,%ebx,$16)

DEFINE_ALLOC_TO_REG(alloc_to_esi,%esi,%esi)
DEFINE_ALLOC_TO_REG(alloc_8_to_esi,%esi,$8)
DEFINE_ALLOC_TO_REG(alloc_16_to_esi,%esi,$16)

DEFINE_ALLOC_TO_REG(alloc_to_edi,%edi,%edi)
DEFINE_ALLOC_TO_REG(alloc_8_to_edi,%edi,$8)
DEFINE_ALLOC_TO_REG(alloc_16_to_edi,%edi,$16)

/* Called from lisp when an inline allocation overflows.
 * Every register except the result needs to be preserved.
 * We depend on C to preserve ebx, esi, edi, and ebp.
 * But where necessary we must save eax, ecx, and edx. */

#ifdef LISP_FEATURE_SB_THREAD
#define START_REGION %fs:THREAD_ALLOC_REGION_OFFSET
#define START_REGION GNAME(boxed_region)

#if defined(LISP_FEATURE_SB_THREAD) && defined(LISP_FEATURE_WIN32)
#define ALLOC_OVERFLOW(size,scratch)                            \
        movl    SBCL_THREAD_BASE_EA, scratch;                   \
        /* Calculate the size for the allocation. */            \
        subl    THREAD_ALLOC_REGION_OFFSET(scratch),size;       \

#define ALLOC_OVERFLOW(size,scratch)                            \
        /* Calculate the size for the allocation. */            \
        subl    START_REGION,size;                              \
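
/* In C terms (sketch only; "region" stands for the structure behind
 * START_REGION, whose first word is the current free pointer):
 *
 *   // on entry the register holds free_pointer + nbytes, left there by the
 *   // inline allocation sequence that overflowed
 *   nbytes = reg - region->free_pointer;   // subl START_REGION,size
 *   reg    = alloc(nbytes);                // ALLOC(size); result in %eax
 */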
/* This routine handles an overflow with eax=crfp+size. So the
        .globl  GNAME(alloc_overflow_eax)
        TYPE(GNAME(alloc_overflow_eax))
GNAME(alloc_overflow_eax):
        pushl   %ecx              # Save ecx
        pushl   %edx              # Save edx
        ALLOC_OVERFLOW(%eax,%edx)
        popl    %edx              # Restore edx.
        popl    %ecx              # Restore ecx.
        SIZE(GNAME(alloc_overflow_eax))

        .globl  GNAME(alloc_overflow_ecx)
        TYPE(GNAME(alloc_overflow_ecx))
GNAME(alloc_overflow_ecx):
        pushl   %eax              # Save eax
        pushl   %edx              # Save edx
        ALLOC_OVERFLOW(%ecx,%edx)
        movl    %eax,%ecx         # Set up the destination.
        popl    %edx              # Restore edx.
        popl    %eax              # Restore eax.
        SIZE(GNAME(alloc_overflow_ecx))

        .globl  GNAME(alloc_overflow_edx)
        TYPE(GNAME(alloc_overflow_edx))
GNAME(alloc_overflow_edx):
        pushl   %eax              # Save eax
        pushl   %ecx              # Save ecx
        ALLOC_OVERFLOW(%edx,%ecx)
        movl    %eax,%edx         # Set up the destination.
        popl    %ecx              # Restore ecx.
        popl    %eax              # Restore eax.
        SIZE(GNAME(alloc_overflow_edx))

/* This routine handles an overflow with ebx=crfp+size. So the
        .globl  GNAME(alloc_overflow_ebx)
        TYPE(GNAME(alloc_overflow_ebx))
GNAME(alloc_overflow_ebx):
        pushl   %eax              # Save eax
        pushl   %ecx              # Save ecx
        pushl   %edx              # Save edx
        ALLOC_OVERFLOW(%ebx,%edx)
        movl    %eax,%ebx         # Set up the destination.
        popl    %edx              # Restore edx.
        popl    %ecx              # Restore ecx.
        popl    %eax              # Restore eax.
        SIZE(GNAME(alloc_overflow_ebx))

/* This routine handles an overflow with esi=crfp+size. So the
        .globl  GNAME(alloc_overflow_esi)
        TYPE(GNAME(alloc_overflow_esi))
GNAME(alloc_overflow_esi):
        pushl   %eax              # Save eax
        pushl   %ecx              # Save ecx
        pushl   %edx              # Save edx
        ALLOC_OVERFLOW(%esi,%edx)
        movl    %eax,%esi         # Set up the destination.
        popl    %edx              # Restore edx.
        popl    %ecx              # Restore ecx.
        popl    %eax              # Restore eax.
        SIZE(GNAME(alloc_overflow_esi))

        .globl  GNAME(alloc_overflow_edi)
        TYPE(GNAME(alloc_overflow_edi))
GNAME(alloc_overflow_edi):
        pushl   %eax              # Save eax
        pushl   %ecx              # Save ecx
        pushl   %edx              # Save edx
        ALLOC_OVERFLOW(%edi,%edx)
        movl    %eax,%edi         # Set up the destination.
        popl    %edx              # Restore edx.
        popl    %ecx              # Restore ecx.
        popl    %eax              # Restore eax.
        SIZE(GNAME(alloc_overflow_edi))
#ifdef LISP_FEATURE_WIN32
/* The guts of the exception-handling system don't use
 * frame pointers, which manages to throw off backtraces
 * rather badly. So here we grab the (known-good) EBP
 * and EIP from the exception context and use them to fake
 * up a stack frame which will skip over the system SEH
        .globl  GNAME(exception_handler_wrapper)
        TYPE(GNAME(exception_handler_wrapper))
GNAME(exception_handler_wrapper):
        /* Context layout is: */
        /* 7 dwords before FSA. (0x1c) */
        /* 8 dwords and 0x50 bytes in the FSA. (0x70/0x8c) */
        /* 4 dwords segregs. (0x10/0x9c) */
        /* 6 dwords non-stack GPRs. (0x18/0xb4) */
#define CONTEXT_EBP_OFFSET 0xb4
#define CONTEXT_EIP_OFFSET 0xb8
        /* some other stuff we don't care about. */
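
/* These offsets can be cross-checked against the Win32 CONTEXT structure;
 * a compile-time check along these lines (an illustrative sketch, not built
 * as part of this file) would be:
 *
 *   #include <windows.h>
 *   #include <stddef.h>
 *   _Static_assert(offsetof(CONTEXT, Ebp) == 0xb4, "CONTEXT_EBP_OFFSET");
 *   _Static_assert(offsetof(CONTEXT, Eip) == 0xb8, "CONTEXT_EIP_OFFSET");
 */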
        movl    0x10(%esp), %ebp  /* context */
        pushl   CONTEXT_EIP_OFFSET(%ebp)
        pushl   CONTEXT_EBP_OFFSET(%ebp)
        call    GNAME(handle_exception)
        SIZE(GNAME(exception_handler_wrapper))

#ifdef LISP_FEATURE_DARWIN
        .globl  GNAME(call_into_lisp_tramp)
        TYPE(GNAME(call_into_lisp_tramp))
GNAME(call_into_lisp_tramp):
        /* 1. build the stack frame from the block that's pointed to by ECX
           4. call the function via call_into_lisp
        pushl   0(%ecx)           /* return address */
        pushl   32(%ecx)          /* eflags */
        pushl   28(%ecx)          /* EAX */
        pushl   20(%ecx)          /* ECX */
        pushl   16(%ecx)          /* EDX */
        pushl   24(%ecx)          /* EBX */
        pushl   $0                /* popal is going to ignore esp */
        pushl   %ebp              /* is this right?? */
        pushl   12(%ecx)          /* ESI */
        pushl   8(%ecx)           /* EDI */
        pushl   $0                /* args for call_into_lisp */
        pushl   4(%ecx)           /* function to call */

        /* free our save block */
        pushl   %ecx              /* reserve sufficient space on stack for args */
        andl    $0xfffffff0, %esp /* align stack */
        call    GNAME(os_invalidate)

        /* call call_into_lisp */
        call    GNAME(call_into_lisp)

        /* Clean up our mess */
        SIZE(call_into_lisp_tramp)

        .align  align_16byte,0x90
        .globl  GNAME(post_signal_tramp)
        TYPE(GNAME(post_signal_tramp))
GNAME(post_signal_tramp):
        /* this is notionally the second half of a function whose first half
         * doesn't exist. This is where call_into_lisp returns when called
         * using return_to_lisp_function */
        addl    $12,%esp          /* clear call_into_lisp args from stack */
        popal                     /* restore registers */
#ifdef LISP_FEATURE_DARWIN
        /* skip two padding words */
        SIZE(GNAME(post_signal_tramp))
/* fast_bzero implementations and code to detect which implementation

        .globl  GNAME(fast_bzero_pointer)
GNAME(fast_bzero_pointer):
        /* Variable containing a pointer to the bzero function to use.
         * Initially points to a basic function. Change this variable
         * to fast_bzero_detect if OS supports SSE. */
        .long   GNAME(fast_bzero_base)

        .align  align_16byte,0x90
        .globl  GNAME(fast_bzero)
        TYPE(GNAME(fast_bzero))
        /* Indirect function call */
        jmp     *GNAME(fast_bzero_pointer)
        SIZE(GNAME(fast_bzero))

        .align  align_16byte,0x90
        .globl  GNAME(fast_bzero_detect)
        TYPE(GNAME(fast_bzero_detect))
GNAME(fast_bzero_detect):
        /* Decide whether to use SSE, MMX or REP version */
        push    %eax              /* CPUID uses EAX-EDX */
        test    $0x04000000, %edx /* SSE2 needed for MOVNTDQ */
        /* Originally there was another case here for using the
         * MOVNTQ instruction for processors that supported MMX but
         * not SSE2. This turned out to be a loss especially on
         * Athlons (where this instruction is apparently microcoded
         * somewhat slowly). So for simplicity revert to REP STOSL
         * for all non-SSE2 processors. */
        movl    $(GNAME(fast_bzero_base)), GNAME(fast_bzero_pointer)
        movl    $(GNAME(fast_bzero_sse)), GNAME(fast_bzero_pointer)
        jmp     *GNAME(fast_bzero_pointer)
        SIZE(GNAME(fast_bzero_detect))
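
/* The same "detect once, then dispatch through a function pointer" pattern,
 * sketched in C for illustration (hypothetical names; the real detection is
 * done with the CPUID instruction above):
 *
 *   static void bzero_base(void *dst, size_t n);   // rep stosl fallback
 *   static void bzero_sse(void *dst, size_t n);    // SSE2 / MOVNTDQ version
 *
 *   static void (*bzero_ptr)(void *, size_t) = bzero_base;
 *
 *   static void bzero_detect(void *dst, size_t n)
 *   {
 *       bzero_ptr = cpu_has_sse2() ? bzero_sse : bzero_base;
 *       bzero_ptr(dst, n);   // finish the current request via the new pointer
 *   }
 */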
        .align  align_16byte,0x90
        .globl  GNAME(fast_bzero_sse)
        TYPE(GNAME(fast_bzero_sse))

GNAME(fast_bzero_sse):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address. */
        push    %esi              /* Save temporary registers */
        mov     16(%esp), %esi    /* Parameter: number of bytes to fill */
        mov     12(%esp), %edi    /* Parameter: start address */
        shr     $6, %esi          /* Number of 64-byte blocks to fill */
        jz      Lend_sse          /* If none, stop */
        movups  %xmm7, -16(%esp)  /* Save XMM register */
        xorps   %xmm7, %xmm7      /* Zero the XMM register */

        /* Copy the 16 zeroes from xmm7 to memory, 4 times. MOVNTDQ is the
         * non-caching double-quadword moving variant, i.e. the memory areas
         * we're touching are not fetched into the L1 cache, since we're just
         * going to overwrite the memory soon anyway. */
        movntdq %xmm7, 0(%edi)
        movntdq %xmm7, 16(%edi)
        movntdq %xmm7, 32(%edi)
        movntdq %xmm7, 48(%edi)

        add     $64, %edi         /* Advance pointer */
        dec     %esi              /* Decrement 64-byte block count */
        movups  -16(%esp), %xmm7  /* Restore the XMM register */
        sfence                    /* Ensure that weakly ordered writes are flushed. */
        mov     12(%esp), %esi    /* Parameter: start address */
        prefetcht0 0(%esi)        /* Prefetch the start of the block into cache,
                                   * since it's likely to be used immediately. */
        pop     %edi              /* Restore temp registers */
        SIZE(GNAME(fast_bzero_sse))
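
/* Roughly the same loop written with compiler intrinsics (a sketch for
 * illustration; the runtime uses the hand-written assembly above):
 *
 *   #include <emmintrin.h>
 *
 *   static void bzero_sse_sketch(char *dst, size_t nbytes)
 *   {
 *       __m128i zero = _mm_setzero_si128();
 *       size_t i;
 *       for (i = 0; i < nbytes; i += 64) {              // one 64-byte block
 *           _mm_stream_si128((__m128i *)(dst + i),      zero);
 *           _mm_stream_si128((__m128i *)(dst + i + 16), zero);
 *           _mm_stream_si128((__m128i *)(dst + i + 32), zero);
 *           _mm_stream_si128((__m128i *)(dst + i + 48), zero);
 *       }
 *       _mm_sfence();   // flush the weakly ordered non-temporal stores
 *   }
 */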
        .align  align_16byte,0x90
        .globl  GNAME(fast_bzero_base)
        TYPE(GNAME(fast_bzero_base))

GNAME(fast_bzero_base):
        /* A fast routine for zero-filling blocks of memory that are
         * guaranteed to start and end at a 4096-byte aligned address. */
        push    %eax              /* Save temporary registers */
        mov     20(%esp), %ecx    /* Parameter: number of bytes to fill */
        mov     16(%esp), %edi    /* Parameter: start address */
        xor     %eax, %eax        /* Zero EAX */
        shr     $2, %ecx          /* Number of 4-byte blocks to fill */
        stosl                     /* Store EAX to *EDI, ECX times, incrementing
                                   * EDI by 4 after each store */
        pop     %edi              /* Restore temp registers */
        SIZE(GNAME(fast_bzero_base))

/* When LISP_FEATURE_C_STACK_IS_CONTROL_STACK, we cannot safely scrub
 * the control stack from C, largely due to not knowing where the
 * active stack frame ends. On such platforms, we reimplement the
 * core scrubbing logic in assembly, in this case here:
        .align  align_16byte,0x90
        .globl  GNAME(arch_scrub_control_stack)
        TYPE(GNAME(arch_scrub_control_stack))
GNAME(arch_scrub_control_stack):
        /* We are passed three parameters:
         * A (struct thread *) at [ESP+4],
         * the address of the guard page at [ESP+8], and
         * the address of the hard guard page at [ESP+12].
         * We may trash EAX, ECX, and EDX with impunity.
         * [ESP] is our return address, [ESP-4] is the first
         * stack slot to scrub. */
        /* We start by setting up our scrub pointer in EAX, our
         * guard page upper bound in ECX, and our hard guard
         * page upper bound in EDX. */
        mov     GNAME(os_vm_page_size),%edx

        /* We need to do a memory operation relative to the
         * thread pointer, so put it in %ecx and our guard
         * page upper bound in 4(%esp). */

        /* Now we begin our main scrub loop. */

        /* If we're about to scrub the hard guard page, exit. */
        jae     ascs_check_guard_page

ascs_check_guard_page:
        /* If we're about to scrub the guard page, and the guard
         * page is protected, exit. */
        cmpl    $(NIL), THREAD_CONTROL_STACK_GUARD_PAGE_PROTECTED_OFFSET(%ecx)

        /* Clear memory backwards to the start of the (4KiB) page */

        /* If we're about to hit the hard guard page, exit. */

        /* If the next (previous?) 4KiB page contains a non-zero
         * word, continue scrubbing. */

        SIZE(GNAME(arch_scrub_control_stack))
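
/* A rough C rendering of the loop above (illustration only, with hypothetical
 * helper names; on C_STACK_IS_CONTROL_STACK platforms this cannot actually be
 * run from C, which is exactly why the assembly version exists):
 *
 *   void scrub_sketch(struct thread *th, char *guard, char *hard_guard)
 *   {
 *       unsigned long page = os_vm_page_size;
 *       char *p = (char *)&th - 4;          // roughly [ESP-4] in the assembly
 *       for (;;) {
 *           if (p < hard_guard + page) return;
 *           if (p < guard + page && guard_page_is_protected(th)) return;
 *           do {                             // zero back to the page start
 *               p -= 4; *(unsigned long *)p = 0;
 *           } while ((unsigned long)p & (page - 1));
 *           if (page_below_is_all_zero(p)) return;   // nothing left to scrub
 *       }
 *   }
 */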