Preliminary work towards threads on win32
[sbcl.git] / src / runtime / win32-os.c
blob59e547380e54dcb3cd1941ce3389f267f9838bc1
1 /*
2 * the Win32 incarnation of OS-dependent routines. See also
3 * $(sbcl_arch)-win32-os.c
5 * This file (along with os.h) exports an OS-independent interface to
6 * the operating system VM facilities. Surprise surprise, this
7 * interface looks a lot like the Mach interface (but simpler in some
8 * places). For some operating systems, a subset of these functions
9 * will have to be emulated.
13 * This software is part of the SBCL system. See the README file for
14 * more information.
16 * This software is derived from the CMU CL system, which was
17 * written at Carnegie Mellon University and released into the
18 * public domain. The software is in the public domain and is
19 * provided with absolutely no warranty. See the COPYING and CREDITS
20 * files for more information.
24 * This file was copied from the Linux version of the same, and
25 * likely still has some linuxisms in it that haven't been eliminated
26 * yet.
29 #include <malloc.h>
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <sys/param.h>
33 #include <sys/file.h>
34 #include <io.h>
35 #include "sbcl.h"
36 #include "os.h"
37 #include "arch.h"
38 #include "globals.h"
39 #include "sbcl.h"
40 #include "interrupt.h"
41 #include "interr.h"
42 #include "lispregs.h"
43 #include "runtime.h"
44 #include "alloc.h"
45 #include "genesis/primitive-objects.h"
46 #include "dynbind.h"
48 #include <sys/types.h>
49 #include <sys/time.h>
50 #include <sys/stat.h>
51 #include <unistd.h>
53 #include <math.h>
54 #include <float.h>
56 #include <excpt.h>
58 #include "validate.h"
59 #include "thread.h"
60 #include "cpputil.h"
62 #ifndef LISP_FEATURE_SB_THREAD
63 /* dummy definition to reduce ifdef clutter */
64 #define WITH_GC_AT_SAFEPOINTS_ONLY() if (0) ; else
65 #endif
67 os_vm_size_t os_vm_page_size;
69 #include "gc.h"
70 #include "gencgc-internal.h"
71 #include <winsock2.h>
73 #if 0
74 int linux_sparc_siginfo_bug = 0;
75 int linux_supports_futex=0;
76 #endif
78 #include <stdarg.h>
79 #include <string.h>
81 /* missing definitions for modern mingws */
82 #ifndef EH_UNWINDING
83 #define EH_UNWINDING 0x02
84 #endif
85 #ifndef EH_EXIT_UNWIND
86 #define EH_EXIT_UNWIND 0x04
87 #endif
89 /* Tired of writing arch_os_get_current_thread each time. */
90 #define this_thread (arch_os_get_current_thread())
92 /* wrappers for winapi calls that must be successful (like SBCL's
93 * (aver ...) form). */
95 /* win_aver function: basic building block for miscellaneous
96 * ..AVER.. macrology (below) */
98 /* To do: These routines used to be "customizable" with dyndebug_init()
99 * and variables like dyndebug_survive_aver, dyndebug_skip_averlax based
100 * on environment variables. Those features got lost on the way, but
101 * ought to be reintroduced. */
103 static inline
104 intptr_t win_aver(intptr_t value, char* comment, char* file, int line,
105 int justwarn)
107 if (!value) {
108 LPSTR errorMessage = "<FormatMessage failed>";
109 DWORD errorCode = GetLastError(), allocated=0;
110 int posixerrno = errno;
111 const char* posixstrerror = strerror(errno);
112 char* report_template =
113 "Expression unexpectedly false: %s:%d\n"
114 " ... %s\n"
115 " ===> returned #X%p, \n"
116 " (in thread %p)"
117 " ... Win32 thinks:\n"
118 " ===> code %u, message => %s\n"
119 " ... CRT thinks:\n"
120 " ===> code %u, message => %s\n";
122 allocated =
123 FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER|
124 FORMAT_MESSAGE_FROM_SYSTEM,
125 NULL,
126 errorCode,
127 MAKELANGID(LANG_ENGLISH,SUBLANG_ENGLISH_US),
128 (LPSTR)&errorMessage,
129 1024u,
130 NULL);
132 if (justwarn) {
133 fprintf(stderr, report_template,
134 file, line,
135 comment, value,
136 this_thread,
137 (unsigned)errorCode, errorMessage,
138 posixerrno, posixstrerror);
139 } else {
140 lose(report_template,
141 file, line,
142 comment, value,
143 this_thread,
144 (unsigned)errorCode, errorMessage,
145 posixerrno, posixstrerror);
147 if (allocated)
148 LocalFree(errorMessage);
150 return value;
/* sys_aver function: really tiny adaptor of win_aver for
 * "POSIX-parody" CRT results ("lowio" and similar stuff):
 * negative number means something... negative. */
static inline
intptr_t sys_aver(long value, char* comment, char* file, int line,
                  int justwarn)
{
    /* Collapse the CRT result to a boolean (>= 0 is success); win_aver
     * does the reporting, including errno details. */
    win_aver((intptr_t)(value>=0),comment,file,line,justwarn);
    /* Hand the original result back so the wrapping macros stay
     * transparent to callers. */
    return value;
}
164 /* Check for (call) result being boolean true. (call) may be arbitrary
165 * expression now; massive attack of gccisms ensures transparent type
166 * conversion back and forth, so the type of AVER(expression) is the
167 * type of expression. Value is the same _if_ it can be losslessly
168 * converted to (void*) and back.
170 * Failed AVER() is normally fatal. Well, unless dyndebug_survive_aver
171 * flag is set. */
173 #define AVER(call) \
174 ({ __typeof__(call) __attribute__((unused)) me = \
175 (__typeof__(call)) \
176 win_aver((intptr_t)(call), #call, __FILE__, __LINE__, 0); \
177 me;})
179 /* AVERLAX(call): do the same check as AVER did, but be mild on
180 * failure: print an annoying unrequested message to stderr, and
181 * continue. With dyndebug_skip_averlax flag, AVERLAX stop even to
182 * check and complain. */
184 #define AVERLAX(call) \
185 ({ __typeof__(call) __attribute__((unused)) me = \
186 (__typeof__(call)) \
187 win_aver((intptr_t)(call), #call, __FILE__, __LINE__, 1); \
188 me;})
190 /* Now, when failed AVER... prints both errno and GetLastError(), two
191 * variants of "POSIX/lowio" style checks below are almost useless
192 * (they build on sys_aver like the two above do on win_aver). */
194 #define CRT_AVER_NONNEGATIVE(call) \
195 ({ __typeof__(call) __attribute__((unused)) me = \
196 (__typeof__(call)) \
197 sys_aver((call), #call, __FILE__, __LINE__, 0); \
198 me;})
200 #define CRT_AVERLAX_NONNEGATIVE(call) \
201 ({ __typeof__(call) __attribute__((unused)) me = \
202 (__typeof__(call)) \
203 sys_aver((call), #call, __FILE__, __LINE__, 1); \
204 me;})
206 /* to be removed */
207 #define CRT_AVER(booly) \
208 ({ __typeof__(booly) __attribute__((unused)) me = (booly); \
209 sys_aver((booly)?0:-1, #booly, __FILE__, __LINE__, 0); \
210 me;})
212 const char * t_nil_s(lispobj symbol);
215 * The following signal-mask-related alien routines are called from Lisp:
/* As of win32, deferrables _do_ matter. gc_signal doesn't. */
/* Block the deferrable signals for the calling thread and return the
 * previous mask to the Lisp caller.
 * NOTE(review): assumes sigset_t converts losslessly to unsigned long
 * on this pthreads-for-win32 port -- confirm against its headers. */
unsigned long block_deferrables_and_return_mask()
{
    sigset_t sset;
    block_deferrable_signals(0, &sset);
    return (unsigned long)sset;
}
#if defined(LISP_FEATURE_SB_THREAD)
/* Restore a signal mask previously returned by
 * block_deferrables_and_return_mask(). Called from Lisp. */
void apply_sigmask(unsigned long sigmask)
{
    sigset_t sset = (sigset_t)sigmask;
    pthread_sigmask(SIG_SETMASK, &sset, 0);
}
#endif
234 /* The exception handling function looks like this: */
235 EXCEPTION_DISPOSITION handle_exception(EXCEPTION_RECORD *,
236 struct lisp_exception_frame *,
237 CONTEXT *,
238 void *);
239 /* handle_exception is defined further in this file, but since SBCL
240 * 1.0.1.24, it doesn't get registered as SEH handler directly anymore,
241 * not even by wos_install_interrupt_handlers. Instead, x86-assem.S
242 * provides exception_handler_wrapper; we install it here, and each
243 * exception frame on nested funcall()s also points to it.
247 void *base_seh_frame;
/* Read the head of the current thread's SEH frame chain, which lives
 * at offset 0 of the TIB (FS segment on x86, GS on x86-64). */
static void *get_seh_frame(void)
{
    void* retval;
#ifdef LISP_FEATURE_X86
    asm volatile ("mov %%fs:0,%0": "=r" (retval));
#else
    asm volatile ("mov %%gs:0,%0": "=r" (retval));
#endif
    return retval;
}
/* Install FRAME as the head of the current thread's SEH frame chain
 * (TIB offset 0; see get_seh_frame). */
static void set_seh_frame(void *frame)
{
#ifdef LISP_FEATURE_X86
    asm volatile ("mov %0,%%fs:0": : "r" (frame));
#else
    asm volatile ("mov %0,%%gs:0": : "r" (frame));
#endif
}
269 #if defined(LISP_FEATURE_SB_THREAD)
271 /* Permit loads from GC_SAFEPOINT_PAGE_ADDR (NB page state change is
272 * "synchronized" with the memory region content/availability --
273 * e.g. you won't see other CPU flushing buffered writes after WP --
274 * but there is some window when other thread _seem_ to trap AFTER
275 * access is granted. You may think of it something like "OS enters
276 * SEH handler too slowly" -- what's important is there's no implicit
277 * synchronization between VirtualProtect caller and other thread's
278 * SEH handler, hence no ordering of events. VirtualProtect is
279 * implicitly synchronized with protected memory contents (only).
281 * The last fact may be potentially used with many benefits e.g. for
282 * foreign call speed, but we don't use it for now: almost the only
283 * fact relevant to the current signalling protocol is "sooner or
284 * later everyone will trap [everyone will stop trapping]".
286 * An interesting source on page-protection-based inter-thread
287 * communication is a well-known paper by Dave Dice, Hui Huang,
288 * Mingyao Yang: ``Asymmetric Dekker Synchronization''. Last time
289 * I checked it was available at
290 * http://home.comcast.net/~pjbishop/Dave/Asymmetric-Dekker-Synchronization.txt
292 void map_gc_page()
294 DWORD oldProt;
295 AVER(VirtualProtect((void*) GC_SAFEPOINT_PAGE_ADDR, sizeof(lispobj),
296 PAGE_READWRITE, &oldProt));
299 void unmap_gc_page()
301 DWORD oldProt;
302 AVER(VirtualProtect((void*) GC_SAFEPOINT_PAGE_ADDR, sizeof(lispobj),
303 PAGE_NOACCESS, &oldProt));
306 #endif
308 #if defined(LISP_FEATURE_SB_THREAD)
309 /* We want to get a slot in TIB that (1) is available at constant
310 offset, (2) is our private property, so libraries wouldn't legally
311 override it, (3) contains something predefined for threads created
312 out of our sight.
314 Low 64 TLS slots are addressable directly, starting with
315 FS:[#xE10]. When SBCL runtime is initialized, some of the low slots
316 may be already in use by its prerequisite DLLs, as DllMain()s and
317 TLS callbacks have been called already. But slot 63 is unlikely to
318 be reached at this point: one slot per DLL that needs it is the
319 common practice, and many system DLLs use predefined TIB-based
320 areas outside conventional TLS storage and don't need TLS slots.
321 With our current dependencies, even slot 2 is observed to be free
322 (as of WinXP and wine).
324 Now we'll call TlsAlloc() repeatedly until slot 63 is officially
325 assigned to us, then TlsFree() all other slots for normal use. TLS
326 slot 63, alias FS:[#.(+ #xE10 (* 4 63))], now belongs to us.
328 To summarize, let's list the assumptions we make:
330 - TIB, which is FS segment base, contains first 64 TLS slots at the
331 offset #xE10 (i.e. TIB layout compatibility);
332 - TLS slots are allocated from lower to higher ones;
333 - All libraries together with CRT startup have not requested 64
334 slots yet.
336 All these assumptions together don't seem to be less warranted than
337 the availability of TIB arbitrary data slot for our use. There are
338 some more reasons to prefer slot 63 over TIB arbitrary data: (1) if
339 our assumptions for slot 63 are violated, it will be detected at
340 startup instead of causing some system-specific unreproducible
341 problems afterwards, depending on OS and loaded foreign libraries;
342 (2) if getting slot 63 reliably with our current approach will
343 become impossible for some future Windows version, we can add TLS
344 callback directory to SBCL binary; main image TLS callback is
345 started before _any_ TLS slot is allocated by libraries, and
346 some C compiler vendors rely on this fact. */
/* Claim TLS slot 63 (OUR_TLS_INDEX) for the thread-struct pointer;
 * see the lengthy rationale in the comment above. Must run before any
 * other runtime initialization allocates TLS slots. */
void os_preinit()
{
#ifdef LISP_FEATURE_X86
    DWORD slots[TLS_MINIMUM_AVAILABLE];
    DWORD key;
    int n_slots = 0, i;
    /* TlsAlloc() hands out the lowest free index, so keep allocating
     * (remembering the unwanted slots) until we get the one we want. */
    for (i=0; i<TLS_MINIMUM_AVAILABLE; ++i) {
        key = TlsAlloc();
        if (key == OUR_TLS_INDEX) {
            if (TlsGetValue(key)!=NULL)
                lose("TLS slot assertion failed: fresh slot value is not NULL");
            /* Verify the assumed TIB layout: a value stored via
             * TlsSetValue must be visible to the FS-relative read in
             * arch_os_get_current_thread(). */
            TlsSetValue(OUR_TLS_INDEX, (void*)(intptr_t)0xFEEDBAC4);
            if ((intptr_t)(void*)arch_os_get_current_thread()!=(intptr_t)0xFEEDBAC4)
                lose("TLS slot assertion failed: TIB layout change detected");
            TlsSetValue(OUR_TLS_INDEX, NULL);
            break;
        }
        slots[n_slots++]=key;
    }
    /* Give the slots we didn't want back to the OS. */
    for (i=0; i<n_slots; ++i) {
        TlsFree(slots[i]);
    }
    if (key!=OUR_TLS_INDEX) {
        lose("TLS slot assertion failed: slot 63 is unavailable "
             "(last TlsAlloc() returned %u)",key);
    }
#endif
}
376 #endif /* LISP_FEATURE_SB_THREAD */
378 int os_number_of_processors = 1;
/* One-time OS-dependent initialization: page size, processor count,
 * and the base SEH frame for this thread. */
void os_init(char *argv[], char *envp[])
{
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    /* Use the larger of the hardware page and the GC backend page, so
     * page-granular operations satisfy both. */
    os_vm_page_size = system_info.dwPageSize > BACKEND_PAGE_BYTES?
        system_info.dwPageSize : BACKEND_PAGE_BYTES;
#if defined(LISP_FEATURE_X86)
    fast_bzero_pointer = fast_bzero_detect;
#endif
    os_number_of_processors = system_info.dwNumberOfProcessors;

    /* Remember the SEH chain head as it was before any Lisp frames. */
    base_seh_frame = get_seh_frame();
}
/* True when ADDRESS lies within the current thread's own memory:
 * either inside its thread area or within its control stack.
 * NOTE(review): the first upper bound compares against the struct
 * thread pointer itself minus THREAD_CSP_PAGE_SIZE, not against
 * os_address plus a size -- presumably the thread struct sits above
 * the stacks in the allocation; confirm against create_thread_struct. */
static inline boolean local_thread_stack_address_p(os_vm_address_t address)
{
    return this_thread &&
        (((((u64)address >= (u64)this_thread->os_address) &&
           ((u64)address < ((u64)this_thread)-THREAD_CSP_PAGE_SIZE))||
          (((u64)address >= (u64)this_thread->control_stack_start)&&
           ((u64)address < (u64)this_thread->control_stack_end))));
}
404 * So we have three fun scenarios here.
406 * First, we could be being called to reserve the memory areas
407 * during initialization (prior to loading the core file).
409 * Second, we could be being called by the GC to commit a page
410 * that has just been decommitted (for easy zero-fill).
412 * Third, we could be being called by create_thread_struct()
413 * in order to create the sundry and various stacks.
415 * The third case is easy to pick out because it passes an
416 * addr of 0.
418 * The second case is easy to pick out because it will be for
419 * a range of memory that is MEM_RESERVE rather than MEM_FREE.
421 * The second case is also an easy implement, because we leave
422 * the memory as reserved (since we do lazy commits).
/* Reserve (and possibly commit) LEN bytes at ADDR; see the three
 * scenarios described in the comment above. Returns the address on
 * success, 0 on failure. */
os_vm_address_t
os_validate(os_vm_address_t addr, os_vm_size_t len)
{
    MEMORY_BASIC_INFORMATION mem_info;

    if (!addr) {
        /* the simple case first: let the OS pick the address */
        return
            AVERLAX(VirtualAlloc(addr, len, MEM_RESERVE|MEM_COMMIT, PAGE_EXECUTE_READWRITE));
    }

    if (!AVERLAX(VirtualQuery(addr, &mem_info, sizeof mem_info)))
        return 0;

    if ((mem_info.State == MEM_RESERVE) && (mem_info.RegionSize >=len)) {
        /* It would be correct to return here. However, support for Wine
         * is beneficial, and Wine has a strange behavior in this
         * department. It reports all memory below KERNEL32.DLL as
         * reserved, but disallows MEM_COMMIT.
         *
         * Let's work around it: reserve the region we need for a second
         * time. The second reservation is documented to fail on normal NT
         * family, but it will succeed on Wine if this region is
         * actually free.
         */
        VirtualAlloc(addr, len, MEM_RESERVE, PAGE_EXECUTE_READWRITE);
        /* If it is wine, the second call has succeeded, and now the region
         * is really reserved. */
        return addr;
    }

    if (mem_info.State == MEM_RESERVE) {
        fprintf(stderr, "validation of reserved space too short.\n");
        fflush(stderr);
        /* Oddly, we do not treat this assertion as fatal; hence also the
         * provision for MEM_RESERVE in the following code, I suppose: */
    }

    if (!AVERLAX(VirtualAlloc(addr, len, (mem_info.State == MEM_RESERVE)?
                              MEM_COMMIT: MEM_RESERVE, PAGE_EXECUTE_READWRITE)))
        return 0;

    return addr;
}
471 * For os_invalidate(), we merely decommit the memory rather than
472 * freeing the address space. This loses when freeing per-thread
473 * data and related memory since it leaks address space.
475 * So far the original comment (author unknown). It used to continue as
476 * follows:
478 * It's not too lossy, however, since the two scenarios I'm aware of
479 * are fd-stream buffers, which are pooled rather than torched, and
480 * thread information, which I hope to pool (since windows creates
481 * threads at its own whim, and we probably want to be able to have
482 * them callback without funky magic on the part of the user, and
483 * full-on thread allocation is fairly heavyweight).
485 * But: As it turns out, we are no longer content with decommitting
486 * without freeing, and have now grown a second function
487 * os_invalidate_free(), sort of a really_os_invalidate().
489 * As discussed on #lisp, this is not a satisfactory solution, and probably
490 * ought to be rectified in the following way:
492 * - Any cases currently going through the non-freeing version of
493 * os_invalidate() are ultimately meant for zero-filling applications.
494 * Replace those use cases with an os_revalidate_bzero() or similarly
495 * named function, which explicitly takes care of that aspect of
496 * the semantics.
498 * - The remaining uses of os_invalidate should actually free, and once
499 * the above is implemented, we can rename os_invalidate_free back to
500 * just os_invalidate().
502 * So far the new plan, as yet unimplemented. -- DFL
/* Decommit LEN bytes at ADDR but keep the address range reserved, so a
 * later touch/validate can lazily recommit zero-filled pages. See the
 * discussion above about the planned os_invalidate/os_invalidate_free
 * split. */
void
os_invalidate(os_vm_address_t addr, os_vm_size_t len)
{
    AVERLAX(VirtualFree(addr, len, MEM_DECOMMIT));
}
/* Really release the allocation containing ADDR (which must be the
 * allocation base). LEN is ignored: MEM_RELEASE requires size 0. */
void
os_invalidate_free(os_vm_address_t addr, os_vm_size_t len)
{
    AVERLAX(VirtualFree(addr, 0, MEM_RELEASE));
}
/* Like os_invalidate_free, but ADDR may point anywhere inside the
 * allocation: look up the allocation base first and release that. */
void
os_invalidate_free_by_any_address(os_vm_address_t addr, os_vm_size_t len)
{
    MEMORY_BASIC_INFORMATION minfo;
    AVERLAX(VirtualQuery(addr, &minfo, sizeof minfo));
    AVERLAX(minfo.AllocationBase);
    AVERLAX(VirtualFree(minfo.AllocationBase, 0, MEM_RELEASE));
}
527 * os_map() is called to map a chunk of the core file into memory.
529 * Unfortunately, Windows semantics completely screws this up, so
530 * we just add backing store from the swapfile to where the chunk
531 * goes and read it up like a normal file. We could consider using
532 * a lazy read (demand page) setup, but that would mean keeping an
533 * open file pointer for the core indefinitely (and be one more
534 * thing to maintain).
/* Map LEN bytes of the core file (at OFFSET in FD) to ADDR by plain
 * commit-and-read; see the comment above for why no file mapping. */
os_vm_address_t
os_map(int fd, int offset, os_vm_address_t addr, os_vm_size_t len)
{
    os_vm_size_t count;

    /* Commit the target range. Plain MEM_COMMIT works when the range
     * is already reserved; RESERVE|COMMIT covers the case it isn't. */
    AVER(VirtualAlloc(addr, len, MEM_COMMIT, PAGE_EXECUTE_READWRITE)||
         VirtualAlloc(addr, len, MEM_RESERVE|MEM_COMMIT,
                      PAGE_EXECUTE_READWRITE));

    CRT_AVER_NONNEGATIVE(lseek(fd, offset, SEEK_SET));

    /* A short read is fatal: the core must be mapped completely. */
    count = read(fd, addr, len);
    CRT_AVER( count == len );

    return addr;
}
/* Translation table from the 3-bit os_vm_prot_t mask to the closest
 * Win32 page protection. NOTE(review): assumes the read/write/execute
 * bit order implied by the indices below matches os.h -- confirm.
 * Win32 has no write-without-read or write-with-execute-only modes,
 * hence the repeated entries. */
static DWORD os_protect_modes[8] = {
    PAGE_NOACCESS,
    PAGE_READONLY,
    PAGE_READWRITE,
    PAGE_READWRITE,
    PAGE_EXECUTE,
    PAGE_EXECUTE_READ,
    PAGE_EXECUTE_READWRITE,
    PAGE_EXECUTE_READWRITE,
};
/* Apply protection PROT to LENGTH bytes at ADDRESS. If the pages are
 * not committed yet VirtualProtect fails, so commit them with the
 * requested protection and retry. */
void
os_protect(os_vm_address_t address, os_vm_size_t length, os_vm_prot_t prot)
{
    DWORD old_prot;

    DWORD new_prot = os_protect_modes[prot];
    AVER(VirtualProtect(address, length, new_prot, &old_prot)||
         (VirtualAlloc(address, length, MEM_COMMIT, new_prot) &&
          VirtualProtect(address, length, new_prot, &old_prot)));
    odxprint(misc,"Protecting %p + %p vmaccess %d "
             "newprot %08x oldprot %08x",
             address,length,prot,new_prot,old_prot);
}
579 /* FIXME: Now that FOO_END, rather than FOO_SIZE, is the fundamental
580 * description of a space, we could probably punt this and just do
581 * (FOO_START <= x && x < FOO_END) everywhere it's called. */
582 static boolean
583 in_range_p(os_vm_address_t a, lispobj sbeg, size_t slen)
585 char* beg = (char*)((uword_t)sbeg);
586 char* end = (char*)((uword_t)sbeg) + slen;
587 char* adr = (char*)a;
588 return (adr >= beg && adr < end);
591 boolean
592 is_linkage_table_addr(os_vm_address_t addr)
594 return in_range_p(addr, LINKAGE_TABLE_SPACE_START, LINKAGE_TABLE_SPACE_END);
597 static boolean is_some_thread_local_addr(os_vm_address_t addr);
599 boolean
600 is_valid_lisp_addr(os_vm_address_t addr)
602 if(in_range_p(addr, READ_ONLY_SPACE_START, READ_ONLY_SPACE_SIZE) ||
603 in_range_p(addr, STATIC_SPACE_START , STATIC_SPACE_SIZE) ||
604 in_range_p(addr, DYNAMIC_SPACE_START , dynamic_space_size) ||
605 is_some_thread_local_addr(addr))
606 return 1;
607 return 0;
/* test if an address is within thread-local space */
static boolean
is_thread_local_addr(struct thread* th, os_vm_address_t addr)
{
    /* Assuming that this is correct, it would warrant further comment,
     * I think. Based on what our call site is doing, we have been
     * tasked to check for the address of a lisp object; not merely any
     * foreign address within the thread's area. Indeed, this used to
     * be a check for control and binding stack only, rather than the
     * full thread "struct". So shouldn't the THREAD_STRUCT_SIZE rather
     * be (thread_control_stack_size+BINDING_STACK_SIZE) instead? That
     * would also do away with the LISP_FEATURE_SB_THREAD case. Or does
     * it simply not matter? --DFL */
    /* NOTE(review): DIFF = os_address - addr is positive only for
     * addresses *below* os_address; if os_address is the base of the
     * allocation this looks inverted -- confirm the intended layout. */
    ptrdiff_t diff = ((char*)th->os_address)-(char*)addr;
    return diff > (ptrdiff_t)0 && diff < (ptrdiff_t)THREAD_STRUCT_SIZE
#ifdef LISP_FEATURE_SB_THREAD
        /* The csp page is excluded: foreign-call transitions touch it
         * deliberately and must not be mistaken for lisp objects. */
        && addr != (os_vm_address_t) th->csp_around_foreign_call
#endif
        ;
}
/* Does ADDR lie in any live thread's local area? Takes
 * all_threads_lock so the thread list cannot change mid-scan. */
static boolean
is_some_thread_local_addr(os_vm_address_t addr)
{
    boolean result = 0;
#ifdef LISP_FEATURE_SB_THREAD
    struct thread *th;
    pthread_mutex_lock(&all_threads_lock);
    for_each_thread(th) {
        if(is_thread_local_addr(th,addr)) {
            result = 1;
            break;
        }
    }
    pthread_mutex_unlock(&all_threads_lock);
#endif
    return result;
}
650 /* A tiny bit of interrupt.c state we want our paws on. */
651 extern boolean internal_errors_enabled;
653 extern void exception_handler_wrapper();
/* Print up to DEPTH C stack frames to stderr, prefixed by HEADER.
 * Walks the saved-EBP chain, so it only works with frame pointers. */
void
c_level_backtrace(const char* header, int depth)
{
    void* frame;
    int n = 0;
    void** lastseh;

    /* Walk the SEH chain to its end (terminated by -1).
     * NOTE(review): LASTSEH is never used afterwards; this walk looks
     * like a leftover debugging aid -- confirm before removing. */
    for (lastseh = get_seh_frame(); lastseh && (lastseh!=(void*)-1);
         lastseh = *lastseh);

    fprintf(stderr, "Backtrace: %s (thread %p)\n", header, this_thread);
    for (frame = __builtin_frame_address(0); frame; frame=*(void**)frame)
    {
        if ((n++)>depth)
            return;
        /* Each frame: [saved ebp][return address] */
        fprintf(stderr, "[#%02d]: ebp = 0x%p, ret = 0x%p\n",n,
                frame, ((void**)frame)[1]);
    }
}
675 #ifdef LISP_FEATURE_X86
676 #define voidreg(ctxptr,name) ((void*)((ctxptr)->E##name))
677 #else
678 #define voidreg(ctxptr,name) ((void*)((ctxptr)->R##name))
679 #endif
682 #if defined(LISP_FEATURE_X86)
/* Exception-code handler for EXCEPTION_SINGLE_STEP. Returns 0 when
 * handled, -1 to decline (no single-step in progress). */
static int
handle_single_step(os_context_t *ctx)
{
    if (!single_stepping)
        return -1;

    /* We are doing a displaced instruction. At least function
     * end breakpoints use this. */
    WITH_GC_AT_SAFEPOINTS_ONLY () /* Todo: Is it really gc-unsafe? */
        restore_breakpoint_from_single_step(ctx);

    return 0;
}
696 #endif
698 #ifdef LISP_FEATURE_UD2_BREAKPOINTS
699 #define SBCL_EXCEPTION_BREAKPOINT EXCEPTION_ILLEGAL_INSTRUCTION
700 #define TRAP_CODE_WIDTH 2
701 #else
702 #define SBCL_EXCEPTION_BREAKPOINT EXCEPTION_BREAKPOINT
703 #define TRAP_CODE_WIDTH 1
704 #endif
/* Exception-code handler for SBCL's breakpoint exception. Decodes the
 * trap kind byte following the breakpoint instruction and dispatches
 * it. Returns 0 when handled, -1 to decline. */
static int
handle_breakpoint_trap(os_context_t *ctx, struct thread* self)
{
#ifdef LISP_FEATURE_UD2_BREAKPOINTS
    /* Decline unless EIP really points at a UD2 opcode (0F 0B). */
    if (((unsigned short *)((ctx->win32_context)->Eip))[0] != 0x0b0f)
        return -1;
#endif

    /* Unlike some other operating systems, Win32 leaves EIP
     * pointing to the breakpoint instruction. */
    (*os_context_pc_addr(ctx)) += TRAP_CODE_WIDTH;

    /* Now EIP points just after the INT3 byte and aims at the
     * 'kind' value (eg trap_Cerror). */
    unsigned trap = *(unsigned char *)(*os_context_pc_addr(ctx));

#ifdef LISP_FEATURE_SB_THREAD
    /* Before any other trap handler: gc_safepoint ensures that
       inner alloc_sap for passing the context won't trap on
       pseudo-atomic. */
    if (trap == trap_PendingInterrupt) {
        /* Done everything needed for this trap, except EIP
           adjustment */
        arch_skip_instruction(ctx);
        thread_interrupted(ctx);
        return 0;
    }
#endif

    /* This is just for info in case the monitor wants to print an
     * approximation. */
    access_control_stack_pointer(self) =
        (lispobj *)*os_context_sp_addr(ctx);

    WITH_GC_AT_SAFEPOINTS_ONLY() {
#if defined(LISP_FEATURE_SB_THREAD)
        block_blockable_signals(0,&ctx->sigmask);
#endif
        handle_trap(ctx, trap);
#if defined(LISP_FEATURE_SB_THREAD)
        thread_sigmask(SIG_SETMASK,&ctx->sigmask,NULL);
#endif
    }

    /* Done, we're good to go! */
    return 0;
}
/* Exception-code handler for EXCEPTION_ACCESS_VIOLATION. Classifies
 * the fault address (guard pages, safepoint pages, dynamic space,
 * linkage table, other lisp spaces) and recommits memory or notifies
 * the GC/thread machinery as appropriate. Returns 0 when handled,
 * -1 to decline. */
static int
handle_access_violation(os_context_t *ctx,
                        EXCEPTION_RECORD *exception_record,
                        void *fault_address,
                        struct thread* self)
{
    CONTEXT *win32_context = ctx->win32_context;

#if defined(LISP_FEATURE_X86)
    odxprint(pagefaults,
             "SEGV. ThSap %p, Eip %p, Esp %p, Esi %p, Edi %p, "
             "Addr %p Access %d\n",
             self,
             win32_context->Eip,
             win32_context->Esp,
             win32_context->Esi,
             win32_context->Edi,
             fault_address,
             exception_record->ExceptionInformation[0]);
#else
    odxprint(pagefaults,
             "SEGV. ThSap %p, Eip %p, Esp %p, Esi %p, Edi %p, "
             "Addr %p Access %d\n",
             self,
             win32_context->Rip,
             win32_context->Rsp,
             win32_context->Rsi,
             win32_context->Rdi,
             fault_address,
             exception_record->ExceptionInformation[0]);
#endif

    /* Stack: This case takes care of our various stack exhaustion
     * protect pages (with the notable exception of the control stack!). */
    if (self && local_thread_stack_address_p(fault_address)) {
        if (handle_guard_page_triggered(ctx, fault_address))
            return 0; /* gc safety? */
        goto try_recommit;
    }

    /* Safepoint pages */
#ifdef LISP_FEATURE_SB_THREAD
    if (fault_address == (void *) GC_SAFEPOINT_PAGE_ADDR) {
        thread_in_lisp_raised(ctx);
        return 0;
    }

    if ((((u64)fault_address) == ((u64)self->csp_around_foreign_call))){
        thread_in_safety_transition(ctx);
        return 0;
    }
#endif

    /* dynamic space */
    page_index_t index = find_page_index(fault_address);
    if (index != -1) {
        /*
         * Now, if the page is supposedly write-protected and this
         * is a write, tell the gc that it's been hit.
         */
        if (page_table[index].write_protected) {
            gencgc_handle_wp_violation(fault_address);
        } else {
            /* Lazily committed dynamic-space page: commit it now. */
            AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size),
                              os_vm_page_size,
                              MEM_COMMIT, PAGE_EXECUTE_READWRITE));
        }
        return 0;
    }

    if (fault_address == undefined_alien_address)
        return -1;

    /* linkage table or a "valid_lisp_addr" outside of dynamic space (?) */
    if (is_linkage_table_addr(fault_address)
        || is_valid_lisp_addr(fault_address))
        goto try_recommit;

    return -1;

try_recommit:
    /* First use of a new page, lets get some memory for it. */
#if defined(LISP_FEATURE_X86)
    AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size),
                      os_vm_page_size,
                      MEM_COMMIT, PAGE_EXECUTE_READWRITE)
         ||(fprintf(stderr,"Unable to recommit addr %p eip 0x%08lx\n",
                    fault_address, win32_context->Eip) &&
            (c_level_backtrace("BT",5),
             fake_foreign_function_call(ctx),
             lose("Lispy backtrace"),
             0)));
#else
    AVER(VirtualAlloc(PTR_ALIGN_DOWN(fault_address,os_vm_page_size),
                      os_vm_page_size,
                      MEM_COMMIT, PAGE_EXECUTE_READWRITE)
         ||(fprintf(stderr,"Unable to recommit addr %p eip 0x%p\n",
                    fault_address, (void*)win32_context->Rip) &&
            (c_level_backtrace("BT",5),
             fake_foreign_function_call(ctx),
             lose("Lispy backtrace"),
             0)));
#endif

    return 0;
}
/* Last-resort path for exceptions no code handler claimed: forward to
 * the Lisp-side handler when internal errors are enabled, otherwise
 * print what we know and lose(). */
static void
signal_internal_error_or_lose(os_context_t *ctx,
                              EXCEPTION_RECORD *exception_record,
                              void *fault_address)
{
    /*
     * If we fall through to here then we need to either forward
     * the exception to the lisp-side exception handler if it's
     * set up, or drop to LDB.
     */
    if (internal_errors_enabled) {
        lispobj context_sap;
        lispobj exception_record_sap;

        asm("fnclex");
        /* We're making the somewhat arbitrary decision that having
         * internal errors enabled means that lisp has sufficient
         * marbles to be able to handle exceptions, but exceptions
         * aren't supposed to happen during cold init or reinit
         * anyway. */

#if defined(LISP_FEATURE_SB_THREAD)
        block_blockable_signals(0,&ctx->sigmask);
#endif
        fake_foreign_function_call(ctx);

        WITH_GC_AT_SAFEPOINTS_ONLY() {
            /* Allocate the SAP objects while the "interrupts" are still
             * disabled. */
            context_sap = alloc_sap(ctx);
            exception_record_sap = alloc_sap(exception_record);
#if defined(LISP_FEATURE_SB_THREAD)
            thread_sigmask(SIG_SETMASK, &ctx->sigmask, NULL);
#endif

            /* The exception system doesn't automatically clear pending
             * exceptions, so we lose as soon as we execute any FP
             * instruction unless we do this first. */
            /* Call into lisp to handle things. */
            funcall2(StaticSymbolFunction(HANDLE_WIN32_EXCEPTION),
                     context_sap,
                     exception_record_sap);
        }

        /* If Lisp doesn't nlx, we need to put things back. */
        undo_fake_foreign_function_call(ctx);
#if defined(LISP_FEATURE_SB_THREAD)
        thread_sigmask(SIG_SETMASK, &ctx->sigmask, NULL);
#endif
        /* FIXME: HANDLE-WIN32-EXCEPTION should be allowed to decline */
        return;
    }

    /* No Lisp handler available: dump diagnostics and die. */
    fprintf(stderr, "Exception Code: 0x%p.\n",
            (void*)(intptr_t)exception_record->ExceptionCode);
    fprintf(stderr, "Faulting IP: 0x%p.\n",
            (void*)(intptr_t)exception_record->ExceptionAddress);
    if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
        MEMORY_BASIC_INFORMATION mem_info;

        if (VirtualQuery(fault_address, &mem_info, sizeof mem_info)) {
            fprintf(stderr, "page status: 0x%lx.\n", mem_info.State);
        }

        fprintf(stderr, "Was writing: %p, where: 0x%p.\n",
                (void*)exception_record->ExceptionInformation[0],
                fault_address);
    }

    fflush(stderr);

    fake_foreign_function_call(ctx);
    lose("Exception too early in cold init, cannot continue.");
}
938 * A good explanation of the exception handling semantics is
939 * http://win32assembly.online.fr/Exceptionhandling.html (possibly defunct)
940 * or:
941 * http://www.microsoft.com/msj/0197/exception/exception.aspx
/* Central SEH entry point (reached via exception_handler_wrapper; see
 * the comment above handle_exception's declaration). Dispatches on the
 * exception code and either continues execution or forwards to the
 * Lisp-side handler. */
EXCEPTION_DISPOSITION
handle_exception(EXCEPTION_RECORD *exception_record,
                 struct lisp_exception_frame *exception_frame,
                 CONTEXT *win32_context,
                 void *dispatcher_context)
{
    if (!win32_context)
        /* Not certain why this should be possible, but let's be safe... */
        return ExceptionContinueSearch;

    if (exception_record->ExceptionFlags & (EH_UNWINDING | EH_EXIT_UNWIND)) {
        /* If we're being unwound, be graceful about it. */

        /* Undo any dynamic bindings. */
        unbind_to_here(exception_frame->bindstack_pointer,
                       arch_os_get_current_thread());
        return ExceptionContinueSearch;
    }

    /* Save CRT/Win32 error state so handling doesn't clobber it. */
    DWORD lastError = GetLastError();
    DWORD lastErrno = errno;
    DWORD code = exception_record->ExceptionCode;
    struct thread* self = arch_os_get_current_thread();

    os_context_t context, *ctx = &context;
    context.win32_context = win32_context;
#if defined(LISP_FEATURE_SB_THREAD)
    context.sigmask = self ? self->os_thread->blocked_signal_set : 0;
#endif

    /* For EXCEPTION_ACCESS_VIOLATION only. */
    void *fault_address = (void *)exception_record->ExceptionInformation[1];

    odxprint(seh,
             "SEH: rec %p, ctxptr %p, rip %p, fault %p\n"
             "... code %p, rcx %p, fp-tags %p\n\n",
             exception_record,
             win32_context,
             voidreg(win32_context,ip),
             fault_address,
             (void*)(intptr_t)code,
             voidreg(win32_context,cx),
             win32_context->FloatSave.TagWord);

    /* This function had become unwieldy. Let's cut it down into
     * pieces based on the different exception codes. Each exception
     * code handler gets the chance to decline by returning non-zero if it
     * isn't happy: */

    int rc;
    switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
        rc = handle_access_violation(
            ctx, exception_record, fault_address, self);
        break;

    case SBCL_EXCEPTION_BREAKPOINT:
        rc = handle_breakpoint_trap(ctx, self);
        break;

#if defined(LISP_FEATURE_X86)
    case EXCEPTION_SINGLE_STEP:
        rc = handle_single_step(ctx);
        break;
#endif

    default:
        rc = -1;
    }

    if (rc)
        /* All else failed, drop through to the lisp-side exception handler. */
        signal_internal_error_or_lose(ctx, exception_record, fault_address);

    errno = lastErrno;
    SetLastError(lastError);
    return ExceptionContinueExecution;
}
/* Install the runtime's exception handling for the current thread:
 * chain a new SEH frame on x86, or install a process-wide vectored
 * handler (once) elsewhere. */
void
wos_install_interrupt_handlers(struct lisp_exception_frame *handler)
{
#ifdef LISP_FEATURE_X86
    handler->next_frame = get_seh_frame();
    handler->handler = (void*)exception_handler_wrapper;
    set_seh_frame(handler);
#else
    /* NOTE(review): 'veh' is not visible in this file -- presumably
     * declared in a header or sibling file; confirm. */
    static int once = 0;
    if (!once++)
        AddVectoredExceptionHandler(1,veh);
#endif
}
1038 * The stubs below are replacements for the windows versions,
1039 * which can -fail- when used in our memory spaces because they
1040 * validate the memory spaces they are passed in a way that
1041 * denies our exception handler a chance to run.
/* Replacement for the Windows memmove (see the note above): a plain
 * byte copier so that any fault it takes reaches our own exception
 * handler instead of being pre-validated away by the system version.
 *
 * Copies n bytes from src to dest, overlap-safe: copies forward when
 * dest is below src, backward otherwise.  Returns dest.
 *
 * Fix: the forward-copy index was a signed `int`, giving a
 * signed/unsigned comparison against the size_t count and truncating
 * for copies larger than INT_MAX; it is now size_t. */
void *memmove(void *dest, const void *src, size_t n)
{
    char *d = dest;
    const char *s = src;
    if (d < s) {
        size_t i;
        for (i = 0; i < n; i++)
            d[i] = s[i];
    } else {
        while (n--)
            d[n] = s[n];
    }
    return dest;
}
/* Replacement for the Windows memcpy (see the note above memmove): a
 * simple backward byte copy whose faults reach our exception handler.
 * The regions must not overlap, per the usual memcpy contract.
 * Returns dest. */
void *memcpy(void *dest, const void *src, size_t n)
{
    char *d = dest;
    const char *s = src;
    while (n != 0) {
        n--;
        d[n] = s[n];
    }
    return dest;
}
1061 char *dirname(char *path)
1063 static char buf[PATH_MAX + 1];
1064 size_t pathlen = strlen(path);
1065 int i;
1067 if (pathlen >= sizeof(buf)) {
1068 lose("Pathname too long in dirname.\n");
1069 return NULL;
1072 strcpy(buf, path);
1073 for (i = pathlen; i >= 0; --i) {
1074 if (buf[i] == '/' || buf[i] == '\\') {
1075 buf[i] = '\0';
1076 break;
1080 return buf;
1083 /* This is a manually-maintained version of ldso_stubs.S. */
1085 void __stdcall RtlUnwind(void *, void *, void *, void *); /* I don't have winternl.h */
/* scratch() exists only so the linker keeps references to these win32
 * and C-runtime entry points for the Lisp side to look up at runtime
 * (the manually-maintained stand-in for ldso_stubs.S described above).
 * It is not intended to be called with these dummy arguments. */
void scratch(void)
{
    CloseHandle(0);
    FlushConsoleInputBuffer(0);
    FormatMessageA(0, 0, 0, 0, 0, 0, 0);
    FreeLibrary(0);
    GetACP();
    GetConsoleCP();
    GetConsoleOutputCP();
    GetCurrentProcess();
    GetExitCodeProcess(0, 0);
    GetLastError();
    GetOEMCP();
    GetProcAddress(0, 0);
    GetProcessTimes(0, 0, 0, 0, 0);
    GetSystemTimeAsFileTime(0);
    LoadLibrary(0);
    LocalFree(0);
    PeekConsoleInput(0, 0, 0, 0);
    PeekNamedPipe(0, 0, 0, 0, 0, 0);
    ReadFile(0, 0, 0, 0, 0);
    Sleep(0);
    WriteFile(0, 0, 0, 0, 0);
    _get_osfhandle(0);
    _rmdir(0);
    _pipe(0,0,0);
    access(0,0);
    close(0);
    dup(0);
    isatty(0);
    strerror(42);
    write(0, 0, 0);
    RtlUnwind(0, 0, 0, 0);
    MapViewOfFile(0,0,0,0,0);
    UnmapViewOfFile(0);
    FlushViewOfFile(0,0);
#ifndef LISP_FEATURE_SB_UNICODE
    CreateDirectoryA(0,0);
    CreateFileMappingA(0,0,0,0,0,0);
    CreateFileA(0,0,0,0,0,0,0);
    GetComputerNameA(0, 0);
    GetCurrentDirectoryA(0,0);
    GetEnvironmentVariableA(0, 0, 0);
    GetFileAttributesA(0);
    GetVersionExA(0);
    MoveFileA(0,0);
    SHGetFolderPathA(0, 0, 0, 0, 0);
    SetCurrentDirectoryA(0);
    SetEnvironmentVariableA(0, 0);
#else
    CreateDirectoryW(0,0);
    CreateFileMappingW(0,0,0,0,0,0);
    CreateFileW(0,0,0,0,0,0,0);
    FormatMessageW(0, 0, 0, 0, 0, 0, 0);
    GetComputerNameW(0, 0);
    GetCurrentDirectoryW(0,0);
    GetEnvironmentVariableW(0, 0, 0);
    GetFileAttributesW(0);
    GetVersionExW(0);
    MoveFileW(0,0);
    SHGetFolderPathW(0, 0, 0, 0, 0);
    SetCurrentDirectoryW(0);
    SetEnvironmentVariableW(0, 0);
#endif
    _exit(0);
}
1154 char *
1155 os_get_runtime_executable_path(int external)
1157 char path[MAX_PATH + 1];
1158 DWORD bufsize = sizeof(path);
1159 DWORD size;
1161 if ((size = GetModuleFileNameA(NULL, path, bufsize)) == 0)
1162 return NULL;
1163 else if (size == bufsize && GetLastError() == ERROR_INSUFFICIENT_BUFFER)
1164 return NULL;
1166 return copied_string(path);
1169 #ifdef LISP_FEATURE_SB_THREAD
1172 win32_wait_object_or_signal(HANDLE waitFor)
1174 struct thread * self = arch_os_get_current_thread();
1175 HANDLE handles[2];
1176 handles[0] = waitFor;
1177 handles[1] = self->private_events.events[1];
1178 return
1179 WaitForMultipleObjects(2,handles, FALSE,INFINITE);
1183 * Portability glue for win32 waitable timers.
1185 * One may ask: Why is there a wrapper in C when the calls are so
1186 * obvious that Lisp could do them directly (as it did on Windows)?
1188 * But the answer is that on POSIX platforms, we now emulate the win32
1189 * calls and hide that emulation behind this os_* abstraction.
1191 HANDLE
1192 os_create_wtimer()
1194 return CreateWaitableTimer(0, 0, 0);
1198 os_wait_for_wtimer(HANDLE handle)
1200 return win32_wait_object_or_signal(handle);
1203 void
1204 os_close_wtimer(HANDLE handle)
1206 CloseHandle(handle);
1209 void
1210 os_set_wtimer(HANDLE handle, int sec, int nsec)
1212 /* in units of 100ns, i.e. 0.1us. Negative for relative semantics. */
1213 long long dueTime
1214 = -(((long long) sec) * 10000000
1215 + ((long long) nsec + 99) / 100);
1216 SetWaitableTimer(handle, (LARGE_INTEGER*) &dueTime, 0, 0, 0, 0);
1219 void
1220 os_cancel_wtimer(HANDLE handle)
1222 CancelWaitableTimer(handle);
1224 #endif
1226 /* EOF */