From 6cde30b92d3e6092372d0164bb47b58568d9360f Mon Sep 17 00:00:00 2001
From: Gabor Melis
Date: Tue, 11 Oct 2005 09:38:48 +0000
Subject: [PATCH] 0.9.5.35:

* Use mutexes instead of spinlock where appropriate: possibly high
  lock contention, holding the lock for more than a jiffy.

Seems to cure the sporadic hang-for-a-while-then-continue behaviour
that some tests displayed.
---
 src/runtime/gencgc.c       | 18 ++++++++++--------
 src/runtime/interrupt.c    |  3 ++-
 src/runtime/interrupt.h    |  2 ++
 src/runtime/thread.h       |  4 ++++
 src/runtime/x86-linux-os.c | 28 +++++++++++++---------------
 version.lisp-expr          |  2 +-
 6 files changed, 32 insertions(+), 25 deletions(-)

diff --git a/src/runtime/gencgc.c b/src/runtime/gencgc.c
index 0584403be..ec37966b7 100644
--- a/src/runtime/gencgc.c
+++ b/src/runtime/gencgc.c
@@ -265,7 +265,9 @@ static long last_free_page;
  * seized before all accesses to generations[] or to parts of
  * page_table[] that other threads may want to see */
 
-static lispobj free_pages_lock=0;
+#ifdef LISP_FEATURE_SB_THREAD
+static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
 
 
 /*
@@ -516,7 +518,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region
     gc_assert((alloc_region->first_page == 0)
               && (alloc_region->last_page == -1)
               && (alloc_region->free_pointer == alloc_region->end_addr));
-    get_spinlock(&free_pages_lock,(long) alloc_region);
+    thread_mutex_lock(&free_pages_lock);
     if (unboxed) {
         first_page =
             generations[gc_alloc_generation].alloc_unboxed_start_page;
@@ -578,7 +580,7 @@ gc_alloc_new_region(long nbytes, int unboxed, struct alloc_region *alloc_region
                        (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES), 0);
     }
 
-    release_spinlock(&free_pages_lock);
+    thread_mutex_unlock(&free_pages_lock);
 
     /* we can do this after releasing free_pages_lock */
     if (gencgc_zero_check) {
@@ -715,7 +717,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
 
     next_page = first_page+1;
 
-    get_spinlock(&free_pages_lock,(long) alloc_region);
+    thread_mutex_lock(&free_pages_lock);
     if (alloc_region->free_pointer != alloc_region->start_addr) {
         /* some bytes were allocated in the region */
         orig_first_page_bytes_used = page_table[first_page].bytes_used;
@@ -819,7 +821,7 @@ gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
         page_table[next_page].allocated = FREE_PAGE_FLAG;
         next_page++;
     }
-    release_spinlock(&free_pages_lock);
+    thread_mutex_unlock(&free_pages_lock);
     /* alloc_region is per-thread, we're ok to do this unlocked */
     gc_set_region_empty(alloc_region);
 }
@@ -838,7 +840,7 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
     long bytes_used;
     long next_page;
 
-    get_spinlock(&free_pages_lock,(long) alloc_region);
+    thread_mutex_lock(&free_pages_lock);
 
     if (unboxed) {
         first_page =
@@ -939,7 +941,7 @@ gc_alloc_large(long nbytes, int unboxed, struct alloc_region *alloc_region)
         SetSymbolValue(ALLOCATION_POINTER,
                        (lispobj)(((char *)heap_base) + last_free_page*PAGE_BYTES),0);
     }
-    release_spinlock(&free_pages_lock);
+    thread_mutex_unlock(&free_pages_lock);
 
     return((void *)(page_address(first_page)+orig_first_page_bytes_used));
 }
@@ -954,7 +956,7 @@ gc_find_freeish_pages(long *restart_page_ptr, long nbytes, int unboxed)
     long bytes_found;
     long num_pages;
     long large_p=(nbytes>=large_object_size);
-    gc_assert(free_pages_lock);
+    /* FIXME: assert(free_pages_lock is held); */
 
     /* Search for a contiguous free space of at least nbytes. If it's
      * a large object then align it on a page boundary by searching
diff --git a/src/runtime/interrupt.c b/src/runtime/interrupt.c
index 868d65c3f..bc704e389 100644
--- a/src/runtime/interrupt.c
+++ b/src/runtime/interrupt.c
@@ -110,7 +110,8 @@ void sigaddset_blockable(sigset_t *s)
 static sigset_t deferrable_sigset;
 static sigset_t blockable_sigset;
 
-inline static void check_blockables_blocked_or_lose()
+void
+check_blockables_blocked_or_lose()
 {
     /* Get the current sigmask, by blocking the empty set. */
     sigset_t empty,current;
diff --git a/src/runtime/interrupt.h b/src/runtime/interrupt.h
index ec7899967..b31d0f388 100644
--- a/src/runtime/interrupt.h
+++ b/src/runtime/interrupt.h
@@ -25,6 +25,8 @@
 /* FIXME: do not rely on NSIG being a multiple of 8 */
 #define REAL_SIGSET_SIZE_BYTES ((NSIG/8))
 
+extern void check_blockables_blocked_or_lose();
+
 static inline void
 sigcopyset(sigset_t *new, sigset_t *old)
 {
diff --git a/src/runtime/thread.h b/src/runtime/thread.h
index 3f823ccf5..9e0bd151e 100644
--- a/src/runtime/thread.h
+++ b/src/runtime/thread.h
@@ -124,10 +124,14 @@ static inline struct thread *arch_os_get_current_thread() {
 #define thread_self pthread_self
 #define thread_kill pthread_kill
 #define thread_sigmask pthread_sigmask
+#define thread_mutex_lock(l) pthread_mutex_lock(l)
+#define thread_mutex_unlock(l) pthread_mutex_unlock(l)
 #else
 #define thread_self getpid
 #define thread_kill kill
 #define thread_sigmask sigprocmask
+#define thread_mutex_lock(l)
+#define thread_mutex_unlock(l)
 #endif
 
 extern void create_initial_thread(lispobj);
diff --git a/src/runtime/x86-linux-os.c b/src/runtime/x86-linux-os.c
index 5b55fdc37..9321e6c1e 100644
--- a/src/runtime/x86-linux-os.c
+++ b/src/runtime/x86-linux-os.c
@@ -69,20 +69,20 @@ void debug_get_ldt()
     printf("%d bytes in ldt: print/x local_ldt_copy\n", n);
 }
 
-volatile lispobj modify_ldt_lock;      /* protect all calls to modify_ldt */
+#ifdef LISP_FEATURE_SB_THREAD
+pthread_mutex_t modify_ldt_lock = PTHREAD_MUTEX_INITIALIZER;
+#endif
 
 int arch_os_thread_init(struct thread *thread) {
     stack_t sigstack;
 #ifdef LISP_FEATURE_SB_THREAD
-    /* FIXME Lock ordering rules: all_threads_lock must usually be
-     * held when getting modify_ldt_lock
-     */
     struct user_desc ldt_entry = {
         1, 0, 0, /* index, address, length filled in later */
         1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1
     };
     int n;
 
-    get_spinlock(&modify_ldt_lock,(long)thread);
+    check_blockables_blocked_or_lose();
+    thread_mutex_lock(&modify_ldt_lock);
     n=modify_ldt(0,local_ldt_copy,sizeof local_ldt_copy);
     /* get next free ldt entry */
@@ -96,7 +96,7 @@ int arch_os_thread_init(struct thread *thread) {
     ldt_entry.limit=dynamic_values_bytes;
     ldt_entry.limit_in_pages=0;
     if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
-        modify_ldt_lock=0;
+        thread_mutex_unlock(&modify_ldt_lock);
         /* modify_ldt call failed: something magical is not happening */
         return 0;
     }
@@ -105,7 +105,7 @@ int arch_os_thread_init(struct thread *thread) {
                    + (1 << 2) /* TI set = LDT */
                    + 3)); /* privilege level */
     thread->tls_cookie=n;
-    modify_ldt_lock=0;
+    pthread_mutex_unlock(&modify_ldt_lock);
     if(n<0) return 0;
     pthread_setspecific(specials,thread);
 
@@ -138,16 +138,14 @@ int arch_os_thread_cleanup(struct thread *thread) {
         0, 0, 0, 0,
         MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 0
     };
+    int result;
+    check_blockables_blocked_or_lose();
 
     ldt_entry.entry_number=thread->tls_cookie;
-    get_spinlock(&modify_ldt_lock,(long)thread);
-    if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
-        modify_ldt_lock=0;
-        /* modify_ldt call failed: something magical is not happening */
-        return 0;
-    }
-    modify_ldt_lock=0;
-    return 1;
+    thread_mutex_lock(&modify_ldt_lock);
+    result = modify_ldt(1, &ldt_entry, sizeof (ldt_entry));
+    thread_mutex_unlock(&modify_ldt_lock);
+    return result;
 }
 
 
diff --git a/version.lisp-expr b/version.lisp-expr
index 043ac68bf..d96a66f62 100644
--- a/version.lisp-expr
+++ b/version.lisp-expr
@@ -17,4 +17,4 @@
 ;;; checkins which aren't released. (And occasionally for internal
 ;;; versions, especially for internal versions off the main CVS
 ;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"0.9.5.34"
+"0.9.5.35"
-- 
2.11.4.GIT
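
A standalone sketch (not part of the patch) of the locking pattern the change
adopts: a statically initialized pthread mutex plus thread_mutex_lock and
thread_mutex_unlock wrappers that compile away when thread support is absent.
The free_pages counter and claim_pages() helper are invented stand-ins for
SBCL's page-table bookkeeping, used only to keep the example self-contained;
build with `cc -pthread`.

    #include <pthread.h>
    #include <stdio.h>

    #define LISP_FEATURE_SB_THREAD 1   /* pretend the threaded build is in effect */

    #ifdef LISP_FEATURE_SB_THREAD
    #define thread_mutex_lock(l)   pthread_mutex_lock(l)
    #define thread_mutex_unlock(l) pthread_mutex_unlock(l)
    static pthread_mutex_t free_pages_lock = PTHREAD_MUTEX_INITIALIZER;
    #else
    #define thread_mutex_lock(l)
    #define thread_mutex_unlock(l)
    #endif

    static long free_pages = 1024;      /* toy stand-in for page_table state */

    /* Claim npages pages under the lock.  The work done while the lock is
     * held can take a while, which is the commit's rationale for a sleeping
     * mutex rather than a spinlock. */
    static long claim_pages(long npages)
    {
        long got;
        thread_mutex_lock(&free_pages_lock);
        got = (free_pages >= npages) ? npages : 0;
        free_pages -= got;
        thread_mutex_unlock(&free_pages_lock);
        return got;
    }

    int main(void)
    {
        long got = claim_pages(16);
        printf("claimed %ld pages, %ld left\n", got, free_pages);
        return 0;
    }

In the runtime itself the same macros come from thread.h, so the GC and LDT
code touched above degrades to unlocked access on non-threaded builds.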