boehm-gc/specific.c
/*
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#if defined(GC_LINUX_THREADS)

#include "private/gc_priv.h"  /* For GC_compare_and_exchange, GC_memory_barrier */
#include "private/specific.h"

static tse invalid_tse;  /* 0 qtid is guaranteed to be invalid. */
int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *)) {
    int i;
    tsd * result = (tsd *)MALLOC_CLEAR(sizeof (tsd));

    if (0 == result) return ENOMEM;
    pthread_mutex_init(&(result -> lock), NULL);
    /* Prime every cache slot with the shared dummy entry, whose qtid */
    /* of 0 can never match a real lookup.                            */
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
        result -> cache[i] = &invalid_tse;
    }
    *key_ptr = result;
    return 0;
}
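
/* A minimal usage sketch (illustrative only; it assumes PREFIXED(name) */
/* expands to GC_##name, as in private/specific.h):                     */
/*                                                                      */
/*     tsd * my_key;                                                    */
/*     if (GC_key_create(&my_key, NULL) != 0) ABORT("key_create");      */
/*     GC_setspecific(my_key, my_value);                                */
/*     ... GC_getspecific(my_key) ...                                   */
/*                                                                      */
/* Note that the destructor argument is currently ignored here; entries */
/* are removed via remove_specific at thread exit and reclaimed by the  */
/* collector.                                                           */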
int PREFIXED(setspecific) (tsd * key, void * value) {
    pthread_t self = pthread_self();
    int hash_val = HASH(self);
    volatile tse * entry = (volatile tse *)MALLOC_CLEAR(sizeof (tse));

    if (0 == entry) return ENOMEM;
    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here. */
    entry -> next = key -> hash[hash_val];
    entry -> thread = self;
    entry -> value = value;
    /* There can only be one writer at a time, but this needs to be    */
    /* atomic with respect to concurrent readers: the entry is fully   */
    /* initialized before the single pointer store below publishes it. */
    *(volatile tse **)(key -> hash + hash_val) = entry;
    pthread_mutex_unlock(&(key -> lock));
    return 0;
}
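
/* For intuition, the lock-free reader that the publication above must */
/* not break looks like the loop in slow_getspecific below:            */
/*                                                                     */
/*     tse * e = key -> hash[HASH(self)];   (no lock held)             */
/*     while (e != NULL && e -> thread != self) e = e -> next;         */
/*                                                                     */
/* Since the new entry is complete before the list head is updated, a  */
/* racing reader sees either the old head or the finished entry, never */
/* a partially built one, assuming aligned pointer stores are atomic,  */
/* as this file presumes throughout.                                   */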
/* Remove thread-specific data for this thread.  Should be called on */
/* thread exit.                                                       */
void PREFIXED(remove_specific) (tsd * key) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry;
    tse **link = key -> hash + hash_val;

    pthread_mutex_lock(&(key -> lock));
    entry = *link;
    while (entry != NULL && entry -> thread != self) {
        link = &(entry -> next);
        entry = *link;
    }
    if (entry != NULL) {
        /* Invalidate the qtid field, since qtids may be reused, and a */
        /* later cache lookup could otherwise find this entry.  (This  */
        /* must stay inside the NULL check; doing it unconditionally   */
        /* would dereference NULL when no entry exists.)               */
        entry -> qtid = INVALID_QTID;
        /* Unlink the entry.  Atomic, so concurrent accesses still     */
        /* work.  They must, since readers don't lock.                 */
        *link = entry -> next;
    }
    /* If we wanted to deallocate the entry, we'd first have to clear  */
    /* any cache entries pointing to it.  That probably requires       */
    /* additional synchronization, since we can't prevent a concurrent */
    /* cache lookup, which could otherwise still be examining the      */
    /* deallocated memory.  This can only happen if the concurrent     */
    /* access is from another thread, and hence has missed the cache,  */
    /* but still...                                                    */

    /* With GC, we're done, since the pointers from the cache will     */
    /* be overwritten, all local pointers to the entries will be       */
    /* dropped, and the entry will then be reclaimed.                  */
    pthread_mutex_unlock(&(key -> lock));
}
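
/* Typical wiring (an illustrative sketch, not necessarily how the   */
/* collector is set up): a pthread cleanup handler can invoke this   */
/* on thread exit.  tsd_cleanup and my_key are hypothetical names:   */
/*                                                                   */
/*     static void tsd_cleanup(void * arg) {                         */
/*         GC_remove_specific((tsd *)arg);                           */
/*     }                                                             */
/*     ...                                                           */
/*     pthread_cleanup_push(tsd_cleanup, my_key);                    */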
/* Note that even the slow path doesn't lock. */
void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
                                   tse * volatile * cache_ptr) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry = key -> hash[hash_val];

    while (entry != NULL && entry -> thread != self) {
        entry = entry -> next;
    }
    if (entry == NULL) return NULL;
    /* Set the cache entry. */
    entry -> qtid = qtid;
    /* It's safe to do this asynchronously.  Either value */
    /* is safe, though it may produce spurious misses.    */
    *cache_ptr = entry;
    /* Again this is safe since pointer assignments are   */
    /* presumed atomic, and either pointer is valid.      */
    return entry -> value;
}
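
/* For context, the lock-free fast path that calls this lives in        */
/* private/specific.h.  A sketch of its shape (illustrative; the names  */
/* quick_thread_id and CACHE_HASH are assumptions about that header,    */
/* and the real inline code may differ):                                */
/*                                                                      */
/*     void * PREFIXED(getspecific) (tsd * key) {                       */
/*         unsigned long qtid = quick_thread_id();                      */
/*         tse * volatile * entry_ptr = key -> cache + CACHE_HASH(qtid);*/
/*         tse * entry = *entry_ptr;      (loaded exactly once)         */
/*         if (entry -> qtid == qtid) return entry -> value;            */
/*         return PREFIXED(slow_getspecific) (key, qtid, entry_ptr);    */
/*     }                                                                */
/*                                                                      */
/* This is why invalid_tse above exists: a dummy entry whose qtid of 0  */
/* never matches lets the fast path dereference a cache slot without a  */
/* NULL check.                                                          */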
#endif /* GC_LINUX_THREADS */