/*
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/thread_local_alloc.h"
                /* To determine type of tsd impl.       */
                /* Includes private/specific.h.         */

#if defined(USE_CUSTOM_SPECIFIC)

static const tse invalid_tse = {INVALID_QTID, 0, 0, INVALID_THREADID};
                /* A thread-specific data entry which will never    */
                /* appear valid to a reader.  Used to fill in empty */
                /* cache entries to avoid a check for 0.            */
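
/* For illustration: a minimal sketch (an assumption, not the actual   */
/* fast path, which lives in private/specific.h) of the kind of        */
/* lock-free lookup that invalid_tse enables.  Since every cache slot  */
/* always holds a valid tse pointer, possibly &invalid_tse, the reader */
/* may dereference it unconditionally; a stale or empty slot simply    */
/* fails the qtid comparison and falls through to the slow path.  The  */
/* quick_thread_id() and TS_CACHE_HASH() names are assumptions here.   */
#if 0
  static void * example_getspecific(tsd * key)
  {
    word qtid = quick_thread_id();              /* hypothetical */
    tse * volatile * entry_ptr = &key -> cache[TS_CACHE_HASH(qtid)];
    tse * entry = *entry_ptr;   /* Must be loaded exactly once. */

    if (entry -> qtid == qtid)  /* Never a null dereference.    */
      return entry -> value;
    return GC_slow_getspecific(key, qtid, entry_ptr);
  }
#endif
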
GC_INNER int GC_key_create_inner(tsd ** key_ptr)
{
    int i;
    tsd * result = (tsd *)MALLOC_CLEAR(sizeof(tsd));

    /* A quick alignment check, since we need atomic stores. */
    GC_ASSERT((word)(&invalid_tse.next) % sizeof(tse *) == 0);
    if (0 == result) return ENOMEM;
    pthread_mutex_init(&(result -> lock), NULL);
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
      result -> cache[i] = (/* no const */ tse *)&invalid_tse;
    }
#   ifdef GC_ASSERTIONS
      for (i = 0; i < TS_HASH_SIZE; ++i) {
        GC_ASSERT(result -> hash[i].p == 0);
      }
#   endif
    *key_ptr = result;
    return 0;
}

/* Called with the lock held.   */
GC_INNER int GC_setspecific(tsd * key, void * value)
{
    pthread_t self = pthread_self();
    int hash_val = HASH(self);
    volatile tse * entry;

    GC_ASSERT(self != INVALID_THREADID);
    GC_dont_gc++; /* disable GC */
    entry = (volatile tse *)MALLOC_CLEAR(sizeof(tse));
    GC_dont_gc--;
    if (0 == entry) return ENOMEM;

    pthread_mutex_lock(&(key -> lock));
    /* Could easily check for an existing entry here.   */
    entry -> next = key -> hash[hash_val].p;
    entry -> thread = self;
    entry -> value = value;
    GC_ASSERT(entry -> qtid == INVALID_QTID);
    /* There can only be one writer at a time, but this needs to be    */
    /* atomic with respect to concurrent readers.                      */
    AO_store_release(&key -> hash[hash_val].ao, (AO_t)entry);
    pthread_mutex_unlock(&(key -> lock));
    return 0;
}
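
/* For illustration only: a hedged sketch of how a client might create */
/* a key and publish a per-thread value with the two calls above.  The */
/* example_key name and the error handling are assumptions; in the     */
/* collector the key is created once and GC_setspecific() is invoked   */
/* with the lock held, as noted above.                                 */
#if 0
  static tsd *example_key = NULL;

  static void example_register_thread(void *per_thread_data)
  {
    if (NULL == example_key
        && GC_key_create_inner(&example_key) != 0)
      ABORT("tsd key creation failed");
    /* Publishes an entry that lock-free readers may probe at once. */
    if (GC_setspecific(example_key, per_thread_data) != 0)
      ABORT("tsd entry allocation failed");
  }
#endif
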
/* Remove thread-specific data for this thread.  Should be called on   */
/* thread exit.                                                        */
GC_INNER void GC_remove_specific(tsd * key)
{
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry;
    tse **link = &key -> hash[hash_val].p;

    pthread_mutex_lock(&(key -> lock));
    entry = *link;
    while (entry != NULL && entry -> thread != self) {
      link = &(entry -> next);
      entry = entry -> next;
    }
    /* Invalidate qtid field, since qtids may be reused, and a later   */
    /* cache lookup could otherwise find this entry.                   */
    if (entry != NULL) {
      entry -> qtid = INVALID_QTID;
      *link = entry -> next;
        /* Atomic! concurrent accesses still work.      */
        /* They must, since readers don't lock.         */
        /* We shouldn't need a volatile access here,    */
        /* since both this and the preceding write      */
        /* should become visible no later than          */
        /* the pthread_mutex_unlock() call.             */
    }
    /* If we wanted to deallocate the entry, we'd first have to clear  */
    /* any cache entries pointing to it.  That probably requires       */
    /* additional synchronization, since we can't prevent a concurrent */
    /* cache lookup, which could still be examining deallocated        */
    /* memory.  This can only happen if the concurrent access is from  */
    /* another thread, and hence has missed the cache, but still...    */

    /* With GC, we're done, since the pointers from the cache will     */
    /* be overwritten, all local pointers to the entries will be       */
    /* dropped, and the entry will then be reclaimed.                  */
    pthread_mutex_unlock(&(key -> lock));
}
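
/* An illustrative interleaving of the hazard the INVALID_QTID store   */
/* above guards against (our reading of the comment, not text from the */
/* original): thread A exits, and its unlinked entry still sits in a   */
/* lookup cache with a live-looking qtid; if qtids are recycled, a     */
/* later fast-path probe using the recycled qtid could match A's stale */
/* entry and return A's value.  Storing INVALID_QTID first makes the   */
/* stale entry fail every future qtid comparison.                      */
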
/* Note that even the slow path doesn't lock.   */
GC_INNER void * GC_slow_getspecific(tsd * key, word qtid,
                                    tse * volatile * cache_ptr)
{
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry = key -> hash[hash_val].p;

    GC_ASSERT(qtid != INVALID_QTID);
    while (entry != NULL && entry -> thread != self) {
      entry = entry -> next;
    }
    if (entry == NULL) return NULL;
    /* Set cache_entry.         */
    entry -> qtid = (AO_t)qtid;
        /* It's safe to do this asynchronously.  Either value  */
        /* is safe, though may produce spurious misses.        */
        /* We're replacing one qtid with another one for the   */
        /* same thread.                                        */
    *cache_ptr = entry;
        /* Again this is safe since pointer assignments are    */
        /* presumed atomic, and either pointer is valid.       */
    return entry -> value;
}
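
/* Elaborating on the "spurious misses" note above (our reading, not   */
/* text from the original): threads that hash to the same cache slot   */
/* race on *cache_ptr, and a thread whose qtid changes races with its  */
/* own older entry -> qtid value.  Whichever store lands last wins;    */
/* the loser merely misses on its next fast-path probe and retries     */
/* through this slow path.  Correctness never depends on winning the   */
/* race, only on each individual store being atomic.                   */
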
#ifdef GC_ASSERTIONS

  /* Check that all elements of the data structure associated with     */
  /* key are marked.                                                   */
  void GC_check_tsd_marks(tsd *key)
  {
    int i;
    tse *p;

    if (!GC_is_marked(GC_base(key))) {
      ABORT("Unmarked thread-specific-data table");
    }
    for (i = 0; i < TS_HASH_SIZE; ++i) {
      for (p = key -> hash[i].p; p != 0; p = p -> next) {
        if (!GC_is_marked(GC_base(p))) {
          GC_err_printf("Thread-specific-data entry at %p not marked\n", p);
          ABORT("Unmarked tse");
        }
      }
    }
    for (i = 0; i < TS_CACHE_SIZE; ++i) {
      p = key -> cache[i];
      if (p != &invalid_tse && !GC_is_marked(GC_base(p))) {
        GC_err_printf("Cached thread-specific-data entry at %p not marked\n",
                      p);
        ABORT("Unmarked cached tse");
      }
    }
  }

#endif /* GC_ASSERTIONS */

#endif /* USE_CUSTOM_SPECIFIC */