/*-
 * Copyright 1999, 2000 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09
 */

/*
 * Thread locking implementation for the dynamic linker.
 *
 * We use the "simple, non-scalable reader-preference lock" from:
 *
 *   J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer
 *   Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on
 *   Principles and Practice of Parallel Programming, April 1991.
 *
 * In this algorithm the lock is a single word.  Its low-order bit is
 * set when a writer holds the lock.  The remaining high-order bits
 * contain a count of readers desiring the lock.  The algorithm requires
 * atomic "compare_and_store" and "add" operations, which we implement
 * using assembly language sequences in "rtld_start.S".
 */

#include <sys/param.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>

#include "debug.h"
#include "rtld.h"
#include "rtld_machdep.h"

#define WAFLAG		0x1	/* A writer holds the lock */
#define RC_INCR		0x2	/* Adjusts count of readers desiring lock */
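
/*
 * Illustration (derived from the two macros above, not an exhaustive
 * list of states): with WAFLAG in the low bit and readers counted in
 * units of RC_INCR, the lock word reads as
 *
 *	0			free
 *	WAFLAG			held by a writer
 *	n * RC_INCR		n readers hold or are waiting for the lock
 *	n * RC_INCR + WAFLAG	a writer holds it while n readers wait
 */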

typedef struct Struct_Lock {
	volatile u_int lock;
	void *base;
} Lock;

static sigset_t fullsigmask, oldsigmask;
static int thread_flag;

static void *
def_lock_create(void)
{
	void *base;
	char *p;
	uintptr_t r;
	Lock *l;

	/*
	 * Arrange for the lock to occupy its own cache line.  First, we
	 * optimistically allocate just a cache line, hoping that malloc
	 * will give us a well-aligned block of memory.  If that doesn't
	 * work, we allocate a larger block and take a well-aligned cache
	 * line from it.
	 */
	base = xmalloc(CACHE_LINE_SIZE);
	p = (char *)base;
	if ((uintptr_t)p % CACHE_LINE_SIZE != 0) {
		free(base);
		base = xmalloc(2 * CACHE_LINE_SIZE);
		p = (char *)base;
		if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0)
			p += CACHE_LINE_SIZE - r;
	}
	l = (Lock *)p;
	l->base = base;
	l->lock = 0;
	return (l);
}
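
/*
 * Giving each lock its own cache line keeps the atomic operations and
 * the spinning on one lock from bouncing the cache line of an unrelated
 * lock or of neighbouring malloc'ed data (false sharing).
 */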

static void
def_lock_destroy(void *lock)
{
	Lock *l = (Lock *)lock;

	free(l->base);
}

static void
def_rlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;

	atomic_add_acq_int(&l->lock, RC_INCR);
	while (l->lock & WAFLAG)
		;	/* Spin until the writer releases the lock */
}
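
/*
 * Note the reader preference: a reader announces itself by adding
 * RC_INCR first and then spins only while a writer actually holds the
 * lock, so readers are never queued behind merely-waiting writers.
 */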

static void
def_wlock_acquire(void *lock)
{
	Lock *l = (Lock *)lock;
	sigset_t tmp_oldsigmask;

	for ( ; ; ) {
		sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
		if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
			break;
		sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
	}
	oldsigmask = tmp_oldsigmask;
}
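
/*
 * Signals remain blocked for as long as the write lock is held (the
 * saved mask is restored in def_lock_release) so that a signal handler
 * cannot enter the dynamic linker, e.g. via lazy binding, while rtld's
 * data structures are being modified.
 */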

static void
def_lock_release(void *lock)
{
	Lock *l = (Lock *)lock;

	if ((l->lock & WAFLAG) == 0)
		atomic_add_rel_int(&l->lock, -RC_INCR);
	else {
		atomic_add_rel_int(&l->lock, -WAFLAG);
		sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
	}
}

static int
def_thread_set_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag |= mask;
	return (old_val);
}

static int
def_thread_clr_flag(int mask)
{
	int old_val = thread_flag;

	thread_flag &= ~mask;
	return (old_val);
}

/*
 * Public interface exposed to the rest of the dynamic linker.
 */
static struct RtldLockInfo lockinfo;
static struct RtldLockInfo deflockinfo;

static int
thread_mask_set(int mask)
{
	return lockinfo.thread_set_flag(mask);
}

static void
thread_mask_clear(int mask)
{
	lockinfo.thread_clr_flag(mask);
}

#define RTLD_LOCK_CNT	3
struct rtld_lock {
	void	*handle;
	int	 mask;
} rtld_locks[RTLD_LOCK_CNT];

rtld_lock_t	rtld_bind_lock = &rtld_locks[0];
rtld_lock_t	rtld_libc_lock = &rtld_locks[1];
rtld_lock_t	rtld_phdr_lock = &rtld_locks[2];

void
rlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("rlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.rlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_RLOCKED;
}
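
/*
 * The thread_mask_set() check above keeps a bitmask of the rtld locks
 * this context already holds: on a recursive entry the underlying lock
 * is not taken again and lockstate records RTLD_LOCK_UNLOCKED, so the
 * matching lock_release() for that level becomes a no-op.
 */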

void
wlock_acquire(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	if (thread_mask_set(lock->mask) & lock->mask) {
		dbg("wlock_acquire: recursed");
		lockstate->lockstate = RTLD_LOCK_UNLOCKED;
		return;
	}
	lockinfo.wlock_acquire(lock->handle);
	lockstate->lockstate = RTLD_LOCK_WLOCKED;
}

void
lock_release(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
	case RTLD_LOCK_WLOCKED:
		thread_mask_clear(lock->mask);
		lockinfo.lock_release(lock->handle);
		break;
	}
}

void
lock_upgrade(rtld_lock_t lock, RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	lock_release(lock, lockstate);
	wlock_acquire(lock, lockstate);
}

void
lock_restart_for_upgrade(RtldLockState *lockstate)
{

	if (lockstate == NULL)
		return;

	switch (lockstate->lockstate) {
	case RTLD_LOCK_UNLOCKED:
	case RTLD_LOCK_WLOCKED:
		break;
	case RTLD_LOCK_RLOCKED:
		siglongjmp(lockstate->env, 1);
		break;
	}
}

void
lockdflt_init(void)
{
	int i;

	deflockinfo.rtli_version = RTLI_VERSION;
	deflockinfo.lock_create = def_lock_create;
	deflockinfo.lock_destroy = def_lock_destroy;
	deflockinfo.rlock_acquire = def_rlock_acquire;
	deflockinfo.wlock_acquire = def_wlock_acquire;
	deflockinfo.lock_release = def_lock_release;
	deflockinfo.thread_set_flag = def_thread_set_flag;
	deflockinfo.thread_clr_flag = def_thread_clr_flag;
	deflockinfo.at_fork = NULL;

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].mask = (1 << i);
		rtld_locks[i].handle = NULL;
	}

	memcpy(&lockinfo, &deflockinfo, sizeof(lockinfo));
	_rtld_thread_init(NULL);
	/*
	 * Construct a mask to block all signals except traps which might
	 * conceivably be generated within the dynamic linker itself.
	 */
	sigfillset(&fullsigmask);
	sigdelset(&fullsigmask, SIGILL);
	sigdelset(&fullsigmask, SIGTRAP);
	sigdelset(&fullsigmask, SIGABRT);
	sigdelset(&fullsigmask, SIGEMT);
	sigdelset(&fullsigmask, SIGFPE);
	sigdelset(&fullsigmask, SIGBUS);
	sigdelset(&fullsigmask, SIGSEGV);
	sigdelset(&fullsigmask, SIGSYS);
}
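
/*
 * At this point rtld runs with the default, signal-blocking lock
 * implementation above; a threads library may later replace it through
 * the _rtld_thread_init() callback below.
 */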

/*
 * Callback function to allow the threads implementation to register its
 * own locking primitives if the default ones are not suitable.
 * The current context should be the only context executing at the time
 * of invocation.
 */
void
_rtld_thread_init(struct RtldLockInfo *pli)
{
	int flags, i;
	void *locks[RTLD_LOCK_CNT];

	/* disable all locking while this function is running */
	flags = thread_mask_set(~0);
	if (pli == NULL)
		pli = &deflockinfo;

	for (i = 0; i < RTLD_LOCK_CNT; i++)
		if ((locks[i] = pli->lock_create()) == NULL)
			break;

	if (i < RTLD_LOCK_CNT) {
		while (--i >= 0)
			pli->lock_destroy(locks[i]);
		abort();
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		if (rtld_locks[i].handle == NULL)
			continue;
		if (flags & rtld_locks[i].mask)
			lockinfo.lock_release(rtld_locks[i].handle);
		lockinfo.lock_destroy(rtld_locks[i].handle);
	}

	for (i = 0; i < RTLD_LOCK_CNT; i++) {
		rtld_locks[i].handle = locks[i];
		if (flags & rtld_locks[i].mask)
			pli->wlock_acquire(rtld_locks[i].handle);
	}

	lockinfo.lock_create = pli->lock_create;
	lockinfo.lock_destroy = pli->lock_destroy;
	lockinfo.rlock_acquire = pli->rlock_acquire;
	lockinfo.wlock_acquire = pli->wlock_acquire;
	lockinfo.lock_release = pli->lock_release;
	lockinfo.thread_set_flag = pli->thread_set_flag;
	lockinfo.thread_clr_flag = pli->thread_clr_flag;
	lockinfo.at_fork = pli->at_fork;

	/* restore thread locking state, this time with new locks */
	thread_mask_clear(~0);
	thread_mask_set(flags);
	dbg("_rtld_thread_init: done");
}
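
/*
 * Illustrative sketch (not part of rtld): a threads library would
 * typically register its primitives at startup roughly as below; the
 * thr_*() callback names are hypothetical placeholders.
 *
 *	static struct RtldLockInfo li = {
 *		.rtli_version	 = RTLI_VERSION,
 *		.lock_create	 = thr_lock_create,
 *		.lock_destroy	 = thr_lock_destroy,
 *		.rlock_acquire	 = thr_rlock_acquire,
 *		.wlock_acquire	 = thr_wlock_acquire,
 *		.lock_release	 = thr_lock_release,
 *		.thread_set_flag = thr_set_flag,
 *		.thread_clr_flag = thr_clr_flag,
 *		.at_fork	 = NULL,
 *	};
 *
 *	_rtld_thread_init(&li);
 */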