/* Read-write lock implementation.
   Copyright (C) 1998, 2000 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Xavier Leroy <Xavier.Leroy@inria.fr>
   and Ulrich Drepper <drepper@cygnus.com>, 1998.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "internals.h"
#include "queue.h"
#include "spinlock.h"
#include "restart.h"
/*
 * Check whether the calling thread already owns one or more read locks on the
 * specified lock.  If so, return a pointer to the read lock info structure
 * corresponding to that lock.
 */

static pthread_readlock_info *
rwlock_is_in_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info;

  for (info = self->p_readlock_list; info != NULL; info = info->pr_next)
    {
      if (info->pr_lock == rwlock)
        return info;
    }

  return NULL;
}
/*
 * Add a new lock to the thread's list of locks for which it has a read lock.
 * A new info node must be allocated for this, which is taken from the thread's
 * free list, or by calling malloc.  If malloc fails, a null pointer is
 * returned.  Otherwise the lock info structure is initialized and pushed
 * onto the thread's list.
 */

static pthread_readlock_info *
rwlock_add_to_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info *info = self->p_readlock_free;

  if (info != NULL)
    self->p_readlock_free = info->pr_next;
  else
    info = malloc(sizeof *info);

  if (info == NULL)
    return NULL;

  info->pr_lock_count = 1;
  info->pr_lock = rwlock;
  info->pr_next = self->p_readlock_list;
  self->p_readlock_list = info;

  return info;
}
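/* Illustrative sketch (not part of the original file): the free-list
   recycling pattern used by rwlock_add_to_list above, reduced to a
   standalone example.  Released nodes are cached and reused before
   malloc is called again.  All names below are invented for
   illustration only.  */
#if 0
#include <stdlib.h>

struct node { struct node *next; int payload; };

static struct node *node_cache;          /* singly-linked list of spare nodes */

static struct node *node_alloc(void)
{
  struct node *n = node_cache;
  if (n != NULL)
    node_cache = n->next;                /* pop a cached node */
  else
    n = malloc(sizeof *n);               /* cache empty: fall back to malloc */
  return n;
}

static void node_release(struct node *n)
{
  n->next = node_cache;                  /* push back onto the cache */
  node_cache = n;
}
#endif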
/*
 * If the thread owns a read lock over the given pthread_rwlock_t,
 * and this read lock is tracked in the thread's lock list,
 * this function returns a pointer to the info node in that list.
 * It also decrements the lock count within that node, and if
 * it reaches zero, it removes the node from the list.
 * If nothing is found, it returns a null pointer.
 */

static pthread_readlock_info *
rwlock_remove_from_list(pthread_descr self, pthread_rwlock_t *rwlock)
{
  pthread_readlock_info **pinfo;

  for (pinfo = &self->p_readlock_list; *pinfo != NULL; pinfo = &(*pinfo)->pr_next)
    {
      if ((*pinfo)->pr_lock == rwlock)
        {
          pthread_readlock_info *info = *pinfo;
          if (--info->pr_lock_count == 0)
            *pinfo = info->pr_next;
          return info;
        }
    }

  return NULL;
}
/*
 * This function checks whether the conditions are right to place a read lock.
 * It returns 1 if so, otherwise zero.  The rwlock's internal lock must be
 * locked upon entry.
 */

static int
rwlock_can_rdlock(pthread_rwlock_t *rwlock, int have_lock_already)
{
  /* Can't readlock; it is write locked. */
  if (rwlock->__rw_writer != NULL)
    return 0;

  /* Lock prefers readers; get it. */
  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP)
    return 1;

  /* Lock prefers writers, but none are waiting. */
  if (queue_is_empty(&rwlock->__rw_write_waiting))
    return 1;

  /* Writers are waiting, but this thread already has a read lock. */
  if (have_lock_already)
    return 1;

  /* Writers are waiting, and this is a new lock. */
  return 0;
}
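/* Illustrative sketch (not part of the original file): the four admission
   cases above restated over a simplified model, so the decision order is
   easy to follow.  The struct and its fields are hypothetical stand-ins,
   not the real rwlock internals.  */
#if 0
struct model_rwlock {
  int write_locked;        /* stands in for __rw_writer != NULL */
  int prefers_readers;     /* stands in for the __rw_kind check */
  int writers_waiting;     /* stands in for the write-wait queue test */
};

static int model_can_rdlock(const struct model_rwlock *m, int have_lock_already)
{
  if (m->write_locked)     return 0;  /* a writer holds the lock */
  if (m->prefers_readers)  return 1;  /* reader preference always admits */
  if (!m->writers_waiting) return 1;  /* writer preference, but none queued */
  if (have_lock_already)   return 1;  /* recursive read must not deadlock */
  return 0;                           /* new readers yield to queued writers */
}
#endif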
/*
 * This function helps support the brain-damaged recursive read locking
 * semantics required by Unix 98, while maintaining write priority.
 * It determines whether this thread already holds a read lock on the
 * given rwlock, returning 1 if so and 0 otherwise.
 *
 * If the thread has any ``untracked read locks'' it assumes, just to
 * be safe, that this lock is among them, and returns 1.
 *
 * Also, if it finds the thread's lock in the list, it sets the pointer
 * referenced by pexisting to refer to the list entry.
 *
 * If the thread has no untracked locks, and the lock is not found
 * in its list, then it is added to the list.  If this fails,
 * then *pout_of_mem is set to 1.
 */
static int
rwlock_have_already(pthread_descr *pself, pthread_rwlock_t *rwlock,
    pthread_readlock_info **pexisting, int *pout_of_mem)
{
  pthread_readlock_info *existing = NULL;
  int out_of_mem = 0, have_lock_already = 0;
  pthread_descr self = *pself;

  if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
    {
      if (!self)
        self = thread_self();

      existing = rwlock_is_in_list(self, rwlock);

      if (existing != NULL || self->p_untracked_readlock_count > 0)
        have_lock_already = 1;
      else
        {
          existing = rwlock_add_to_list(self, rwlock);
          if (existing == NULL)
            out_of_mem = 1;
        }
    }

  *pout_of_mem = out_of_mem;
  *pexisting = existing;
  *pself = self;

  return have_lock_already;
}
int
__pthread_rwlock_init (pthread_rwlock_t *rwlock,
                       const pthread_rwlockattr_t *attr)
{
  __pthread_init_lock(&rwlock->__rw_lock);
  rwlock->__rw_readers = 0;
  rwlock->__rw_writer = NULL;
  rwlock->__rw_read_waiting = NULL;
  rwlock->__rw_write_waiting = NULL;

  if (attr == NULL)
    {
      rwlock->__rw_kind = PTHREAD_RWLOCK_DEFAULT_NP;
      rwlock->__rw_pshared = PTHREAD_PROCESS_PRIVATE;
    }
  else
    {
      rwlock->__rw_kind = attr->__lockkind;
      rwlock->__rw_pshared = attr->__pshared;
    }

  return 0;
}
strong_alias (__pthread_rwlock_init, pthread_rwlock_init)
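/* Illustrative usage sketch (not part of the original file): initializing
   a rwlock through the public API, with and without an attribute object.
   Error handling is omitted for brevity.  */
#if 0
#include <pthread.h>

static pthread_rwlock_t lock_a;
static pthread_rwlock_t lock_b;

static void init_examples(void)
{
  pthread_rwlockattr_t attr;

  /* NULL attr: DEFAULT_NP kind, PROCESS_PRIVATE, as coded above.  */
  pthread_rwlock_init(&lock_a, NULL);

  /* Explicit attribute object.  */
  pthread_rwlockattr_init(&attr);
  pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
  pthread_rwlock_init(&lock_b, &attr);
  pthread_rwlockattr_destroy(&attr);
}
#endif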
int
__pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
  int readers;
  _pthread_descr writer;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  readers = rwlock->__rw_readers;
  writer = rwlock->__rw_writer;
  __pthread_unlock (&rwlock->__rw_lock);

  if (readers > 0 || writer != NULL)
    return EBUSY;

  return 0;
}
strong_alias (__pthread_rwlock_destroy, pthread_rwlock_destroy)
int
__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;

  have_lock_already = rwlock_have_already(&self, rwlock,
      &existing, &out_of_mem);

  for (;;)
    {
      if (self == NULL)
        self = thread_self ();

      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock_can_rdlock(rwlock, have_lock_already))
        break;

      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point. */
    }

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
        existing->pr_lock_count++;
      else
        self->p_untracked_readlock_count++;
    }

  return 0;
}
strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
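/* Illustrative usage sketch (not part of the original file): the Unix 98
   recursion guarantee that the tracking code above exists to serve.  The
   second rdlock must succeed without deadlock even if a writer is queued,
   because this thread already holds a read lock.  Error checks omitted.  */
#if 0
#include <pthread.h>

static pthread_rwlock_t rw;

static void recursive_reader(void)
{
  pthread_rwlock_init(&rw, NULL);
  pthread_rwlock_rdlock(&rw);   /* first read lock; may wait for a writer */
  pthread_rwlock_rdlock(&rw);   /* recursive read lock; must not block */
  pthread_rwlock_unlock(&rw);   /* one unlock per rdlock */
  pthread_rwlock_unlock(&rw);
  pthread_rwlock_destroy(&rw);
}
#endif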
int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self();
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  int retval = EBUSY;

  have_lock_already = rwlock_have_already(&self, rwlock,
      &existing, &out_of_mem);

  __pthread_lock (&rwlock->__rw_lock, self);

  /* 0 is passed here instead of have_lock_already.
     This is to meet Single Unix Spec requirements:
     if writers are waiting, pthread_rwlock_tryrdlock
     does not acquire a read lock, even if the caller holds
     one or more read locks already. */

  if (rwlock_can_rdlock(rwlock, 0))
    {
      ++rwlock->__rw_readers;
      retval = 0;
    }

  __pthread_unlock (&rwlock->__rw_lock);

  if (retval == 0)
    {
      if (have_lock_already || out_of_mem)
        {
          if (existing != NULL)
            existing->pr_lock_count++;
          else
            self->p_untracked_readlock_count++;
        }
    }

  return retval;
}
strong_alias (__pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock)
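/* Illustrative usage sketch (not part of the original file): tryrdlock
   fails with EBUSY rather than blocking, so callers take a fallback path.
   Per the Single Unix Spec note above, EBUSY is also returned while
   writers are waiting, even to a thread that already holds a read lock.  */
#if 0
#include <errno.h>
#include <pthread.h>

static int read_or_skip(pthread_rwlock_t *rw)
{
  if (pthread_rwlock_tryrdlock(rw) == EBUSY)
    return 0;                   /* contended; caller retries later */
  /* ... read the shared data here ... */
  pthread_rwlock_unlock(rw);
  return 1;
}
#endif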
int
__pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
  pthread_descr self = thread_self ();

  while (1)
    {
      __pthread_lock (&rwlock->__rw_lock, self);
      if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
        {
          rwlock->__rw_writer = self;
          __pthread_unlock (&rwlock->__rw_lock);
          return 0;
        }

      /* Suspend ourselves, then try again. */
      enqueue (&rwlock->__rw_write_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      suspend (self); /* This is not a cancellation point. */
    }
}
strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)
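/* Illustrative usage sketch (not part of the original file): a writer
   obtaining exclusive access; from the caller's side this is the
   queue-and-suspend loop above.  shared_counter is a made-up variable.  */
#if 0
#include <pthread.h>

static int shared_counter;

static void bump(pthread_rwlock_t *rw)
{
  pthread_rwlock_wrlock(rw);    /* sleeps until no readers and no writer */
  shared_counter++;             /* exclusive access here */
  pthread_rwlock_unlock(rw);
}
#endif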
int
__pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
    {
      rwlock->__rw_writer = thread_self ();
      result = 0;
    }
  __pthread_unlock (&rwlock->__rw_lock);

  return result;
}
strong_alias (__pthread_rwlock_trywrlock, pthread_rwlock_trywrlock)
int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  pthread_descr torestart;
  pthread_descr th;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_writer != NULL)
    {
      /* Unlocking a write lock. */
      if (rwlock->__rw_writer != thread_self ())
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }
      rwlock->__rw_writer = NULL;

      if ((rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
           && !queue_is_empty(&rwlock->__rw_read_waiting))
          || (th = dequeue(&rwlock->__rw_write_waiting)) == NULL)
        {
          /* Restart all waiting readers. */
          torestart = rwlock->__rw_read_waiting;
          rwlock->__rw_read_waiting = NULL;
          __pthread_unlock (&rwlock->__rw_lock);
          while ((th = dequeue (&torestart)) != NULL)
            restart (th);
        }
      else
        {
          /* Restart one waiting writer. */
          __pthread_unlock (&rwlock->__rw_lock);
          restart (th);
        }
    }
  else
    {
      /* Unlocking a read lock. */
      if (rwlock->__rw_readers == 0)
        {
          __pthread_unlock (&rwlock->__rw_lock);
          return EPERM;
        }

      --rwlock->__rw_readers;
      if (rwlock->__rw_readers == 0)
        /* Restart one waiting writer, if any. */
        th = dequeue (&rwlock->__rw_write_waiting);
      else
        th = NULL;

      __pthread_unlock (&rwlock->__rw_lock);
      if (th != NULL)
        restart (th);

      /* Recursive lock fixup. */

      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
        {
          pthread_descr self = thread_self();
          pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);

          if (victim != NULL)
            {
              if (victim->pr_lock_count == 0)
                {
                  victim->pr_next = self->p_readlock_free;
                  self->p_readlock_free = victim;
                }
            }
          else
            {
              if (self->p_untracked_readlock_count > 0)
                self->p_untracked_readlock_count--;
            }
        }
    }

  return 0;
}
strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)
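/* Illustrative sketch (not part of the original file): the wake-up policy
   above seen from an application.  When the writer unlocks and no writer
   is queued, all queued readers are restarted at once.  Timing is not
   guaranteed: a reader that has not yet queued simply acquires the lock
   directly.  */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t rw;

static void *reader(void *arg)
{
  pthread_rwlock_rdlock(&rw);           /* typically queues behind the writer */
  printf("reader %ld running\n", (long) arg);
  pthread_rwlock_unlock(&rw);
  return NULL;
}

static void demo(void)
{
  pthread_t t[2];
  pthread_rwlock_init(&rw, NULL);
  pthread_rwlock_wrlock(&rw);
  pthread_create(&t[0], NULL, reader, (void *) 0L);
  pthread_create(&t[1], NULL, reader, (void *) 1L);
  pthread_rwlock_unlock(&rw);           /* releases the queued readers together */
  pthread_join(t[0], NULL);
  pthread_join(t[1], NULL);
  pthread_rwlock_destroy(&rw);
}
#endif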
int
pthread_rwlockattr_init (pthread_rwlockattr_t *attr)
{
  attr->__lockkind = 0;
  attr->__pshared = 0;

  return 0;
}


int
__pthread_rwlockattr_destroy (pthread_rwlockattr_t *attr)
{
  return 0;
}
strong_alias (__pthread_rwlockattr_destroy, pthread_rwlockattr_destroy)


int
pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *attr, int *pshared)
{
  *pshared = attr->__pshared;
  return 0;
}


int
pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attr, int pshared)
{
  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
    return EINVAL;

  attr->__pshared = pshared;

  return 0;
}
int
pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t *attr, int *pref)
{
  *pref = attr->__lockkind;
  return 0;
}


int
pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *attr, int pref)
{
  if (pref != PTHREAD_RWLOCK_PREFER_READER_NP
      && pref != PTHREAD_RWLOCK_PREFER_WRITER_NP
      && pref != PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP
      && pref != PTHREAD_RWLOCK_DEFAULT_NP)
    return EINVAL;

  attr->__lockkind = pref;

  return 0;
}
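/* Illustrative usage sketch (not part of the original file): selecting a
   lock kind with the _np extension validated above.  Writer preference
   counters writer starvation, at the price of the recursive-reader
   bookkeeping seen earlier in this file.  */
#if 0
#include <pthread.h>

static int make_writer_preferring(pthread_rwlock_t *rw)
{
  int err;
  pthread_rwlockattr_t attr;

  pthread_rwlockattr_init(&attr);
  pthread_rwlockattr_setkind_np(&attr, PTHREAD_RWLOCK_PREFER_WRITER_NP);
  err = pthread_rwlock_init(rw, &attr);
  pthread_rwlockattr_destroy(&attr);
  return err;
}
#endif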