Fix PR/46200
[official-gcc.git] / libobjc / objc-sync.c
blobd01707ca6595da9cfff9590f95cf922fca9330d6
1 /* GNU Objective C Runtime @synchronized implementation
2 Copyright (C) 2010 Free Software Foundation, Inc.
3 Contributed by Nicola Pero <nicola.pero@meta-innovation.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under the
8 terms of the GNU General Public License as published by the Free Software
9 Foundation; either version 3, or (at your option) any later version.
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
13 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
14 details.
16 Under Section 7 of GPL version 3, you are granted additional
17 permissions described in the GCC Runtime Library Exception, version
18 3.1, as published by the Free Software Foundation.
20 You should have received a copy of the GNU General Public License and
21 a copy of the GCC Runtime Library Exception along with this program;
22 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 <http://www.gnu.org/licenses/>. */
/* This file implements objc_sync_enter() and objc_sync_exit(), the
   two functions required to support @synchronized().

   objc_sync_enter(object) needs to get a recursive lock associated
   with 'object', and lock it.

   objc_sync_exit(object) needs to get the recursive lock associated
   with 'object', and unlock it.  */
/* To avoid the overhead of continuously allocating and deallocating
   locks, we implement a pool of locks.  When a lock is needed for an
   object, we get a lock from the pool and associate it with the
   object.

   The lock pool needs to be protected by its own lock (the
   "protection" lock), which has to be locked then unlocked each time
   objc_sync_enter() and objc_sync_exit() are called.  To reduce the
   contention on the protection lock, instead of a single pool with a
   single (global) protection lock we use a number of smaller pools,
   each with its own pool protection lock.  To decide which lock pool
   to use for each object, we compute a hash from the object pointer.

   The implementation of each lock pool uses a linked list of all the
   locks in the pool (both unlocked, and locked); this works in the
   assumption that the number of locks concurrently required is very
   low.  In practice, it seems that you rarely see more than a few
   locks ever concurrently required.

   A standard case is a thread acquiring a lock recursively, over and
   over again: for example when most methods of a class are protected
   by @synchronized(self) but they also call each other.  We use
   thread-local storage to implement a cache and optimize this case.
   The cache stores locks that the thread successfully acquired,
   allowing objc_sync_enter() and objc_sync_exit() to locate a lock
   which is already held by the current thread without having to use
   any protection lock or synchronization mechanism.  It can so detect
   recursive locks/unlocks, and transform them into no-ops that
   require no actual locking or synchronization mechanisms at all.  */
/* You can disable the thread-local cache (most likely to benchmark
   the code with and without it) by compiling with
   -DSYNC_CACHE_DISABLE, or commenting out the following line.  */
71 /* #define SYNC_CACHE_DISABLE */
/* If thread-local storage is not available, automatically disable the
   cache.  */
76 #ifndef HAVE_TLS
77 # define SYNC_CACHE_DISABLE
78 #endif
80 #include "objc-private/common.h"
81 #include "objc/objc-sync.h" /* For objc_sync_enter(), objc_sync_exit() */
82 #include "objc/runtime.h" /* For objc_malloc() */
83 #include "objc/thr.h" /* For objc_mutex_loc() and similar */
84 #include "objc-private/objc-sync.h" /* For __objc_sync_init() */
/* We have 32 pools of locks, each of them protected by its own
   protection lock.  It's tempting to increase this number to reduce
   contention; but in our tests it is high enough.  */
90 #define SYNC_NUMBER_OF_POOLS 32
/* Given an object, it determines which pool contains the associated
   lock.  */
95 #define SYNC_OBJECT_HASH(OBJECT) ((((size_t)OBJECT >> 8) ^ (size_t)OBJECT) & (SYNC_NUMBER_OF_POOLS - 1))
97 /* The locks protecting each pool. */
98 static objc_mutex_t sync_pool_protection_locks[SYNC_NUMBER_OF_POOLS];
100 /* The data structure (linked list) holding the locks. */
101 typedef struct lock_node
103 /* Pointer to next entry on the list. NULL indicates end of list.
104 You need to hold the appropriate sync_pool_protection_locks[N] to
105 read or write this variable. */
106 struct lock_node *next;
108 /* The (recursive) lock. Allocated when the node is created, and
109 always not-NULL, and unchangeable, after that. */
110 objc_mutex_t lock;
112 /* This is how many times the objc_mutex_lock() has been called on
113 the lock (it is 0 when the lock is unused). Used to track when
114 the lock is no longer associated with an object and can be reused
115 for another object. It records "real" locks, potentially (but
116 not necessarily) by multiple threads. You need to hold the
117 appropriate sync_pool_protection_locks[N] to read or write this
118 variable. */
119 unsigned int usage_count;
121 /* The object that the lock is associated with. This variable can
122 only be written when holding the sync_pool_protection_locks[N]
123 and when node->usage_count == 0, ie, the lock is not being used.
124 You can read this variable either when you hold the
125 sync_pool_protection_locks[N] or when you hold node->lock,
126 because in that case you know that node->usage_count can't get to
127 zero until you release the lock. It is valid to have usage_count
128 == 0 and object != nil; in that case, the lock is not currently
129 being used, but is still currently associated with the object.
131 id object;
133 /* This is a counter reserved for use by the thread currently
134 holding the lock. So, you need to hold node->lock to read or
135 write this variable. It is normally 0, and if the cache is not
136 being used, it is kept at 0 (even if recursive locks are being
137 done; in that case, no difference is made between recursive and
138 non-recursive locks: they all increase usage_count, and call
139 objc_mutex_lock()). When the cache is being used, a thread may
140 be able to find a lock that it already holds using the cache; in
141 that case, to perform additional locks/unlocks it can
142 increase/decrease the recursive_usage_count (which does not
143 require any synchronization with other threads, since it's
144 protected by the node->lock itself) instead of the usage_count
145 (which requires locking the pool protection lock). And it can
146 skip the call to objc_mutex_lock/unlock too.
148 unsigned int recursive_usage_count;
149 } *lock_node_ptr;
/* The pools of locks.  Each of them is a linked list of lock_nodes.
   In the list we keep both unlocked and locked nodes.  */
155 static lock_node_ptr sync_pool_array[SYNC_NUMBER_OF_POOLS];
157 #ifndef SYNC_CACHE_DISABLE
/* We store a cache of locks acquired by each thread in thread-local
   storage.  */
161 static __thread lock_node_ptr *lock_cache = NULL;
/* This is a conservative implementation that uses a static array of
   fixed size as cache.  Because the cache is an array that we scan
   linearly, the bigger it is, the slower it gets.  This does not
   matter much at small sizes (eg, the overhead of checking 8 cache
   slots instead of 4 is very small compared to the other overheads
   involved such as function calls and lock/unlock operations), but at
   large sizes it becomes important as obviously there is a size over
   which using the cache backfires: the lookup is so slow that the
   cache slows down the software instead of speeding it up.  In
   practice, it seems that most threads use a small number of
   concurrent locks, so we have a conservative implementation with a
   fixed-size cache of 8 locks which gives a very predictable
   behaviour.  If a thread locks lots of different locks, only the
   first 8 get the speed benefits of the cache, but the cache remains
   always small, fast and predictable.

   SYNC_CACHE_SIZE is the size of the lock cache for each thread.  */
181 #define SYNC_CACHE_SIZE 8
182 #endif /* SYNC_CACHE_DISABLE */
184 /* Called at startup by init.c. */
185 void
186 __objc_sync_init (void)
188 int i;
190 for (i = 0; i < SYNC_NUMBER_OF_POOLS; i++)
192 lock_node_ptr new_node;
194 /* Create a protection lock for each pool. */
195 sync_pool_protection_locks[i] = objc_mutex_allocate ();
197 /* Preallocate a lock per pool. */
198 new_node = objc_malloc (sizeof (struct lock_node));
199 new_node->lock = objc_mutex_allocate ();
200 new_node->object = nil;
201 new_node->usage_count = 0;
202 new_node->recursive_usage_count = 0;
203 new_node->next = NULL;
205 sync_pool_array[i] = new_node;
210 objc_sync_enter (id object)
212 #ifndef SYNC_CACHE_DISABLE
213 int free_cache_slot;
214 #endif
215 int hash;
216 lock_node_ptr node;
217 lock_node_ptr unused_node;
219 if (object == nil)
221 return OBJC_SYNC_SUCCESS;
224 #ifndef SYNC_CACHE_DISABLE
225 if (lock_cache == NULL)
227 /* Note that this calloc only happen only once per thread, the
228 very first time a thread does a objc_sync_enter().
230 lock_cache = objc_calloc (SYNC_CACHE_SIZE, sizeof (lock_node_ptr));
233 /* Check the cache to see if we have a record of having already
234 locked the lock corresponding to this object. While doing so,
235 keep track of the first free cache node in case we need it later.
237 node = NULL;
238 free_cache_slot = -1;
241 int i;
242 for (i = 0; i < SYNC_CACHE_SIZE; i++)
244 lock_node_ptr locked_node = lock_cache[i];
246 if (locked_node == NULL)
248 if (free_cache_slot == -1)
250 free_cache_slot = i;
253 else if (locked_node->object == object)
255 node = locked_node;
256 break;
261 if (node != NULL)
263 /* We found the lock. Increase recursive_usage_count, which is
264 protected by node->lock, which we already hold.
266 node->recursive_usage_count++;
268 /* There is no need to actually lock anything, since we already
269 hold the lock. Correspondingly, objc_sync_exit() will just
270 decrease recursive_usage_count and do nothing to unlock.
272 return OBJC_SYNC_SUCCESS;
274 #endif /* SYNC_CACHE_DISABLE */
276 /* The following is the standard lookup for the lock in the standard
277 pool lock. It requires a pool protection lock.
279 hash = SYNC_OBJECT_HASH(object);
281 /* Search for an existing lock for 'object'. While searching, make
282 note of any unused lock if we find any.
284 unused_node = NULL;
286 objc_mutex_lock (sync_pool_protection_locks[hash]);
288 node = sync_pool_array[hash];
290 while (node != NULL)
292 if (node->object == object)
294 /* We found the lock. */
295 node->usage_count++;
296 objc_mutex_unlock (sync_pool_protection_locks[hash]);
298 #ifndef SYNC_CACHE_DISABLE
299 /* Put it in the cache. */
300 if (free_cache_slot != -1)
302 lock_cache[free_cache_slot] = node;
304 #endif
306 /* Lock it. */
307 objc_mutex_lock (node->lock);
309 return OBJC_SYNC_SUCCESS;
312 if (unused_node == NULL && node->usage_count == 0)
314 /* We found the first unused node. Record it. */
315 unused_node = node;
318 node = node->next;
321 /* An existing lock for 'object' could not be found. */
322 if (unused_node != NULL)
324 /* But we found a unused lock; use it. */
325 unused_node->object = object;
326 unused_node->usage_count = 1;
327 unused_node->recursive_usage_count = 0;
328 objc_mutex_unlock (sync_pool_protection_locks[hash]);
330 #ifndef SYNC_CACHE_DISABLE
331 if (free_cache_slot != -1)
333 lock_cache[free_cache_slot] = unused_node;
335 #endif
337 objc_mutex_lock (unused_node->lock);
339 return OBJC_SYNC_SUCCESS;
341 else
343 /* There are no unused nodes; allocate a new node. */
344 lock_node_ptr new_node;
346 /* Create the node. */
347 new_node = objc_malloc (sizeof (struct lock_node));
348 new_node->lock = objc_mutex_allocate ();
349 new_node->object = object;
350 new_node->usage_count = 1;
351 new_node->recursive_usage_count = 0;
353 /* Attach it at the beginning of the pool. */
354 new_node->next = sync_pool_array[hash];
355 sync_pool_array[hash] = new_node;
356 objc_mutex_unlock (sync_pool_protection_locks[hash]);
358 #ifndef SYNC_CACHE_DISABLE
359 if (free_cache_slot != -1)
361 lock_cache[free_cache_slot] = new_node;
363 #endif
365 objc_mutex_lock (new_node->lock);
367 return OBJC_SYNC_SUCCESS;
372 objc_sync_exit (id object)
374 int hash;
375 lock_node_ptr node;
377 if (object == nil)
379 return OBJC_SYNC_SUCCESS;
382 #ifndef SYNC_CACHE_DISABLE
383 if (lock_cache != NULL)
385 int i;
387 /* Find the lock in the cache. */
388 node = NULL;
389 for (i = 0; i < SYNC_CACHE_SIZE; i++)
391 lock_node_ptr locked_node = lock_cache[i];
393 if (locked_node != NULL && locked_node->object == object)
395 node = locked_node;
396 break;
399 /* Note that, if a node was found in the cache, the variable i
400 now holds the index where it was found, which will be used to
401 remove it from the cache. */
403 if (node != NULL)
405 if (node->recursive_usage_count > 0)
407 node->recursive_usage_count--;
408 return OBJC_SYNC_SUCCESS;
410 else
412 /* We need to do a real unlock. */
413 hash = SYNC_OBJECT_HASH(object);
415 /* TODO: If we had atomic increase/decrease operations
416 with memory barriers, we could avoid the lock here!
418 objc_mutex_lock (sync_pool_protection_locks[hash]);
419 node->usage_count--;
420 /* Normally, we do not reset object to nil here. We'll
421 leave the lock associated with that object, at zero
422 usage count. This makes it slighly more efficient to
423 provide a lock for that object if (as likely)
424 requested again. If the object is deallocated, we
425 don't care. It will never match a new lock that is
426 requested, and the node will be reused at some point.
428 But, if garbage collection is enabled, leaving a
429 pointer to the object in memory might prevent the
430 object from being released. In that case, we remove
431 it (TODO: maybe we should avoid using the garbage
432 collector at all ? Nothing is ever deallocated in
433 this file).
435 #if OBJC_WITH_GC
436 node->object = nil;
437 #endif
438 objc_mutex_unlock (sync_pool_protection_locks[hash]);
440 /* PS: Between objc_mutex_unlock
441 (sync_pool_protection_locks[hash]) and
442 objc_mutex_unlock (node->lock), the pool is unlocked
443 so other threads may allocate this same lock to
444 another object (!). This is not a problem, but it is
445 curious.
447 objc_mutex_unlock (node->lock);
449 /* Remove the node from the cache. */
450 lock_cache[i] = NULL;
452 return OBJC_SYNC_SUCCESS;
456 #endif
458 /* The cache either wasn't there, or didn't work (eg, we overflowed
459 it at some point and stopped recording new locks in the cache).
460 Proceed with a full search of the lock pool. */
461 hash = SYNC_OBJECT_HASH(object);
463 objc_mutex_lock (sync_pool_protection_locks[hash]);
465 /* Search for an existing lock for 'object'. */
466 node = sync_pool_array[hash];
468 while (node != NULL)
470 if (node->object == object)
472 /* We found the lock. */
473 node->usage_count--;
474 objc_mutex_unlock (sync_pool_protection_locks[hash]);
476 objc_mutex_unlock (node->lock);
478 /* No need to remove the node from the cache, since it
479 wasn't found in the cache when we looked for it!
482 return OBJC_SYNC_SUCCESS;
485 node = node->next;
488 objc_mutex_unlock (sync_pool_protection_locks[hash]);
490 /* A lock for 'object' to unlock could not be found (!!). */
491 return OBJC_SYNC_NOT_OWNING_THREAD_ERROR;