MySQL 5.0.11 sources for tomato
[tomato.git] / release / src / router / mysql / storage / innodb_plugin / sync / sync0rw.c
blob0f0f3319d7536144d74bb47a8d15ab2b3da374d7
1 /*****************************************************************************
3 Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2008, Google Inc.
6 Portions of this file contain modifications contributed and copyrighted by
7 Google, Inc. Those modifications are gratefully acknowledged and are described
8 briefly in the InnoDB documentation. The contributions by Google are
9 incorporated with their permission, and subject to the conditions contained in
10 the file COPYING.Google.
12 This program is free software; you can redistribute it and/or modify it under
13 the terms of the GNU General Public License as published by the Free Software
14 Foundation; version 2 of the License.
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc.,
22 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24 *****************************************************************************/
26 /**************************************************//**
27 @file sync/sync0rw.c
28 The read-write lock (for thread synchronization)
30 Created 9/11/1995 Heikki Tuuri
31 *******************************************************/
33 #include "sync0rw.h"
34 #ifdef UNIV_NONINL
35 #include "sync0rw.ic"
36 #endif
38 #include "os0thread.h"
39 #include "mem0mem.h"
40 #include "srv0srv.h"
41 #include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */
44 IMPLEMENTATION OF THE RW_LOCK
45 =============================
46 The status of a rw_lock is held in lock_word. The initial value of lock_word is
47 X_LOCK_DECR. lock_word is decremented by 1 for each s-lock and by X_LOCK_DECR
48 for each x-lock. This describes the lock state for each value of lock_word:
50 lock_word == X_LOCK_DECR: Unlocked.
51 0 < lock_word < X_LOCK_DECR: Read locked, no waiting writers.
52 (X_LOCK_DECR - lock_word) is the
53 number of readers that hold the lock.
54 lock_word == 0: Write locked
55 -X_LOCK_DECR < lock_word < 0: Read locked, with a waiting writer.
56 (-lock_word) is the number of readers
57 that hold the lock.
58 lock_word <= -X_LOCK_DECR: Recursively write locked. lock_word has been
59 decremented by X_LOCK_DECR once for each lock,
60 so the number of locks is:
61 ((-lock_word) / X_LOCK_DECR) + 1
62 When lock_word <= -X_LOCK_DECR, we also know that lock_word % X_LOCK_DECR == 0:
63 other values of lock_word are invalid.
65 The lock_word is always read and updated atomically and consistently, so that
66 it always represents the state of the lock, and the state of the lock changes
67 with a single atomic operation. This lock_word holds all of the information
68 that a thread needs in order to determine if it is eligible to gain the lock
69 or if it must spin or sleep. The one exception to this is that writer_thread
70 must be verified before recursive write locks: to solve this scenario, we make
71 writer_thread readable by all threads, but only writeable by the x-lock holder.
73 The other members of the lock obey the following rules to remain consistent:
75 recursive: This and the writer_thread field together control the
76 behaviour of recursive x-locking.
77 lock->recursive must be FALSE in following states:
78 1) The writer_thread contains garbage i.e.: the
79 lock has just been initialized.
80 2) The lock is not x-held and there is no
81 x-waiter waiting on WAIT_EX event.
82 3) The lock is x-held or there is an x-waiter
83 waiting on WAIT_EX event but the 'pass' value
84 is non-zero.
85 lock->recursive is TRUE iff:
86 1) The lock is x-held or there is an x-waiter
87 waiting on WAIT_EX event and the 'pass' value
88 is zero.
89 This flag must be set after the writer_thread field
90 has been updated with a memory ordering barrier.
91 It is unset before the lock_word has been incremented.
92 writer_thread: Is used only in recursive x-locking. Can only be safely
93 read iff lock->recursive flag is TRUE.
94 This field is uninitialized at lock creation time and
95 is updated atomically when x-lock is acquired or when
96 move_ownership is called. A thread is only allowed to
97 set the value of this field to it's thread_id i.e.: a
98 thread cannot set writer_thread to some other thread's
99 id.
100 waiters: May be set to 1 anytime, but to avoid unnecessary wake-up
101 signals, it should only be set to 1 when there are threads
102 waiting on event. Must be 1 when a writer starts waiting to
103 ensure the current x-locking thread sends a wake-up signal
104 during unlock. May only be reset to 0 immediately before
105 a wake-up signal is sent to event. On most platforms, a
106 memory barrier is required after waiters is set, and before
107 verifying lock_word is still held, to ensure some unlocker
108 really does see the flag's new value.
109 event: Threads wait on event for read or writer lock when another
110 thread has an x-lock or an x-lock reservation (wait_ex). A
111 thread may only wait on event after performing the following
112 actions in order:
113 (1) Record the counter value of event (with os_event_reset).
114 (2) Set waiters to 1.
115 (3) Verify lock_word <= 0.
116 (1) must come before (2) to ensure signal is not missed.
117 (2) must come before (3) to ensure a signal is sent.
118 These restrictions force the above ordering.
119 Immediately before sending the wake-up signal, we should:
120 (1) Verify lock_word == X_LOCK_DECR (unlocked)
121 (2) Reset waiters to 0.
122 wait_ex_event: A thread may only wait on the wait_ex_event after it has
123 performed the following actions in order:
124 (1) Decrement lock_word by X_LOCK_DECR.
125 (2) Record counter value of wait_ex_event (os_event_reset,
126 called from sync_array_reserve_cell).
127 (3) Verify that lock_word < 0.
128 (1) must come first to ensure no other thread becomes a reader
129 or next writer, and notifies unlocker that signal must be sent.
130 (2) must come before (3) to ensure the signal is not missed.
131 These restrictions force the above ordering.
132 Immediately before sending the wake-up signal, we should:
133 Verify lock_word == 0 (waiting thread holds x_lock)
/* NOTE: the statistics counters below are updated without any
synchronization, so their values are only approximate. */

/** number of spin waits on rw-latches,
resulted during shared (read) locks */
UNIV_INTERN ib_int64_t	rw_s_spin_wait_count	= 0;
/** number of spin loop rounds on rw-latches,
resulted during shared (read) locks */
UNIV_INTERN ib_int64_t	rw_s_spin_round_count	= 0;

/** number of OS waits on rw-latches,
resulted during shared (read) locks */
UNIV_INTERN ib_int64_t	rw_s_os_wait_count	= 0;

/** number of unlocks (that unlock shared locks),
set only when UNIV_SYNC_PERF_STAT is defined */
UNIV_INTERN ib_int64_t	rw_s_exit_count		= 0;

/** number of spin waits on rw-latches,
resulted during exclusive (write) locks */
UNIV_INTERN ib_int64_t	rw_x_spin_wait_count	= 0;
/** number of spin loop rounds on rw-latches,
resulted during exclusive (write) locks */
UNIV_INTERN ib_int64_t	rw_x_spin_round_count	= 0;

/** number of OS waits on rw-latches,
resulted during exclusive (write) locks */
UNIV_INTERN ib_int64_t	rw_x_os_wait_count	= 0;

/** number of unlocks (that unlock exclusive locks),
set only when UNIV_SYNC_PERF_STAT is defined */
UNIV_INTERN ib_int64_t	rw_x_exit_count		= 0;

/* The global list of rw-locks; all additions and removals are
protected by rw_lock_list_mutex */
UNIV_INTERN rw_lock_list_t	rw_lock_list;
UNIV_INTERN mutex_t		rw_lock_list_mutex;
171 #ifdef UNIV_SYNC_DEBUG
172 /* The global mutex which protects debug info lists of all rw-locks.
173 To modify the debug info list of an rw-lock, this mutex has to be
174 acquired in addition to the mutex protecting the lock. */
176 UNIV_INTERN mutex_t rw_lock_debug_mutex;
177 /* If deadlock detection does not get immediately the mutex,
178 it may wait for this event */
179 UNIV_INTERN os_event_t rw_lock_debug_event;
180 /* This is set to TRUE, if there may be waiters for the event */
181 UNIV_INTERN ibool rw_lock_debug_waiters;
183 /******************************************************************//**
184 Creates a debug info struct. */
185 static
186 rw_lock_debug_t*
187 rw_lock_debug_create(void);
188 /*======================*/
189 /******************************************************************//**
190 Frees a debug info struct. */
191 static
192 void
193 rw_lock_debug_free(
194 /*===============*/
195 rw_lock_debug_t* info);
197 /******************************************************************//**
198 Creates a debug info struct.
199 @return own: debug info struct */
200 static
201 rw_lock_debug_t*
202 rw_lock_debug_create(void)
203 /*======================*/
205 return((rw_lock_debug_t*) mem_alloc(sizeof(rw_lock_debug_t)));
/******************************************************************//**
Frees a debug info struct allocated by rw_lock_debug_create(). */
static
void
rw_lock_debug_free(
/*===============*/
	rw_lock_debug_t* info)	/*!< in, own: debug info struct */
{
	mem_free(info);
}
#endif /* UNIV_SYNC_DEBUG */
/******************************************************************//**
Creates, or rather, initializes an rw-lock object in a specified memory
location (which must be appropriately aligned). The rw-lock is initialized
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
is necessary only if the memory block containing it is freed. */
UNIV_INTERN
void
rw_lock_create_func(
/*================*/
	rw_lock_t*	lock,		/*!< in: pointer to memory */
#ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
	ulint		level,		/*!< in: level */
# endif /* UNIV_SYNC_DEBUG */
	const char*	cmutex_name,	/*!< in: mutex name */
#endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	ulint		cline)		/*!< in: file line where created */
{
	/* If this is the very first time a synchronization object is
	created, then the following call initializes the sync system. */

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
	/* Without atomic builtins, each rw-lock is protected by its
	own embedded mutex. */
	mutex_create(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);

	lock->mutex.cfile_name = cfile_name;
	lock->mutex.cline = cline;

	ut_d(lock->mutex.cmutex_name = cmutex_name);
	ut_d(lock->mutex.mutex_type = 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
# ifdef UNIV_DEBUG
	UT_NOT_USED(cmutex_name);
# endif
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

	/* X_LOCK_DECR is the lock_word value of an unlocked rw-lock;
	see the state table in the file comment above. */
	lock->lock_word = X_LOCK_DECR;
	lock->waiters = 0;

	/* We set this value to signify that lock->writer_thread
	contains garbage at initialization and cannot be used for
	recursive x-locking. */
	lock->recursive = FALSE;
	/* Silence Valgrind when UNIV_DEBUG_VALGRIND is not enabled. */
	memset((void*) &lock->writer_thread, 0, sizeof lock->writer_thread);
	UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread);

#ifdef UNIV_SYNC_DEBUG
	UT_LIST_INIT(lock->debug_list);

	lock->level = level;
#endif /* UNIV_SYNC_DEBUG */

	ut_d(lock->magic_n = RW_LOCK_MAGIC_N);

	lock->cfile_name = cfile_name;
	lock->cline = (unsigned int) cline;

	lock->count_os_wait = 0;
	lock->last_s_file_name = "not yet reserved";
	lock->last_x_file_name = "not yet reserved";
	lock->last_s_line = 0;
	lock->last_x_line = 0;
	lock->event = os_event_create(NULL);
	lock->wait_ex_event = os_event_create(NULL);

	/* Register the lock in the global list, under the list mutex. */
	mutex_enter(&rw_lock_list_mutex);

	ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
	      || UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);

	UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);
}
/******************************************************************//**
Calling this function is obligatory only if the memory buffer containing
the rw-lock is freed. Removes an rw-lock object from the global list. The
rw-lock is checked to be in the non-locked state. */
UNIV_INTERN
void
rw_lock_free(
/*=========*/
	rw_lock_t*	lock)	/*!< in: rw-lock */
{
	ut_ad(rw_lock_validate(lock));
	/* The lock must be fully unlocked (lock_word back at its
	initial value) before it may be freed. */
	ut_a(lock->lock_word == X_LOCK_DECR);

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
	mutex_free(rw_lock_get_mutex(lock));
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

	mutex_enter(&rw_lock_list_mutex);
	os_event_free(lock->event);

	os_event_free(lock->wait_ex_event);

	ut_ad(UT_LIST_GET_PREV(list, lock) == NULL
	      || UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
	ut_ad(UT_LIST_GET_NEXT(list, lock) == NULL
	      || UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);

	UT_LIST_REMOVE(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);

	/* Clear the magic number so that a use-after-free is caught
	by the debug checks. */
	ut_d(lock->magic_n = 0);
}
330 #ifdef UNIV_DEBUG
331 /******************************************************************//**
332 Checks that the rw-lock has been initialized and that there are no
333 simultaneous shared and exclusive locks.
334 @return TRUE */
335 UNIV_INTERN
336 ibool
337 rw_lock_validate(
338 /*=============*/
339 rw_lock_t* lock) /*!< in: rw-lock */
341 ulint waiters;
342 lint lock_word;
344 ut_a(lock);
346 waiters = rw_lock_get_waiters(lock);
347 lock_word = lock->lock_word;
349 ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
350 ut_a(waiters == 0 || waiters == 1);
351 ut_a(lock_word > -X_LOCK_DECR ||(-lock_word) % X_LOCK_DECR == 0);
353 return(TRUE);
355 #endif /* UNIV_DEBUG */
357 /******************************************************************//**
358 Lock an rw-lock in shared mode for the current thread. If the rw-lock is
359 locked in exclusive mode, or there is an exclusive lock request waiting,
360 the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
361 for the lock, before suspending the thread. */
362 UNIV_INTERN
363 void
364 rw_lock_s_lock_spin(
365 /*================*/
366 rw_lock_t* lock, /*!< in: pointer to rw-lock */
367 ulint pass, /*!< in: pass value; != 0, if the lock
368 will be passed to another thread to unlock */
369 const char* file_name, /*!< in: file name where lock requested */
370 ulint line) /*!< in: line where requested */
372 ulint index; /* index of the reserved wait cell */
373 ulint i = 0; /* spin round count */
375 ut_ad(rw_lock_validate(lock));
377 rw_s_spin_wait_count++; /*!< Count calls to this function */
378 lock_loop:
380 /* Spin waiting for the writer field to become free */
381 while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
382 if (srv_spin_wait_delay) {
383 ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
386 i++;
389 if (i == SYNC_SPIN_ROUNDS) {
390 os_thread_yield();
393 if (srv_print_latch_waits) {
394 fprintf(stderr,
395 "Thread %lu spin wait rw-s-lock at %p"
396 " cfile %s cline %lu rnds %lu\n",
397 (ulong) os_thread_pf(os_thread_get_curr_id()),
398 (void*) lock,
399 lock->cfile_name, (ulong) lock->cline, (ulong) i);
402 /* We try once again to obtain the lock */
403 if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
404 rw_s_spin_round_count += i;
406 return; /* Success */
407 } else {
409 if (i < SYNC_SPIN_ROUNDS) {
410 goto lock_loop;
413 rw_s_spin_round_count += i;
415 sync_array_reserve_cell(sync_primary_wait_array,
416 lock, RW_LOCK_SHARED,
417 file_name, line,
418 &index);
420 /* Set waiters before checking lock_word to ensure wake-up
421 signal is sent. This may lead to some unnecessary signals. */
422 rw_lock_set_waiter_flag(lock);
424 if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
425 sync_array_free_cell(sync_primary_wait_array, index);
426 return; /* Success */
429 if (srv_print_latch_waits) {
430 fprintf(stderr,
431 "Thread %lu OS wait rw-s-lock at %p"
432 " cfile %s cline %lu\n",
433 os_thread_pf(os_thread_get_curr_id()),
434 (void*) lock, lock->cfile_name,
435 (ulong) lock->cline);
438 /* these stats may not be accurate */
439 lock->count_os_wait++;
440 rw_s_os_wait_count++;
442 sync_array_wait_event(sync_primary_wait_array, index);
444 i = 0;
445 goto lock_loop;
449 /******************************************************************//**
450 This function is used in the insert buffer to move the ownership of an
451 x-latch on a buffer frame to the current thread. The x-latch was set by
452 the buffer read operation and it protected the buffer frame while the
453 read was done. The ownership is moved because we want that the current
454 thread is able to acquire a second x-latch which is stored in an mtr.
455 This, in turn, is needed to pass the debug checks of index page
456 operations. */
457 UNIV_INTERN
458 void
459 rw_lock_x_lock_move_ownership(
460 /*==========================*/
461 rw_lock_t* lock) /*!< in: lock which was x-locked in the
462 buffer read */
464 ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
466 rw_lock_set_writer_id_and_recursion_flag(lock, TRUE);
469 /******************************************************************//**
470 Function for the next writer to call. Waits for readers to exit.
471 The caller must have already decremented lock_word by X_LOCK_DECR. */
472 UNIV_INLINE
473 void
474 rw_lock_x_lock_wait(
475 /*================*/
476 rw_lock_t* lock, /*!< in: pointer to rw-lock */
477 #ifdef UNIV_SYNC_DEBUG
478 ulint pass, /*!< in: pass value; != 0, if the lock will
479 be passed to another thread to unlock */
480 #endif
481 const char* file_name,/*!< in: file name where lock requested */
482 ulint line) /*!< in: line where requested */
484 ulint index;
485 ulint i = 0;
487 ut_ad(lock->lock_word <= 0);
489 while (lock->lock_word < 0) {
490 if (srv_spin_wait_delay) {
491 ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
493 if(i < SYNC_SPIN_ROUNDS) {
494 i++;
495 continue;
498 /* If there is still a reader, then go to sleep.*/
499 rw_x_spin_round_count += i;
500 i = 0;
501 sync_array_reserve_cell(sync_primary_wait_array,
502 lock,
503 RW_LOCK_WAIT_EX,
504 file_name, line,
505 &index);
506 /* Check lock_word to ensure wake-up isn't missed.*/
507 if(lock->lock_word < 0) {
509 /* these stats may not be accurate */
510 lock->count_os_wait++;
511 rw_x_os_wait_count++;
513 /* Add debug info as it is needed to detect possible
514 deadlock. We must add info for WAIT_EX thread for
515 deadlock detection to work properly. */
516 #ifdef UNIV_SYNC_DEBUG
517 rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
518 file_name, line);
519 #endif
521 sync_array_wait_event(sync_primary_wait_array,
522 index);
523 #ifdef UNIV_SYNC_DEBUG
524 rw_lock_remove_debug_info(lock, pass,
525 RW_LOCK_WAIT_EX);
526 #endif
527 /* It is possible to wake when lock_word < 0.
528 We must pass the while-loop check to proceed.*/
529 } else {
530 sync_array_free_cell(sync_primary_wait_array,
531 index);
534 rw_x_spin_round_count += i;
537 /******************************************************************//**
538 Low-level function for acquiring an exclusive lock.
539 @return RW_LOCK_NOT_LOCKED if did not succeed, RW_LOCK_EX if success. */
540 UNIV_INLINE
541 ibool
542 rw_lock_x_lock_low(
543 /*===============*/
544 rw_lock_t* lock, /*!< in: pointer to rw-lock */
545 ulint pass, /*!< in: pass value; != 0, if the lock will
546 be passed to another thread to unlock */
547 const char* file_name,/*!< in: file name where lock requested */
548 ulint line) /*!< in: line where requested */
550 os_thread_id_t curr_thread = os_thread_get_curr_id();
552 if (rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {
554 /* lock->recursive also tells us if the writer_thread
555 field is stale or active. As we are going to write
556 our own thread id in that field it must be that the
557 current writer_thread value is not active. */
558 ut_a(!lock->recursive);
560 /* Decrement occurred: we are writer or next-writer. */
561 rw_lock_set_writer_id_and_recursion_flag(lock,
562 pass ? FALSE : TRUE);
564 rw_lock_x_lock_wait(lock,
565 #ifdef UNIV_SYNC_DEBUG
566 pass,
567 #endif
568 file_name, line);
570 } else {
571 /* Decrement failed: relock or failed lock */
572 if (!pass && lock->recursive
573 && os_thread_eq(lock->writer_thread, curr_thread)) {
574 /* Relock */
575 lock->lock_word -= X_LOCK_DECR;
576 } else {
577 /* Another thread locked before us */
578 return(FALSE);
581 #ifdef UNIV_SYNC_DEBUG
582 rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
583 file_name, line);
584 #endif
585 lock->last_x_file_name = file_name;
586 lock->last_x_line = (unsigned int) line;
588 return(TRUE);
591 /******************************************************************//**
592 NOTE! Use the corresponding macro, not directly this function! Lock an
593 rw-lock in exclusive mode for the current thread. If the rw-lock is locked
594 in shared or exclusive mode, or there is an exclusive lock request waiting,
595 the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
596 for the lock before suspending the thread. If the same thread has an x-lock
597 on the rw-lock, locking succeed, with the following exception: if pass != 0,
598 only a single x-lock may be taken on the lock. NOTE: If the same thread has
599 an s-lock, locking does not succeed! */
600 UNIV_INTERN
601 void
602 rw_lock_x_lock_func(
603 /*================*/
604 rw_lock_t* lock, /*!< in: pointer to rw-lock */
605 ulint pass, /*!< in: pass value; != 0, if the lock will
606 be passed to another thread to unlock */
607 const char* file_name,/*!< in: file name where lock requested */
608 ulint line) /*!< in: line where requested */
610 ulint index; /*!< index of the reserved wait cell */
611 ulint i; /*!< spin round count */
612 ibool spinning = FALSE;
614 ut_ad(rw_lock_validate(lock));
615 #ifdef UNIV_SYNC_DEBUG
616 ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED));
617 #endif /* UNIV_SYNC_DEBUG */
619 i = 0;
621 lock_loop:
623 if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
624 rw_x_spin_round_count += i;
626 return; /* Locking succeeded */
628 } else {
630 if (!spinning) {
631 spinning = TRUE;
632 rw_x_spin_wait_count++;
635 /* Spin waiting for the lock_word to become free */
636 while (i < SYNC_SPIN_ROUNDS
637 && lock->lock_word <= 0) {
638 if (srv_spin_wait_delay) {
639 ut_delay(ut_rnd_interval(0,
640 srv_spin_wait_delay));
643 i++;
645 if (i == SYNC_SPIN_ROUNDS) {
646 os_thread_yield();
647 } else {
648 goto lock_loop;
652 rw_x_spin_round_count += i;
654 if (srv_print_latch_waits) {
655 fprintf(stderr,
656 "Thread %lu spin wait rw-x-lock at %p"
657 " cfile %s cline %lu rnds %lu\n",
658 os_thread_pf(os_thread_get_curr_id()), (void*) lock,
659 lock->cfile_name, (ulong) lock->cline, (ulong) i);
662 sync_array_reserve_cell(sync_primary_wait_array,
663 lock,
664 RW_LOCK_EX,
665 file_name, line,
666 &index);
668 /* Waiters must be set before checking lock_word, to ensure signal
669 is sent. This could lead to a few unnecessary wake-up signals. */
670 rw_lock_set_waiter_flag(lock);
672 if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
673 sync_array_free_cell(sync_primary_wait_array, index);
674 return; /* Locking succeeded */
677 if (srv_print_latch_waits) {
678 fprintf(stderr,
679 "Thread %lu OS wait for rw-x-lock at %p"
680 " cfile %s cline %lu\n",
681 os_thread_pf(os_thread_get_curr_id()), (void*) lock,
682 lock->cfile_name, (ulong) lock->cline);
685 /* these stats may not be accurate */
686 lock->count_os_wait++;
687 rw_x_os_wait_count++;
689 sync_array_wait_event(sync_primary_wait_array, index);
691 i = 0;
692 goto lock_loop;
695 #ifdef UNIV_SYNC_DEBUG
696 /******************************************************************//**
697 Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
698 because the debug mutex is also acquired in sync0arr while holding the OS
699 mutex protecting the sync array, and the ordinary mutex_enter might
700 recursively call routines in sync0arr, leading to a deadlock on the OS
701 mutex. */
702 UNIV_INTERN
703 void
704 rw_lock_debug_mutex_enter(void)
705 /*==========================*/
707 loop:
708 if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
709 return;
712 os_event_reset(rw_lock_debug_event);
714 rw_lock_debug_waiters = TRUE;
716 if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
717 return;
720 os_event_wait(rw_lock_debug_event);
722 goto loop;
/******************************************************************//**
Releases the debug mutex. */
UNIV_INTERN
void
rw_lock_debug_mutex_exit(void)
/*==========================*/
{
	mutex_exit(&rw_lock_debug_mutex);

	/* Wake up any thread sleeping in rw_lock_debug_mutex_enter(). */
	if (rw_lock_debug_waiters) {
		rw_lock_debug_waiters = FALSE;
		os_event_set(rw_lock_debug_event);
	}
}
740 /******************************************************************//**
741 Inserts the debug information for an rw-lock. */
742 UNIV_INTERN
743 void
744 rw_lock_add_debug_info(
745 /*===================*/
746 rw_lock_t* lock, /*!< in: rw-lock */
747 ulint pass, /*!< in: pass value */
748 ulint lock_type, /*!< in: lock type */
749 const char* file_name, /*!< in: file where requested */
750 ulint line) /*!< in: line where requested */
752 rw_lock_debug_t* info;
754 ut_ad(lock);
755 ut_ad(file_name);
757 info = rw_lock_debug_create();
759 rw_lock_debug_mutex_enter();
761 info->file_name = file_name;
762 info->line = line;
763 info->lock_type = lock_type;
764 info->thread_id = os_thread_get_curr_id();
765 info->pass = pass;
767 UT_LIST_ADD_FIRST(list, lock->debug_list, info);
769 rw_lock_debug_mutex_exit();
771 if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
772 sync_thread_add_level(lock, lock->level,
773 lock_type == RW_LOCK_EX
774 && lock->lock_word < 0);
778 /******************************************************************//**
779 Removes a debug information struct for an rw-lock. */
780 UNIV_INTERN
781 void
782 rw_lock_remove_debug_info(
783 /*======================*/
784 rw_lock_t* lock, /*!< in: rw-lock */
785 ulint pass, /*!< in: pass value */
786 ulint lock_type) /*!< in: lock type */
788 rw_lock_debug_t* info;
790 ut_ad(lock);
792 if ((pass == 0) && (lock_type != RW_LOCK_WAIT_EX)) {
793 sync_thread_reset_level(lock);
796 rw_lock_debug_mutex_enter();
798 info = UT_LIST_GET_FIRST(lock->debug_list);
800 while (info != NULL) {
801 if ((pass == info->pass)
802 && ((pass != 0)
803 || os_thread_eq(info->thread_id,
804 os_thread_get_curr_id()))
805 && (info->lock_type == lock_type)) {
807 /* Found! */
808 UT_LIST_REMOVE(list, lock->debug_list, info);
809 rw_lock_debug_mutex_exit();
811 rw_lock_debug_free(info);
813 return;
816 info = UT_LIST_GET_NEXT(list, info);
819 ut_error;
821 #endif /* UNIV_SYNC_DEBUG */
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0.
@return	TRUE if locked */
UNIV_INTERN
ibool
rw_lock_own(
/*========*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		lock_type)	/*!< in: lock type: RW_LOCK_SHARED,
					RW_LOCK_EX */
{
	rw_lock_debug_t*	info;
	ibool			found	= FALSE;

	ut_ad(lock);
	ut_ad(rw_lock_validate(lock));

	rw_lock_debug_mutex_enter();

	/* Scan the debug list for an entry recorded by this thread
	with pass == 0 and the requested lock type. */
	for (info = UT_LIST_GET_FIRST(lock->debug_list);
	     info != NULL;
	     info = UT_LIST_GET_NEXT(list, info)) {

		if (os_thread_eq(info->thread_id, os_thread_get_curr_id())
		    && (info->pass == 0)
		    && (info->lock_type == lock_type)) {

			found = TRUE;
			break;
		}
	}

	rw_lock_debug_mutex_exit();

	return(found);
}
#endif /* UNIV_SYNC_DEBUG */
865 /******************************************************************//**
866 Checks if somebody has locked the rw-lock in the specified mode.
867 @return TRUE if locked */
868 UNIV_INTERN
869 ibool
870 rw_lock_is_locked(
871 /*==============*/
872 rw_lock_t* lock, /*!< in: rw-lock */
873 ulint lock_type) /*!< in: lock type: RW_LOCK_SHARED,
874 RW_LOCK_EX */
876 ibool ret = FALSE;
878 ut_ad(lock);
879 ut_ad(rw_lock_validate(lock));
881 if (lock_type == RW_LOCK_SHARED) {
882 if (rw_lock_get_reader_count(lock) > 0) {
883 ret = TRUE;
885 } else if (lock_type == RW_LOCK_EX) {
886 if (rw_lock_get_writer(lock) == RW_LOCK_EX) {
887 ret = TRUE;
889 } else {
890 ut_error;
893 return(ret);
896 #ifdef UNIV_SYNC_DEBUG
897 /***************************************************************//**
898 Prints debug info of currently locked rw-locks. */
899 UNIV_INTERN
900 void
901 rw_lock_list_print_info(
902 /*====================*/
903 FILE* file) /*!< in: file where to print */
905 rw_lock_t* lock;
906 ulint count = 0;
907 rw_lock_debug_t* info;
909 mutex_enter(&rw_lock_list_mutex);
911 fputs("-------------\n"
912 "RW-LATCH INFO\n"
913 "-------------\n", file);
915 lock = UT_LIST_GET_FIRST(rw_lock_list);
917 while (lock != NULL) {
919 count++;
921 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
922 mutex_enter(&(lock->mutex));
923 #endif
924 if (lock->lock_word != X_LOCK_DECR) {
926 fprintf(file, "RW-LOCK: %p ", (void*) lock);
928 if (rw_lock_get_waiters(lock)) {
929 fputs(" Waiters for the lock exist\n", file);
930 } else {
931 putc('\n', file);
934 rw_lock_debug_mutex_enter();
935 info = UT_LIST_GET_FIRST(lock->debug_list);
936 while (info != NULL) {
937 rw_lock_debug_print(file, info);
938 info = UT_LIST_GET_NEXT(list, info);
940 rw_lock_debug_mutex_exit();
942 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
943 mutex_exit(&(lock->mutex));
944 #endif
946 lock = UT_LIST_GET_NEXT(list, lock);
949 fprintf(file, "Total number of rw-locks %ld\n", count);
950 mutex_exit(&rw_lock_list_mutex);
953 /***************************************************************//**
954 Prints debug info of an rw-lock. */
955 UNIV_INTERN
956 void
957 rw_lock_print(
958 /*==========*/
959 rw_lock_t* lock) /*!< in: rw-lock */
961 rw_lock_debug_t* info;
963 fprintf(stderr,
964 "-------------\n"
965 "RW-LATCH INFO\n"
966 "RW-LATCH: %p ", (void*) lock);
968 #ifndef INNODB_RW_LOCKS_USE_ATOMICS
969 /* We used to acquire lock->mutex here, but it would cause a
970 recursive call to sync_thread_add_level() if UNIV_SYNC_DEBUG
971 is defined. Since this function is only invoked from
972 sync_thread_levels_g(), let us choose the smaller evil:
973 performing dirty reads instead of causing bogus deadlocks or
974 assertion failures. */
975 #endif
976 if (lock->lock_word != X_LOCK_DECR) {
978 if (rw_lock_get_waiters(lock)) {
979 fputs(" Waiters for the lock exist\n", stderr);
980 } else {
981 putc('\n', stderr);
984 rw_lock_debug_mutex_enter();
985 info = UT_LIST_GET_FIRST(lock->debug_list);
986 while (info != NULL) {
987 rw_lock_debug_print(stderr, info);
988 info = UT_LIST_GET_NEXT(list, info);
990 rw_lock_debug_mutex_exit();
994 /*********************************************************************//**
995 Prints info of a debug struct. */
996 UNIV_INTERN
997 void
998 rw_lock_debug_print(
999 /*================*/
1000 FILE* f, /*!< in: output stream */
1001 rw_lock_debug_t* info) /*!< in: debug struct */
1003 ulint rwt;
1005 rwt = info->lock_type;
1007 fprintf(f, "Locked: thread %lu file %s line %lu ",
1008 (ulong) os_thread_pf(info->thread_id), info->file_name,
1009 (ulong) info->line);
1010 if (rwt == RW_LOCK_SHARED) {
1011 fputs("S-LOCK", f);
1012 } else if (rwt == RW_LOCK_EX) {
1013 fputs("X-LOCK", f);
1014 } else if (rwt == RW_LOCK_WAIT_EX) {
1015 fputs("WAIT X-LOCK", f);
1016 } else {
1017 ut_error;
1019 if (info->pass != 0) {
1020 fprintf(f, " pass value %lu", (ulong) info->pass);
1022 putc('\n', f);
1025 /***************************************************************//**
1026 Returns the number of currently locked rw-locks. Works only in the debug
1027 version.
1028 @return number of locked rw-locks */
1029 UNIV_INTERN
1030 ulint
1031 rw_lock_n_locked(void)
1032 /*==================*/
1034 rw_lock_t* lock;
1035 ulint count = 0;
1037 mutex_enter(&rw_lock_list_mutex);
1039 lock = UT_LIST_GET_FIRST(rw_lock_list);
1041 while (lock != NULL) {
1043 if (lock->lock_word != X_LOCK_DECR) {
1044 count++;
1047 lock = UT_LIST_GET_NEXT(list, lock);
1050 mutex_exit(&rw_lock_list_mutex);
1052 return(count);
1054 #endif /* UNIV_SYNC_DEBUG */