/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Volker Lendecke 2012,2013
   Copyright (C) Stefan Metzmacher 2013,2014
   Copyright (C) Michael Adam 2014

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "tdb_private.h"
#include "system/threads.h"

#ifdef USE_TDB_MUTEX_LOCKING
/*
 * If we run with mutexes, we store the "struct tdb_mutexes" at the
 * beginning of the file. We store an additional tdb_header right
 * beyond the mutex area, page aligned. All the offsets within the tdb
 * are relative to the area behind the mutex area. tdb->map_ptr points
 * behind the mmap area as well, so the read and write path in the
 * mutex case can remain unchanged.
 *
 * Early in the mutex development the mutexes were placed between the hash
 * chain pointers and the real tdb data. This had two drawbacks: First, it
 * made pointer calculations more complex. Second, we had to mmap the mutex
 * area twice. One was the normal map_ptr in the tdb. This frequently changed
 * from within tdb_oob. At least the Linux glibc robust mutex code assumes
 * constant pointers in memory, so a constantly changing mmap area destroys
 * the mutex list. So we had to mmap the first bytes of the file with a second
 * mmap call. With that scheme, very weird errors happened that could be
 * easily fixed by doing the mutex mmap in a second file. It seemed that
 * mapping the same memory area twice does not end up in accessing the same
 * physical page; looking at the mutexes in gdb, it seemed that old data
 * showed up after some re-mapping. To avoid a separate mutex file, the code
 * now puts the real content of the tdb file after the mutex area. This way
 * we do not have overlapping mmap areas: the mutex area is mmapped once and
 * never changed, while the tdb data area's mmap is changed frequently but
 * never overlaps the mutexes.
 */
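
/*
 * Illustration only (not part of tdb, kept under #if 0 so it does not
 * affect the build): with the layout described above, a tdb-relative
 * offset maps to a raw file offset by adding the page-aligned size of
 * the mutex area. The helper name is hypothetical; tdb_mutex_size() is
 * the real function defined further down in this file.
 */
#if 0
static off_t tdb_mutex_file_offset_sketch(struct tdb_context *tdb,
					  off_t tdb_off)
{
	/* everything tdb-visible lives behind the mutex area */
	return (off_t)tdb_mutex_size(tdb) + tdb_off;
}
#endif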
struct tdb_mutexes {
	struct tdb_header hdr;

	/* protect allrecord_lock */
	pthread_mutex_t allrecord_mutex;

	/* F_UNLCK, F_RDLCK or F_WRLCK, protected by allrecord_mutex */
	short int allrecord_lock;

	/*
	 * Index 0 is the freelist mutex, followed by
	 * one mutex per hashchain (see the indexing sketch below).
	 */
	pthread_mutex_t hashchains[1];
};
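
/*
 * Illustration only (hypothetical helpers, kept under #if 0): how the
 * hashchains[] array is indexed. Index 0 protects the freelist, index
 * h+1 protects hash chain h, matching the "i+1" loops further down.
 */
#if 0
static pthread_mutex_t *freelist_mutex_sketch(struct tdb_mutexes *m)
{
	return &m->hashchains[0];	/* index 0: the freelist */
}

static pthread_mutex_t *chain_mutex_sketch(struct tdb_mutexes *m, unsigned h)
{
	return &m->hashchains[h + 1];	/* index h+1: hash chain h */
}
#endif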
bool tdb_have_mutexes(struct tdb_context *tdb)
{
	return ((tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) != 0);
}
size_t tdb_mutex_size(struct tdb_context *tdb)
{
	size_t mutex_size;

	if (!tdb_have_mutexes(tdb)) {
		return 0;
	}

	mutex_size = sizeof(struct tdb_mutexes);
	mutex_size += tdb->hash_size * sizeof(pthread_mutex_t);

	return TDB_ALIGN(mutex_size, tdb->page_size);
}
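
/*
 * Worked example (illustrative numbers only): with a 40 byte
 * pthread_mutex_t and a 4096 byte page size, a tdb with hash_size 10000
 * needs sizeof(struct tdb_mutexes) + 10000 * 40 bytes, roughly 400 KiB,
 * which TDB_ALIGN() then rounds up to the next page boundary.
 */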
/*
 * Get the index for a chain mutex
 */
static bool tdb_mutex_index(struct tdb_context *tdb, off_t off, off_t len,
			    unsigned *idx)
{
	/*
	 * Weird but true: We fcntl lock 1 byte at an offset 4 bytes before
	 * the 4 bytes of the freelist start and the hash chain that is about
	 * to be locked. See lock_offset() where the freelist is -1 vs the
	 * "+1" in TDB_HASH_TOP(). Because the mutex array is represented in
	 * the tdb file itself as data, we need to adjust the offset here.
	 * A worked example follows this function.
	 */
	const off_t freelist_lock_ofs = FREELIST_TOP - sizeof(tdb_off_t);

	if (!tdb_have_mutexes(tdb)) {
		return false;
	}
	if (len != 1) {
		/* Possibly the allrecord lock */
		return false;
	}
	if (off < freelist_lock_ofs) {
		/* One of the special locks */
		return false;
	}
	if (tdb->hash_size == 0) {
		/* tdb not initialized yet, called from tdb_open_ex() */
		return false;
	}
	if (off >= TDB_DATA_START(tdb->hash_size)) {
		/* Single record lock from traverses */
		return false;
	}

	/*
	 * Now we know it's a freelist or hash chain lock. Those are always 4
	 * byte aligned. Paranoia check.
	 */
	if ((off % sizeof(tdb_off_t)) != 0) {
		return false;
	}

	/*
	 * Re-index the fcntl offset into an offset into the mutex array
	 */
	off -= freelist_lock_ofs; /* rebase to index 0 */
	off /= sizeof(tdb_off_t); /* 0 for freelist 1-n for hashchain */

	*idx = (unsigned)off;
	return true;
}
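
/*
 * Worked example for the offset adjustment above (illustrative, assuming
 * a 4 byte tdb_off_t): lock_offset() puts the freelist fcntl byte at
 * FREELIST_TOP - 4 and the byte for hash chain h at FREELIST_TOP + 4*h.
 * Subtracting freelist_lock_ofs = FREELIST_TOP - 4 and dividing by 4
 * therefore yields mutex index 0 for the freelist and h+1 for chain h,
 * which is exactly how hashchains[] is laid out.
 */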
static bool tdb_have_mutex_chainlocks(struct tdb_context *tdb)
{
	int i;

	for (i=0; i < tdb->num_lockrecs; i++) {
		bool ret;
		unsigned idx;

		ret = tdb_mutex_index(tdb,
				      tdb->lockrecs[i].off,
				      tdb->lockrecs[i].count,
				      &idx);
		if (!ret) {
			continue;
		}
		if (idx == 0) {
			/* this is the freelist mutex */
			continue;
		}
		return true;
	}

	return false;
}
static int chain_mutex_lock(pthread_mutex_t *m, bool waitflag)
{
	int ret;

	if (waitflag) {
		ret = pthread_mutex_lock(m);
	} else {
		ret = pthread_mutex_trylock(m);
	}
	if (ret != EOWNERDEAD) {
		return ret;
	}

	/*
	 * For chainlocks, we don't do any cleanup (yet?)
	 */
	return pthread_mutex_consistent(m);
}
static int allrecord_mutex_lock(struct tdb_mutexes *m, bool waitflag)
{
	int ret;

	if (waitflag) {
		ret = pthread_mutex_lock(&m->allrecord_mutex);
	} else {
		ret = pthread_mutex_trylock(&m->allrecord_mutex);
	}
	if (ret != EOWNERDEAD) {
		return ret;
	}

	/*
	 * The allrecord lock holder died. We need to reset the allrecord_lock
	 * to F_UNLCK. This should also be the indication for
	 * tdb_needs_recovery.
	 */
	m->allrecord_lock = F_UNLCK;
	return pthread_mutex_consistent(&m->allrecord_mutex);
}
bool tdb_mutex_lock(struct tdb_context *tdb, int rw, off_t off, off_t len,
		    bool waitflag, int *pret)
{
	struct tdb_mutexes *m = tdb->mutexes;
	pthread_mutex_t *chain;
	int ret;
	unsigned idx;
	bool allrecord_ok;

	if (!tdb_mutex_index(tdb, off, len, &idx)) {
		return false;
	}

	chain = &m->hashchains[idx];

again:
	ret = chain_mutex_lock(chain, waitflag);
	if (!waitflag && (ret == EBUSY)) {
		ret = EAGAIN;
	}
	if (ret != 0) {
		errno = ret;
		*pret = -1;
		return true;
	}

	if (idx == 0) {
		/*
		 * This is a freelist lock, which is independent of
		 * the allrecord lock. So we're done once we got the
		 * freelist mutex.
		 */
		*pret = 0;
		return true;
	}

	if (tdb_have_mutex_chainlocks(tdb)) {
		/*
		 * We can only check the allrecord lock once. If we do it with
		 * one chain mutex locked, we will deadlock with the allrecord
		 * locker process in the following way: We lock the first hash
		 * chain, we check for the allrecord lock. We keep the hash
		 * chain locked. Then the allrecord locker locks the
		 * allrecord_mutex. It walks the list of chain mutexes,
		 * locking them all in sequence. Meanwhile, we have the chain
		 * mutex locked, so the allrecord locker blocks trying to lock
		 * our chain mutex. Then we come in and try to lock the second
		 * chain lock, which in most cases will be the freelist. We
		 * see that the allrecord lock is locked and put ourselves on
		 * the allrecord_mutex. This will never be signalled though
		 * because the allrecord locker waits for us to give up the
		 * chain lock.
		 */
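		/*
		 * Timeline sketch of that deadlock (illustrative, P1 = us,
		 * P2 = the allrecord locker):
		 *
		 *   P1: lock chain A, see allrecord_lock == F_UNLCK, keep A
		 *   P2: lock allrecord_mutex, start locking every chain
		 *   P2: block on chain A (held by P1)
		 *   P1: lock a second chain, see the allrecord lock taken,
		 *       wait on allrecord_mutex (held by P2)
		 *
		 * Neither side can proceed, hence the
		 * tdb_have_mutex_chainlocks() check above: once we already
		 * hold a chain lock we skip the allrecord check entirely.
		 */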
		*pret = 0;
		return true;
	}

	/*
	 * Check if someone has the allrecord lock: queue if so.
	 */

	allrecord_ok = false;

	if (m->allrecord_lock == F_UNLCK) {
		/*
		 * allrecord lock not taken
		 */
		allrecord_ok = true;
	}

	if ((m->allrecord_lock == F_RDLCK) && (rw == F_RDLCK)) {
		/*
		 * allrecord shared lock taken, but we only want to read
		 */
		allrecord_ok = true;
	}

	if (allrecord_ok) {
		*pret = 0;
		return true;
	}

	ret = pthread_mutex_unlock(chain);
	if (ret != 0) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
			 "(chain_mutex) failed: %s\n", strerror(ret)));
		errno = ret;
		*pret = -1;
		return true;
	}

	ret = allrecord_mutex_lock(m, waitflag);
	if (!waitflag && (ret == EBUSY)) {
		ret = EAGAIN;
	}
	if (ret != 0) {
		if (waitflag || (ret != EAGAIN)) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_%slock"
				 "(allrecord_mutex) failed: %s\n",
				 waitflag ? "" : "try_", strerror(ret)));
		}
		errno = ret;
		*pret = -1;
		return true;
	}

	/*
	 * We queued behind the allrecord holder and got the allrecord_mutex,
	 * so the allrecord lock is gone. Release the mutex again and retry
	 * the chain lock.
	 */
	ret = pthread_mutex_unlock(&m->allrecord_mutex);
	if (ret != 0) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
			 "(allrecord_mutex) failed: %s\n", strerror(ret)));
		errno = ret;
		*pret = -1;
		return true;
	}

	goto again;
}
bool tdb_mutex_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len,
		      int *pret)
{
	struct tdb_mutexes *m = tdb->mutexes;
	pthread_mutex_t *chain;
	int ret;
	unsigned idx;

	if (!tdb_mutex_index(tdb, off, len, &idx)) {
		return false;
	}

	chain = &m->hashchains[idx];

	ret = pthread_mutex_unlock(chain);
	if (ret == 0) {
		*pret = 0;
		return true;
	}
	errno = ret;
	*pret = -1;
	return true;
}
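
/*
 * Call-pattern sketch (illustrative, not the actual lock.c code, kept
 * under #if 0): the fcntl based locking path is expected to offer each
 * byte-range lock to the mutex code first and only fall back to fcntl
 * when the request is not covered by a chain mutex.
 */
#if 0
static int lock_with_mutex_fallback_sketch(struct tdb_context *tdb, int rw,
					   off_t off, off_t len, bool waitflag)
{
	int ret;

	if (tdb_mutex_lock(tdb, rw, off, len, waitflag, &ret)) {
		return ret;	/* handled by a chain mutex */
	}

	/* the real caller would issue the fcntl lock here */
	return -1;
}
#endif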
int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
			     enum tdb_lock_flags flags)
{
	struct tdb_mutexes *m = tdb->mutexes;
	int ret;
	uint32_t i;
	bool waitflag = (flags & TDB_LOCK_WAIT);

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	if (flags & TDB_LOCK_MARK_ONLY) {
		return 0;
	}

	ret = allrecord_mutex_lock(m, waitflag);
	if (!waitflag && (ret == EBUSY)) {
		errno = EAGAIN;
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}
	if (ret != 0) {
		if (!(flags & TDB_LOCK_PROBE)) {
			TDB_LOG((tdb, TDB_DEBUG_TRACE,
				 "allrecord_mutex_lock() failed: %s\n",
				 strerror(ret)));
		}
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (m->allrecord_lock != F_UNLCK) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
			 (int)m->allrecord_lock));
		goto fail_unlock_allrecord_mutex;
	}

	m->allrecord_lock = (ltype == F_RDLCK) ? F_RDLCK : F_WRLCK;

	for (i=0; i<tdb->hash_size; i++) {

		/* ignore hashchains[0], the freelist */
		pthread_mutex_t *chain = &m->hashchains[i+1];

		ret = chain_mutex_lock(chain, waitflag);
		if (!waitflag && (ret == EBUSY)) {
			errno = EAGAIN;
			goto fail_unroll_allrecord_lock;
		}
		if (ret != 0) {
			if (!(flags & TDB_LOCK_PROBE)) {
				TDB_LOG((tdb, TDB_DEBUG_TRACE,
					 "chain_mutex_lock() failed: %s\n",
					 strerror(ret)));
			}
			errno = ret;
			goto fail_unroll_allrecord_lock;
		}

		ret = pthread_mutex_unlock(chain);
		if (ret != 0) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
				 "(chainlock) failed: %s\n", strerror(ret)));
			errno = ret;
			goto fail_unroll_allrecord_lock;
		}
	}
	/*
	 * We leave this routine with m->allrecord_mutex locked
	 */
	return 0;

fail_unroll_allrecord_lock:
	m->allrecord_lock = F_UNLCK;

fail_unlock_allrecord_mutex:

	ret = pthread_mutex_unlock(&m->allrecord_mutex);
	if (ret != 0) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
			 "(allrecord_mutex) failed: %s\n", strerror(ret)));
	}
	tdb->ecode = TDB_ERR_LOCK;
	return -1;
}
int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb)
{
	struct tdb_mutexes *m = tdb->mutexes;
	int ret;
	uint32_t i;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	/*
	 * Our only caller tdb_allrecord_upgrade()
	 * guarantees that we already own the allrecord lock.
	 *
	 * Which means m->allrecord_mutex is still locked by us.
	 */

	if (m->allrecord_lock != F_RDLCK) {
		tdb->ecode = TDB_ERR_LOCK;
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
			 (int)m->allrecord_lock));
		return -1;
	}

	m->allrecord_lock = F_WRLCK;

	for (i=0; i<tdb->hash_size; i++) {

		/* ignore hashchains[0], the freelist */
		pthread_mutex_t *chain = &m->hashchains[i+1];

		ret = chain_mutex_lock(chain, true);
		if (ret != 0) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_lock"
				 "(chainlock) failed: %s\n", strerror(ret)));
			goto fail_unroll_allrecord_lock;
		}

		ret = pthread_mutex_unlock(chain);
		if (ret != 0) {
			TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
				 "(chainlock) failed: %s\n", strerror(ret)));
			goto fail_unroll_allrecord_lock;
		}
	}

	return 0;

fail_unroll_allrecord_lock:
	m->allrecord_lock = F_RDLCK;
	tdb->ecode = TDB_ERR_LOCK;
	return -1;
}
void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
{
	struct tdb_mutexes *m = tdb->mutexes;

	/*
	 * Our only caller tdb_allrecord_upgrade() (in the error case)
	 * guarantees that we already own the allrecord lock.
	 *
	 * Which means m->allrecord_mutex is still locked by us.
	 */

	if (m->allrecord_lock != F_WRLCK) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
			 (int)m->allrecord_lock));
		return;
	}

	m->allrecord_lock = F_RDLCK;
	return;
}
int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
{
	struct tdb_mutexes *m = tdb->mutexes;
	short old;
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	/*
	 * Our only callers tdb_allrecord_unlock() and
	 * tdb_allrecord_lock() (in the error path)
	 * guarantee that we already own the allrecord lock.
	 *
	 * Which means m->allrecord_mutex is still locked by us.
	 */

	if ((m->allrecord_lock != F_RDLCK) && (m->allrecord_lock != F_WRLCK)) {
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
			 (int)m->allrecord_lock));
		return -1;
	}

	old = m->allrecord_lock;
	m->allrecord_lock = F_UNLCK;

	ret = pthread_mutex_unlock(&m->allrecord_mutex);
	if (ret != 0) {
		m->allrecord_lock = old;
		TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
			 "(allrecord_mutex) failed: %s\n", strerror(ret)));
		return -1;
	}
	return 0;
}
int tdb_mutex_init(struct tdb_context *tdb)
{
	struct tdb_mutexes *m;
	pthread_mutexattr_t ma;
	uint32_t i;
	int ret;

	ret = tdb_mutex_mmap(tdb);
	if (ret == -1) {
		return -1;
	}
	m = tdb->mutexes;

	ret = pthread_mutexattr_init(&ma);
	if (ret != 0) {
		goto fail_munmap;
	}
	ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
	if (ret != 0) {
		goto fail;
	}
	ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
	if (ret != 0) {
		goto fail;
	}
	ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
	if (ret != 0) {
		goto fail;
	}

	for (i=0; i<tdb->hash_size+1; i++) {
		pthread_mutex_t *chain = &m->hashchains[i];

		ret = pthread_mutex_init(chain, &ma);
		if (ret != 0) {
			goto fail;
		}
	}

	m->allrecord_lock = F_UNLCK;

	ret = pthread_mutex_init(&m->allrecord_mutex, &ma);
	if (ret != 0) {
		goto fail;
	}
	ret = 0;
fail:
	pthread_mutexattr_destroy(&ma);
fail_munmap:

	if (ret == 0) {
		return 0;
	}

	tdb_mutex_munmap(tdb);

	errno = ret;
	return -1;
}
int tdb_mutex_mmap(struct tdb_context *tdb)
{
	size_t len;
	void *ptr;

	len = tdb_mutex_size(tdb);
	if (len == 0) {
		return 0;
	}

	if (tdb->mutexes != NULL) {
		return 0;
	}

	ptr = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FILE,
		   tdb->fd, 0);
	if (ptr == MAP_FAILED) {
		return -1;
	}
	tdb->mutexes = (struct tdb_mutexes *)ptr;

	return 0;
}
int tdb_mutex_munmap(struct tdb_context *tdb)
{
	size_t len;
	int ret;

	len = tdb_mutex_size(tdb);
	if (len == 0) {
		return 0;
	}

	ret = munmap(tdb->mutexes, len);
	if (ret == -1) {
		return -1;
	}
	tdb->mutexes = NULL;

	return 0;
}
static bool tdb_mutex_locking_cached;

static bool tdb_mutex_locking_supported(void)
{
	pthread_mutexattr_t ma;
	pthread_mutex_t m;
	int ret;
	static bool initialized;

	if (initialized) {
		return tdb_mutex_locking_cached;
	}

	initialized = true;

	ret = pthread_mutexattr_init(&ma);
	if (ret != 0) {
		return false;
	}
	ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
	if (ret != 0) {
		goto cleanup_ma;
	}
	ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
	if (ret != 0) {
		goto cleanup_ma;
	}
	ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
	if (ret != 0) {
		goto cleanup_ma;
	}
	ret = pthread_mutex_init(&m, &ma);
	if (ret != 0) {
		goto cleanup_ma;
	}
	ret = pthread_mutex_lock(&m);
	if (ret != 0) {
		goto cleanup_m;
	}
	/*
	 * This makes sure we have real mutexes
	 * from a threading library instead of just
	 * stubs from libc.
	 */
	ret = pthread_mutex_lock(&m);
	if (ret != EDEADLK) {
		goto cleanup_lock;
	}
	ret = pthread_mutex_unlock(&m);
	if (ret != 0) {
		goto cleanup_m;
	}

	tdb_mutex_locking_cached = true;
	goto cleanup_m;

cleanup_lock:
	pthread_mutex_unlock(&m);
cleanup_m:
	pthread_mutex_destroy(&m);
cleanup_ma:
	pthread_mutexattr_destroy(&ma);
	return tdb_mutex_locking_cached;
}
static void (*tdb_robust_mutext_old_handler)(int) = SIG_ERR;
static pid_t tdb_robust_mutex_pid = -1;
static bool tdb_robust_mutex_setup_sigchild(void (*handler)(int),
			void (**p_old_handler)(int))
{
#ifdef HAVE_SIGACTION
	struct sigaction act;
	struct sigaction oldact;

	memset(&act, '\0', sizeof(act));

	act.sa_handler = handler;
	act.sa_flags = SA_RESTART;
	sigemptyset(&act.sa_mask);
	sigaddset(&act.sa_mask, SIGCHLD);
	sigaction(SIGCHLD, &act, &oldact);

	*p_old_handler = oldact.sa_handler;
	return true;
#else /* !HAVE_SIGACTION */
	return false;
#endif
}
static void tdb_robust_mutex_handler(int sig)
{
	pid_t child_pid = tdb_robust_mutex_pid;

	if (child_pid != -1) {
		pid_t pid;

		pid = waitpid(child_pid, NULL, WNOHANG);
		if (pid == -1) {
			if (errno == ECHILD) {
				/* our child was already reaped elsewhere */
				tdb_robust_mutex_pid = -1;
			}
		} else if (pid == child_pid) {
			/* the SIGCHLD was for our child, swallow it */
			tdb_robust_mutex_pid = -1;
			return;
		}
	}

	if (tdb_robust_mutext_old_handler == SIG_DFL) {
		return;
	}
	if (tdb_robust_mutext_old_handler == SIG_IGN) {
		return;
	}
	if (tdb_robust_mutext_old_handler == SIG_ERR) {
		return;
	}

	tdb_robust_mutext_old_handler(sig);
}
static void tdb_robust_mutex_wait_for_child(pid_t *child_pid)
{
	int options = WNOHANG;

	if (*child_pid == -1) {
		return;
	}

	while (tdb_robust_mutex_pid > 0) {
		pid_t pid;

		/*
		 * First we try with WNOHANG, as the process might not exist
		 * anymore. Once we've sent SIGKILL we block waiting for the
		 * exit.
		 */
		pid = waitpid(*child_pid, NULL, options);
		if (pid == -1) {
			if (errno == EINTR) {
				continue;
			} else if (errno == ECHILD) {
				break;
			} else {
				abort();
			}
		}
		if (pid == *child_pid) {
			break;
		}

		kill(*child_pid, SIGKILL);
		options = 0;
	}

	tdb_robust_mutex_pid = -1;
	*child_pid = -1;
}
_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
{
	void *ptr = NULL;
	pthread_mutex_t *m = NULL;
	pthread_mutexattr_t ma;
	int ret;
	int pipe_down[2] = { -1, -1 };
	int pipe_up[2] = { -1, -1 };
	ssize_t nread;
	char c = 0;
	bool ok;
	static bool initialized;
	pid_t saved_child_pid = -1;
	bool cleanup_ma = false;

	if (initialized) {
		return tdb_mutex_locking_cached;
	}

	initialized = true;

	ok = tdb_mutex_locking_supported();
	if (!ok) {
		return false;
	}

	tdb_mutex_locking_cached = false;

	ptr = mmap(NULL, sizeof(pthread_mutex_t), PROT_READ|PROT_WRITE,
		   MAP_SHARED|MAP_ANON, -1 /* fd */, 0);
	if (ptr == MAP_FAILED) {
		return false;
	}

	ret = pipe(pipe_down);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pipe(pipe_up);
	if (ret != 0) {
		goto cleanup;
	}

	ret = pthread_mutexattr_init(&ma);
	if (ret != 0) {
		goto cleanup;
	}
	cleanup_ma = true;
	ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
	if (ret != 0) {
		goto cleanup;
	}
	ret = pthread_mutex_init(ptr, &ma);
	if (ret != 0) {
		goto cleanup;
	}
	m = (pthread_mutex_t *)ptr;

	if (tdb_robust_mutex_setup_sigchild(tdb_robust_mutex_handler,
			&tdb_robust_mutext_old_handler) == false) {
		goto cleanup;
	}

	tdb_robust_mutex_pid = fork();
	saved_child_pid = tdb_robust_mutex_pid;
	if (tdb_robust_mutex_pid == 0) {
		ssize_t nwritten;

		close(pipe_down[1]);
		close(pipe_up[0]);

		/* lock the mutex, tell the parent, then die holding it */
		ret = pthread_mutex_lock(m);
		nwritten = write(pipe_up[1], &ret, sizeof(ret));
		if (nwritten != sizeof(ret)) {
			_exit(1);
		}
		if (ret != 0) {
			_exit(1);
		}
		nread = read(pipe_down[0], &c, 1);
		if (nread != 1) {
			_exit(1);
		}
		_exit(0);
	}
	if (tdb_robust_mutex_pid == -1) {
		goto cleanup;
	}
	close(pipe_down[0]);
	pipe_down[0] = -1;
	close(pipe_up[1]);
	pipe_up[1] = -1;

	nread = read(pipe_up[0], &ret, sizeof(ret));
	if (nread != sizeof(ret)) {
		goto cleanup;
	}
	if (ret != 0) {
		goto cleanup;
	}

	ret = pthread_mutex_trylock(m);
	if (ret != EBUSY) {
		if (ret == 0) {
			pthread_mutex_unlock(m);
		}
		goto cleanup;
	}

	if (write(pipe_down[1], &c, 1) != 1) {
		goto cleanup;
	}

	nread = read(pipe_up[0], &c, 1);
	if (nread != 0) {
		goto cleanup;
	}

	tdb_robust_mutex_wait_for_child(&saved_child_pid);

	ret = pthread_mutex_trylock(m);
	if (ret != EOWNERDEAD) {
		if (ret == 0) {
			pthread_mutex_unlock(m);
		}
		goto cleanup;
	}

	ret = pthread_mutex_consistent(m);
	if (ret != 0) {
		goto cleanup;
	}

	ret = pthread_mutex_trylock(m);
	if (ret != EDEADLK && ret != EBUSY) {
		pthread_mutex_unlock(m);
		goto cleanup;
	}

	ret = pthread_mutex_unlock(m);
	if (ret != 0) {
		goto cleanup;
	}

	tdb_mutex_locking_cached = true;

cleanup:
	/*
	 * Note that we don't reset the signal handler we just reset
	 * tdb_robust_mutex_pid to -1. This is ok as this code path is only
	 * called once per process.
	 *
	 * Leaving our signal handler avoids races with other threads
	 * potentially setting up their SIGCHLD handlers.
	 *
	 * The worst thing that can happen is that the other newer signal
	 * handler will get the SIGCHLD signal for our child and/or reap the
	 * child with a wait() function. tdb_robust_mutex_wait_for_child()
	 * handles the case where waitpid returns ECHILD.
	 */
	tdb_robust_mutex_wait_for_child(&saved_child_pid);

	if (m != NULL) {
		pthread_mutex_destroy(m);
	}
	if (cleanup_ma) {
		pthread_mutexattr_destroy(&ma);
	}
	if (pipe_down[0] != -1) {
		close(pipe_down[0]);
	}
	if (pipe_down[1] != -1) {
		close(pipe_down[1]);
	}
	if (pipe_up[0] != -1) {
		close(pipe_up[0]);
	}
	if (pipe_up[1] != -1) {
		close(pipe_up[1]);
	}
	if (ptr != NULL) {
		munmap(ptr, sizeof(pthread_mutex_t));
	}

	return tdb_mutex_locking_cached;
}
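
/*
 * Usage sketch (illustrative, not part of tdb, kept under #if 0): callers
 * normally gate the TDB_MUTEX_LOCKING open flag on this runtime check,
 * and mutex locking also requires TDB_CLEAR_IF_FIRST. The helper name is
 * hypothetical.
 */
#if 0
static struct tdb_context *open_with_mutexes_sketch(const char *path)
{
	int tdb_flags = TDB_CLEAR_IF_FIRST;

	if (tdb_runtime_check_for_robust_mutexes()) {
		tdb_flags |= TDB_MUTEX_LOCKING;
	}

	return tdb_open(path, 0, tdb_flags, O_RDWR|O_CREAT, 0600);
}
#endif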
#else

size_t tdb_mutex_size(struct tdb_context *tdb)
{
	return 0;
}

bool tdb_have_mutexes(struct tdb_context *tdb)
{
	return false;
}

int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
			     enum tdb_lock_flags flags)
{
	tdb->ecode = TDB_ERR_LOCK;
	return -1;
}

int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
{
	return -1;
}

int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb)
{
	tdb->ecode = TDB_ERR_LOCK;
	return -1;
}

void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
{
	return;
}

int tdb_mutex_mmap(struct tdb_context *tdb)
{
	errno = ENOSYS;
	return -1;
}

int tdb_mutex_munmap(struct tdb_context *tdb)
{
	errno = ENOSYS;
	return -1;
}

int tdb_mutex_init(struct tdb_context *tdb)
{
	errno = ENOSYS;
	return -1;
}

_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
{
	return false;
}

#endif