lib/tdb/common/lock.c
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell          2000
   Copyright (C) Jeremy Allison                2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "tdb_private.h"

_PUBLIC_ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
{
	tdb->interrupt_sig_ptr = ptr;
}
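/*
 * Note: the pointer registered above is consulted by tdb_brlock() below.
 * When a blocking fcntl() lock is interrupted by a signal (EINTR) and
 * *interrupt_sig_ptr is non-zero, the retry loop gives up instead of
 * re-issuing the lock. Callers typically point this at a flag set from a
 * SIGALRM handler to bound how long a lock attempt may wait.
 */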
static int fcntl_lock(struct tdb_context *tdb,
		      int rw, off_t off, off_t len, bool waitflag)
{
	struct flock fl;
	int cmd;

	fl.l_type = rw;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	cmd = waitflag ? F_SETLKW : F_SETLK;

	return fcntl(tdb->fd, cmd, &fl);
}
static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
	struct flock fl;
#if 0 /* Check they matched up locks and unlocks correctly. */
	char line[80];
	FILE *locks;
	bool found = false;

	locks = fopen("/proc/locks", "r");

	while (fgets(line, 80, locks)) {
		char *p;
		int type, start, l;

		/* eg. 1: FLOCK ADVISORY WRITE 2440 08:01:2180826 0 EOF */
		p = strchr(line, ':') + 1;
		if (strncmp(p, " POSIX ADVISORY ", strlen(" POSIX ADVISORY ")))
			continue;
		p += strlen(" FLOCK ADVISORY ");
		if (strncmp(p, "READ ", strlen("READ ")) == 0)
			type = F_RDLCK;
		else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
			type = F_WRLCK;
		else
			abort();
		p += 6;
		if (atoi(p) != getpid())
			continue;
		p = strchr(strchr(p, ' ') + 1, ' ') + 1;
		start = atoi(p);
		p = strchr(p, ' ') + 1;
		if (strncmp(p, "EOF", 3) == 0)
			l = 0;
		else
			l = atoi(p) - start + 1;

		if (off == start) {
			if (len != l) {
				fprintf(stderr, "Len %u should be %u: %s",
					(int)len, l, line);
				abort();
			}
			if (type != rw) {
				fprintf(stderr, "Type %s wrong: %s",
					rw == F_RDLCK ? "READ" : "WRITE", line);
				abort();
			}
			found = true;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Unlock on %u@%u not found!\n",
			(int)off, (int)len);
		abort();
	}

	fclose(locks);
#endif

	fl.l_type = F_UNLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	return fcntl(tdb->fd, F_SETLKW, &fl);
}
/* list -1 is the alloc list, otherwise a hash chain. */
static tdb_off_t lock_offset(int list)
{
	return FREELIST_TOP + 4*list;
}
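/*
 * A quick worked example of the resulting layout: lock_offset(-1) is
 * FREELIST_TOP - 4 (the allocation/free list lock), lock_offset(0) is
 * FREELIST_TOP (hash chain 0), lock_offset(1) is FREELIST_TOP + 4, and so
 * on. The slots are 4 bytes apart, but tdb_nest_lock() below only ever
 * fcntl-locks a single byte at each offset, so per-chain locks never
 * overlap each other.
 */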
/* a byte range locking function - return 0 on success
   this function locks/unlocks "len" bytes at the specified offset.

   On error, errno is also set so that errors are passed back properly
   through tdb_open().

   note that a len of zero means lock to end of file
*/
int tdb_brlock(struct tdb_context *tdb,
	       int rw_type, tdb_off_t offset, size_t len,
	       enum tdb_lock_flags flags)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	if (flags & TDB_LOCK_MARK_ONLY) {
		return 0;
	}

	if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) {
		tdb->ecode = TDB_ERR_RDONLY;
		return -1;
	}

	do {
		ret = fcntl_lock(tdb, rw_type, offset, len,
				 flags & TDB_LOCK_WAIT);
		/* Check for a sigalarm break. */
		if (ret == -1 && errno == EINTR &&
				tdb->interrupt_sig_ptr &&
				*tdb->interrupt_sig_ptr) {
			break;
		}
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		tdb->ecode = TDB_ERR_LOCK;
		/* Generic lock error. errno set by fcntl.
		 * EAGAIN is an expected return from non-blocking
		 * locks. */
		if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
			TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %u rw_type=%d flags=%d len=%zu\n",
				 tdb->fd, offset, rw_type, flags, len));
		}
		return -1;
	}
	return 0;
}
int tdb_brunlock(struct tdb_context *tdb,
		 int rw_type, tdb_off_t offset, size_t len)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	do {
		ret = fcntl_unlock(tdb, rw_type, offset, len);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %u rw_type=%u len=%zu\n",
			 tdb->fd, offset, rw_type, len));
	}
	return ret;
}
/*
 * Do a tdb_brlock in a loop. Some OSes (such as Solaris) have overly
 * conservative deadlock detection and claim a deadlock when progress can
 * still be made. For those OSes we may loop for a while.
 */
static int tdb_brlock_retry(struct tdb_context *tdb,
			    int rw_type, tdb_off_t offset, size_t len,
			    enum tdb_lock_flags flags)
{
	int count = 1000;

	while (count--) {
		struct timeval tv;
		int ret;

		ret = tdb_brlock(tdb, rw_type, offset, len, flags);
		if (ret == 0) {
			return 0;
		}
		if (errno != EDEADLK) {
			break;
		}
		/* sleep for as short a time as we can - more portable than usleep() */
		tv.tv_sec = 0;
		tv.tv_usec = 1;
		select(0, NULL, NULL, NULL, &tv);
	}
	return -1;
}
/*
  upgrade a read lock to a write lock.
*/
int tdb_allrecord_upgrade(struct tdb_context *tdb)
{
	int ret;

	if (tdb->allrecord_lock.count != 1) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "tdb_allrecord_upgrade failed: count %u too high\n",
			 tdb->allrecord_lock.count));
		return -1;
	}

	if (tdb->allrecord_lock.off != 1) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR,
			 "tdb_allrecord_upgrade failed: already upgraded?\n"));
		return -1;
	}

	ret = tdb_brlock_retry(tdb, F_WRLCK, FREELIST_TOP, 0,
			       TDB_LOCK_WAIT|TDB_LOCK_PROBE);
	if (ret == 0) {
		tdb->allrecord_lock.ltype = F_WRLCK;
		tdb->allrecord_lock.off = 0;
		return 0;
	}
	TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_allrecord_upgrade failed\n"));
	return -1;
}
static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
					   tdb_off_t offset)
{
	unsigned int i;

	for (i=0; i<tdb->num_lockrecs; i++) {
		if (tdb->lockrecs[i].off == offset) {
			return &tdb->lockrecs[i];
		}
	}
	return NULL;
}
/* lock an offset in the database. */
int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
		  enum tdb_lock_flags flags)
{
	struct tdb_lock_type *new_lck;

	if (offset >= lock_offset(tdb->hash_size)) {
		tdb->ecode = TDB_ERR_LOCK;
		TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n",
			 offset, ltype));
		return -1;
	}
	if (tdb->flags & TDB_NOLOCK)
		return 0;

	new_lck = find_nestlock(tdb, offset);
	if (new_lck) {
		/*
		 * Just increment the in-memory struct, posix locks
		 * don't stack.
		 */
		new_lck->count++;
		return 0;
	}

	new_lck = (struct tdb_lock_type *)realloc(
		tdb->lockrecs,
		sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
	if (new_lck == NULL) {
		errno = ENOMEM;
		return -1;
	}
	tdb->lockrecs = new_lck;

	/* Since fcntl locks don't nest, we do a lock for the first one,
	   and simply bump the count for future ones */
	if (tdb_brlock(tdb, ltype, offset, 1, flags)) {
		return -1;
	}

	tdb->lockrecs[tdb->num_lockrecs].off = offset;
	tdb->lockrecs[tdb->num_lockrecs].count = 1;
	tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
	tdb->num_lockrecs++;

	return 0;
}
static int tdb_lock_and_recover(struct tdb_context *tdb)
{
	int ret;

	/* We need to match locking order in transaction commit. */
	if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
		return -1;
	}

	if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
		tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
		return -1;
	}

	ret = tdb_transaction_recover(tdb);

	tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
	tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);

	return ret;
}
static bool have_data_locks(const struct tdb_context *tdb)
{
	unsigned int i;

	for (i = 0; i < tdb->num_lockrecs; i++) {
		if (tdb->lockrecs[i].off >= lock_offset(-1))
			return true;
	}
	return false;
}
/*
 * An allrecord lock allows us to avoid per-chain locks. Check if the
 * allrecord lock is strong enough.
 */
static int tdb_lock_covered_by_allrecord_lock(struct tdb_context *tdb,
					      int ltype)
{
	if (ltype == F_RDLCK) {
		/*
		 * The allrecord_lock is equal (F_RDLCK) or stronger
		 * (F_WRLCK). Pass.
		 */
		return 0;
	}

	if (tdb->allrecord_lock.ltype == F_RDLCK) {
		/*
		 * We ask for ltype==F_WRLCK, but the allrecord_lock
		 * is too weak. We can't upgrade here, so fail.
		 */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	/*
	 * Asking for F_WRLCK, allrecord is F_WRLCK as well. Pass.
	 */
	return 0;
}
static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
			 enum tdb_lock_flags waitflag)
{
	int ret;
	bool check = false;

	if (tdb->allrecord_lock.count) {
		return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
	}

	/*
	 * Check for recoveries: Someone might have kill -9'ed a process
	 * during a commit.
	 */
	check = !have_data_locks(tdb);
	ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag);

	if (ret == 0 && check && tdb_needs_recovery(tdb)) {
		tdb_nest_unlock(tdb, lock_offset(list), ltype, false);

		if (tdb_lock_and_recover(tdb) == -1) {
			return -1;
		}
		return tdb_lock_list(tdb, list, ltype, waitflag);
	}
	return ret;
}
/* lock a list in the database. list -1 is the alloc list */
int tdb_lock(struct tdb_context *tdb, int list, int ltype)
{
	int ret;

	ret = tdb_lock_list(tdb, list, ltype, TDB_LOCK_WAIT);
	if (ret) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
			 "ltype=%d (%s)\n", list, ltype, strerror(errno)));
	}
	return ret;
}
/* lock a list in the database. list -1 is the alloc list. non-blocking lock */
_PUBLIC_ int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype)
{
	return tdb_lock_list(tdb, list, ltype, TDB_LOCK_NOWAIT);
}
int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
		    bool mark_lock)
{
	int ret = -1;
	struct tdb_lock_type *lck;

	if (tdb->flags & TDB_NOLOCK)
		return 0;

	/* Sanity checks */
	if (offset >= lock_offset(tdb->hash_size)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->hash_size));
		return ret;
	}

	lck = find_nestlock(tdb, offset);
	if ((lck == NULL) || (lck->count == 0)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
		return -1;
	}

	if (lck->count > 1) {
		lck->count--;
		return 0;
	}

	/*
	 * This lock has count==1 left, so we need to unlock it in the
	 * kernel. We don't bother with decrementing the in-memory array
	 * element, we're about to overwrite it with the last array element
	 * anyway.
	 */

	if (mark_lock) {
		ret = 0;
	} else {
		ret = tdb_brunlock(tdb, ltype, offset, 1);
	}

	/*
	 * Shrink the array by overwriting the element just unlocked with the
	 * last array element.
	 */
	*lck = tdb->lockrecs[--tdb->num_lockrecs];

	/*
	 * We don't bother with realloc when the array shrinks, but if we have
	 * a completely idle tdb we should get rid of the locked array.
	 */

	if (tdb->num_lockrecs == 0) {
		SAFE_FREE(tdb->lockrecs);
	}

	if (ret)
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n"));
	return ret;
}
_PUBLIC_ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
{
	/* a global lock allows us to avoid per chain locks */
	if (tdb->allrecord_lock.count) {
		return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
	}

	return tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
}
/*
  get the transaction lock
 */
int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
			 enum tdb_lock_flags lockflags)
{
	return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, lockflags);
}

/*
  release the transaction lock
 */
int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
	return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
}
/* Returns 0 if all done, -1 if error, 1 if ok. */
static int tdb_allrecord_check(struct tdb_context *tdb, int ltype,
			       enum tdb_lock_flags flags, bool upgradable)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == ltype) {
		tdb->allrecord_lock.count++;
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		/* a global lock of a different type exists */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb_have_extra_locks(tdb)) {
		/* can't combine global and chain locks */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (upgradable && ltype != F_RDLCK) {
		/* tdb error: you can't upgrade a write lock! */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}
	return 1;
}
/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
static int tdb_chainlock_gradual(struct tdb_context *tdb,
				 int ltype, enum tdb_lock_flags flags,
				 size_t off, size_t len)
{
	int ret;
	enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

	if (len <= 4) {
		/* Single record. Just do blocking lock. */
		return tdb_brlock(tdb, ltype, off, len, flags);
	}

	/* First we try non-blocking. */
	ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
	if (ret == 0) {
		return 0;
	}

	/* Try locking first half, then second. */
	ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2);
	if (ret == -1)
		return -1;

	ret = tdb_chainlock_gradual(tdb, ltype, flags,
				    off + len / 2, len - len / 2);
	if (ret == -1) {
		tdb_brunlock(tdb, ltype, off, len / 2);
		return -1;
	}
	return 0;
}
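/*
 * Worked example of the gradual strategy: asked to cover 32 bytes, the
 * routine first tries a single non-blocking lock over all 32. If another
 * process already holds part of that range, it falls back to a blocking
 * lock over bytes 0-15 and then 16-31 (relative to off), each half
 * recursing the same way until the pieces are <= 4 bytes, which are taken
 * with plain blocking locks. If the second half fails, the first half is
 * released, so no partial coverage is left behind.
 */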
/* lock/unlock entire database. It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (ie. transaction write lock).
 * We do the locking gradually to avoid being starved by smaller locks. */
int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
		       enum tdb_lock_flags flags, bool upgradable)
{
	switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) {
	case -1:
		return -1;
	case 0:
		return 0;
	}

	/* We cover two kinds of locks:
	 * 1) Normal chain locks. Taken for almost all operations.
	 * 2) Individual record locks. Taken after normal or free
	 *    chain locks.
	 *
	 * It is (1) which causes the starvation problem, so we're only
	 * gradual for that. */
	if (tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP,
				  tdb->hash_size * 4) == -1) {
		return -1;
	}

	/* Grab individual record locks. */
	if (tdb_brlock(tdb, ltype, lock_offset(tdb->hash_size), 0,
		       flags) == -1) {
		tdb_brunlock(tdb, ltype, FREELIST_TOP,
			     tdb->hash_size * 4);
		return -1;
	}

	tdb->allrecord_lock.count = 1;
	/* If it's upgradable, it's actually exclusive so we can treat
	 * it as a write lock. */
	tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
	tdb->allrecord_lock.off = upgradable;

	if (tdb_needs_recovery(tdb)) {
		bool mark = flags & TDB_LOCK_MARK_ONLY;
		tdb_allrecord_unlock(tdb, ltype, mark);
		if (mark) {
			tdb->ecode = TDB_ERR_LOCK;
			TDB_LOG((tdb, TDB_DEBUG_ERROR,
				 "tdb_lockall_mark cannot do recovery\n"));
			return -1;
		}
		if (tdb_lock_and_recover(tdb) == -1) {
			return -1;
		}
		return tdb_allrecord_lock(tdb, ltype, flags, upgradable);
	}

	return 0;
}
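/*
 * Note: allrecord_lock.off doubles as the "upgradable" flag. It is set to
 * the upgradable argument above and checked by tdb_allrecord_upgrade(),
 * which refuses to promote the lock to F_WRLCK unless off == 1.
 */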
/* unlock entire db */
int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count == 0) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	/* Upgradable locks are marked as write locks. */
	if (tdb->allrecord_lock.ltype != ltype
	    && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count > 1) {
		tdb->allrecord_lock.count--;
		return 0;
	}

	if (!mark_lock && tdb_brunlock(tdb, ltype, FREELIST_TOP, 0)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed (%s)\n", strerror(errno)));
		return -1;
	}

	tdb->allrecord_lock.count = 0;
	tdb->allrecord_lock.ltype = 0;

	return 0;
}
/* lock entire database with write lock */
_PUBLIC_ int tdb_lockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall");
	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with write lock - mark only */
_PUBLIC_ int tdb_lockall_mark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_mark");
	return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY, false);
}

/* unlock entire database with write lock - unmark only */
_PUBLIC_ int tdb_lockall_unmark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_unmark");
	return tdb_allrecord_unlock(tdb, F_WRLCK, true);
}

/* lock entire database with write lock - non-blocking variant */
_PUBLIC_ int tdb_lockall_nonblock(struct tdb_context *tdb)
{
	int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
	tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
	return ret;
}

/* unlock entire database with write lock */
_PUBLIC_ int tdb_unlockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall");
	return tdb_allrecord_unlock(tdb, F_WRLCK, false);
}

/* lock entire database with read lock */
_PUBLIC_ int tdb_lockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_read");
	return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

/* lock entire database with read lock - non-blocking variant */
_PUBLIC_ int tdb_lockall_read_nonblock(struct tdb_context *tdb)
{
	int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false);
	tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
	return ret;
}

/* unlock entire database with read lock */
_PUBLIC_ int tdb_unlockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall_read");
	return tdb_allrecord_unlock(tdb, F_RDLCK, false);
}
/* lock/unlock one hash chain. This is meant to be used to reduce
   contention - it cannot guarantee how many records will be locked */
_PUBLIC_ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock", key);
	return ret;
}
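/*
 * Typical caller pattern (a sketch of how applications use the chain lock
 * API, not code from this file): hold the chain lock across a
 * read-modify-write of a single key, e.g.
 *
 *	if (tdb_chainlock(tdb, key) == 0) {
 *		TDB_DATA val = tdb_fetch(tdb, key);
 *		... modify val ...
 *		tdb_store(tdb, key, val, TDB_REPLACE);
 *		free(val.dptr);
 *		tdb_chainunlock(tdb, key);
 *	}
 *
 * Other keys that hash to the same chain are blocked as well, which is why
 * the comment above says the number of records covered is not guaranteed.
 */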
/* lock/unlock one hash chain, non-blocking. This is meant to be used
   to reduce contention - it cannot guarantee how many records will be
   locked */
_PUBLIC_ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
	return ret;
}

/* mark a chain as locked without actually locking it. Warning! use with great caution! */
_PUBLIC_ int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
				F_WRLCK, TDB_LOCK_MARK_ONLY);
	tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
	return ret;
}

/* unmark a chain as locked without actually unlocking it. Warning! use with great caution! */
_PUBLIC_ int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
	return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
			       F_WRLCK, true);
}

_PUBLIC_ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
}

_PUBLIC_ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	int ret;
	ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock_read", key);
	return ret;
}

_PUBLIC_ int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
}
/* record lock stops delete underneath */
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	if (tdb->allrecord_lock.count) {
		return 0;
	}
	return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
}

/*
  Write locks override our own fcntl readlocks, so check it here.
  Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
  an error to fail to get the lock here.
*/
int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;
	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			return -1;
	if (tdb->allrecord_lock.count) {
		if (tdb->allrecord_lock.ltype == F_WRLCK) {
			return 0;
		}
		return -1;
	}
	return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
}

int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	if (tdb->allrecord_lock.count) {
		return 0;
	}
	return tdb_brunlock(tdb, F_WRLCK, off, 1);
}
/* fcntl locks don't stack: avoid unlocking someone else's */
int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;
	uint32_t count = 0;

	if (tdb->allrecord_lock.count) {
		return 0;
	}

	if (off == 0)
		return 0;
	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			count++;
	return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0);
}
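/*
 * The count==1 test above means: only drop the kernel read lock when
 * exactly one entry in the traverse-lock list still refers to this record
 * offset. If a nested traversal also holds it, the single underlying fcntl
 * lock must stay in place for that other user.
 */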
bool tdb_have_extra_locks(struct tdb_context *tdb)
{
	unsigned int extra = tdb->num_lockrecs;

	/* A transaction holds the lock for all records. */
	if (!tdb->transaction && tdb->allrecord_lock.count) {
		return true;
	}

	/* We always hold the active lock if CLEAR_IF_FIRST. */
	if (find_nestlock(tdb, ACTIVE_LOCK)) {
		extra--;
	}

	/* In a transaction, we expect to hold the transaction lock */
	if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) {
		extra--;
	}

	return extra;
}
/* The transaction code uses this to remove all locks. */
void tdb_release_transaction_locks(struct tdb_context *tdb)
{
	unsigned int i, active = 0;

	if (tdb->allrecord_lock.count != 0) {
		tdb_allrecord_unlock(tdb, tdb->allrecord_lock.ltype, false);
		tdb->allrecord_lock.count = 0;
	}

	for (i=0;i<tdb->num_lockrecs;i++) {
		struct tdb_lock_type *lck = &tdb->lockrecs[i];

		/* Don't release the active lock! Copy it to first entry. */
		if (lck->off == ACTIVE_LOCK) {
			tdb->lockrecs[active++] = *lck;
		} else {
			tdb_brunlock(tdb, lck->ltype, lck->off, 1);
		}
	}
	tdb->num_lockrecs = active;
	if (tdb->num_lockrecs == 0) {
		SAFE_FREE(tdb->lockrecs);
	}
}
/* Following functions are added specifically to support CTDB. */

/* Don't do actual fcntl locking, just mark tdb locked */
int tdb_transaction_write_lock_mark(struct tdb_context *tdb);
_PUBLIC_ int tdb_transaction_write_lock_mark(struct tdb_context *tdb)
{
	return tdb_transaction_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY);
}

/* Don't do actual fcntl unlocking, just mark tdb unlocked */
int tdb_transaction_write_lock_unmark(struct tdb_context *tdb);
_PUBLIC_ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb)
{
	return tdb_nest_unlock(tdb, TRANSACTION_LOCK, F_WRLCK, true);
}