/* lib/tdb2/lock.c */
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell         2000
   Copyright (C) Jeremy Allison               2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/build_assert/build_assert.h>

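/* Rough map of the lock offsets used below (derived from the constants as
 * they are used in this file; see private.h for the authoritative values):
 *
 *      TDB_OPEN_LOCK / TDB_TRANSACTION_LOCK / TDB_EXPANSION_LOCK
 *              single-byte control locks;
 *      TDB_HASH_LOCK_START .. TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
 *              one byte per hash-chain region;
 *      TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE + b_off/sizeof(tdb_off_t)
 *              one byte per free-list bucket (see free_lock_off below).
 */
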
/* If we were threaded, we could wait for unlock, but we're not, so fail. */
enum TDB_ERROR owner_conflict(struct tdb_context *tdb, const char *call)
{
        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                          "%s: lock owned by another tdb in this process.",
                          call);
}

/* If we fork, we no longer really own locks. */
bool check_lock_pid(struct tdb_context *tdb, const char *call, bool log)
{
        /* No locks?  No problem! */
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                return true;
        }

        /* No fork?  No problem! */
        if (tdb->file->locker == getpid()) {
                return true;
        }

        if (log) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "%s: fork() detected after lock acquisition!"
                           " (%u vs %u)", call, tdb->file->locker, getpid());
        }
        return false;
}

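/* Example of the case caught above (a sketch, not part of this file;
 * tdb_chainlock() is from the public API, and "db"/"key" are assumed to
 * exist already):
 *
 *      tdb_chainlock(db, key);
 *      if (fork() == 0) {
 *              ...the child's lock calls now fail with TDB_ERR_LOCK...
 *      }
 *
 * fcntl locks are per-process and not inherited across fork(), so the
 * child "owns" the lock in our bookkeeping but not in the kernel; failing
 * loudly here beats silently corrupting the database. */
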
int tdb_fcntl_lock(int fd, int rw, off_t off, off_t len, bool waitflag,
                   void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = rw;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                if (waitflag)
                        ret = fcntl(fd, F_SETLKW, &fl);
                else
                        ret = fcntl(fd, F_SETLK, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}

int tdb_fcntl_unlock(int fd, int rw, off_t off, off_t len, void *unused)
{
        struct flock fl;
        int ret;

        do {
                fl.l_type = F_UNLCK;
                fl.l_whence = SEEK_SET;
                fl.l_start = off;
                fl.l_len = len;

                ret = fcntl(fd, F_SETLKW, &fl);
        } while (ret != 0 && errno == EINTR);
        return ret;
}

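/* These two are only the defaults: tdb->lock_fn/tdb->unlock_fn (used by
 * lock() and unlock() below) can be overridden when the database is opened.
 * A sketch, assuming the TDB_ATTRIBUTE_FLOCK open attribute with
 * lock/unlock/data fields (check tdb2.h for the real names):
 *
 *      union tdb_attribute locking;
 *      locking.flock.base.attr = TDB_ATTRIBUTE_FLOCK;
 *      locking.flock.lock = my_lock_fn;
 *      locking.flock.unlock = my_unlock_fn;
 *      locking.flock.data = my_state;
 *      db = tdb_open("db.tdb", 0, O_RDWR, 0600, &locking);
 */
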
static int lock(struct tdb_context *tdb,
                int rw, off_t off, off_t len, bool waitflag)
{
        int ret;
        if (tdb->file->allrecord_lock.count == 0
            && tdb->file->num_lockrecs == 0) {
                tdb->file->locker = getpid();
        }

        tdb->stats.lock_lowlevel++;
        ret = tdb->lock_fn(tdb->file->fd, rw, off, len, waitflag,
                           tdb->lock_data);
        if (!waitflag) {
                tdb->stats.lock_nonblock++;
                if (ret != 0)
                        tdb->stats.lock_nonblock_fail++;
        }
        return ret;
}

static int unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
#if 0 /* Debug-only: check they matched up locks and unlocks correctly. */
        char line[80];
        FILE *locks;
        bool found = false;

        locks = fopen("/proc/locks", "r");

        while (fgets(line, 80, locks)) {
                char *p;
                int type, start, l;

                /* eg. 1: FLOCK  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
                p = strchr(line, ':') + 1;
                if (strncmp(p, " POSIX  ADVISORY  ",
                            strlen(" POSIX  ADVISORY  ")))
                        continue;
                p += strlen(" POSIX  ADVISORY  ");
                if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
                        type = F_RDLCK;
                else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
                        type = F_WRLCK;
                else
                        abort();
                p += 6;
                if (atoi(p) != getpid())
                        continue;
                p = strchr(strchr(p, ' ') + 1, ' ') + 1;
                start = atoi(p);
                p = strchr(p, ' ') + 1;
                if (strncmp(p, "EOF", 3) == 0)
                        l = 0;
                else
                        l = atoi(p) - start + 1;

                if (off == start) {
                        if (len != l) {
                                fprintf(stderr, "Len %u should be %u: %s",
                                        (int)len, l, line);
                                abort();
                        }
                        if (type != rw) {
                                fprintf(stderr, "Type %s wrong: %s",
                                        rw == F_RDLCK ? "READ" : "WRITE", line);
                                abort();
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                fprintf(stderr, "Unlock on %u@%u not found!",
                        (int)off, (int)len);
                abort();
        }

        fclose(locks);
#endif

        return tdb->unlock_fn(tdb->file->fd, rw, off, len, tdb->lock_data);
}

/* a byte range locking function - return 0 on success
   this function locks len bytes at the specified offset.

   note that a len of zero means lock to end of file
*/
enum TDB_ERROR tdb_brlock(struct tdb_context *tdb,
                          int rw_type, tdb_off_t offset, tdb_off_t len,
                          enum tdb_lock_flags flags)
{
        int ret;

        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (rw_type == F_WRLCK && (tdb->flags & TDB_RDONLY)) {
                return tdb_logerr(tdb, TDB_ERR_RDONLY, TDB_LOG_USE_ERROR,
                                  "Write lock attempted on read-only database");
        }

        /* A 32 bit system cannot open a 64-bit file, but it could have
         * expanded since then: check here. */
        if ((size_t)(offset + len) != offset + len) {
                return tdb_logerr(tdb, TDB_ERR_IO, TDB_LOG_ERROR,
                                  "tdb_brlock: lock on giant offset %llu",
                                  (long long)(offset + len));
        }

        ret = lock(tdb, rw_type, offset, len, flags & TDB_LOCK_WAIT);
        if (ret != 0) {
                /* Generic lock error. errno set by fcntl.
                 * EAGAIN is an expected return from non-blocking
                 * locks. */
                if (!(flags & TDB_LOCK_PROBE)
                    && (errno != EAGAIN && errno != EINTR)) {
                        tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                   "tdb_brlock failed (fd=%d) at"
                                   " offset %zu rw_type=%d flags=%d len=%zu:"
                                   " %s",
                                   tdb->file->fd, (size_t)offset, rw_type,
                                   flags, (size_t)len, strerror(errno));
                }
                return TDB_ERR_LOCK;
        }
        return TDB_SUCCESS;
}

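/* Example (a sketch, not part of the original file): probing for a lock
 * without blocking.  Omitting TDB_LOCK_WAIT makes lock() use F_SETLK, and
 * TDB_LOCK_PROBE keeps the expected EAGAIN out of the logs. */
#if 0
static enum TDB_ERROR try_probe(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        ecode = tdb_brlock(tdb, F_RDLCK, TDB_HASH_LOCK_START, 1,
                           TDB_LOCK_PROBE);
        if (ecode == TDB_SUCCESS)
                tdb_brunlock(tdb, F_RDLCK, TDB_HASH_LOCK_START, 1);
        /* TDB_ERR_LOCK here just means someone else holds a conflicting
         * lock; back off and retry later. */
        return ecode;
}
#endif
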
enum TDB_ERROR tdb_brunlock(struct tdb_context *tdb,
                            int rw_type, tdb_off_t offset, size_t len)
{
        if (tdb->flags & TDB_NOLOCK) {
                return TDB_SUCCESS;
        }

        if (!check_lock_pid(tdb, "tdb_brunlock", true))
                return TDB_ERR_LOCK;

        if (unlock(tdb, rw_type, offset, len) == -1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_brunlock failed (fd=%d) at offset %zu"
                                  " rw_type=%d len=%zu: %s",
                                  tdb->file->fd, (size_t)offset, rw_type,
                                  (size_t)len, strerror(errno));
        }
        return TDB_SUCCESS;
}

/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have too conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
enum TDB_ERROR tdb_allrecord_upgrade(struct tdb_context *tdb, off_t start)
{
        int count = 1000;

        if (!check_lock_pid(tdb, "tdb_allrecord_upgrade", true))
                return TDB_ERR_LOCK;

        if (tdb->file->allrecord_lock.count != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " count %u too high",
                                  tdb->file->allrecord_lock.count);
        }

        if (tdb->file->allrecord_lock.off != 1) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_upgrade failed:"
                                  " already upgraded?");
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                return owner_conflict(tdb, "tdb_allrecord_upgrade");
        }

        while (count--) {
                struct timeval tv;
                if (tdb_brlock(tdb, F_WRLCK, start, 0,
                               TDB_LOCK_WAIT|TDB_LOCK_PROBE) == TDB_SUCCESS) {
                        tdb->file->allrecord_lock.ltype = F_WRLCK;
                        tdb->file->allrecord_lock.off = 0;
                        return TDB_SUCCESS;
                }
                if (errno != EDEADLK) {
                        break;
                }
                /* sleep for as short a time as we can - more portable than usleep() */
                tv.tv_sec = 0;
                tv.tv_usec = 1;
                select(0, NULL, NULL, NULL, &tv);
        }

        if (errno != EAGAIN && errno != EINTR)
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_upgrade failed");
        return TDB_ERR_LOCK;
}

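/* Why the loop above: converting our read lock to a write lock while other
 * processes still hold (and are about to drop) their read locks can look
 * like a deadlock to an over-eager detector, hence up to 1000 retries with
 * a 1-microsecond select() sleep after each EDEADLK return. */
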
static struct tdb_lock *find_nestlock(struct tdb_context *tdb, tdb_off_t offset,
                                      const struct tdb_context *owner)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off == offset) {
                        if (owner && tdb->file->lockrecs[i].owner != owner)
                                return NULL;
                        return &tdb->file->lockrecs[i];
                }
        }
        return NULL;
}

enum TDB_ERROR tdb_lock_and_recover(struct tdb_context *tdb)
{
        enum TDB_ERROR ecode;

        if (!check_lock_pid(tdb, "tdb_lock_and_recover", true))
                return TDB_ERR_LOCK;

        ecode = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK,
                                   false);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        ecode = tdb_lock_open(tdb, F_WRLCK, TDB_LOCK_WAIT|TDB_LOCK_NOCHECK);
        if (ecode != TDB_SUCCESS) {
                tdb_allrecord_unlock(tdb, F_WRLCK);
                return ecode;
        }
        ecode = tdb_transaction_recover(tdb);

        tdb_unlock_open(tdb, F_WRLCK);
        tdb_allrecord_unlock(tdb, F_WRLCK);

        return ecode;
}

/* lock an offset in the database. */
enum TDB_ERROR tdb_nest_lock(struct tdb_context *tdb,
                             tdb_off_t offset, int ltype,
                             enum tdb_lock_flags flags)
{
        struct tdb_lock *new_lck;
        enum TDB_ERROR ecode;

        if (!(tdb->flags & TDB_VERSION1)
            && offset > (TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                         + tdb->file->map_size / 8)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: invalid offset %zu ltype=%d",
                                  (size_t)offset, ltype);
        }

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_nest_lock", true)) {
                return TDB_ERR_LOCK;
        }

        tdb->stats.locks++;

        new_lck = find_nestlock(tdb, offset, NULL);
        if (new_lck) {
                if (new_lck->owner != tdb) {
                        return owner_conflict(tdb, "tdb_nest_lock");
                }

                if (new_lck->ltype == F_RDLCK && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_nest_lock:"
                                          " offset %zu has read lock",
                                          (size_t)offset);
                }
                /* Just increment the struct, posix locks don't stack. */
                new_lck->count++;
                return TDB_SUCCESS;
        }

#if 0
        if (tdb->file->num_lockrecs
            && offset >= TDB_HASH_LOCK_START
            && offset < TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_lock: already have a hash lock?");
        }
#endif

        new_lck = (struct tdb_lock *)realloc(
                tdb->file->lockrecs,
                sizeof(*tdb->file->lockrecs) * (tdb->file->num_lockrecs+1));
        if (new_lck == NULL) {
                return tdb_logerr(tdb, TDB_ERR_OOM, TDB_LOG_ERROR,
                                  "tdb_nest_lock:"
                                  " unable to allocate %zu lock struct",
                                  tdb->file->num_lockrecs + 1);
        }
        tdb->file->lockrecs = new_lck;

        /* Since fcntl locks don't nest, we do a lock for the first one,
           and simply bump the count for future ones */
        ecode = tdb_brlock(tdb, ltype, offset, 1, flags);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }

        /* First time we grab a lock, perhaps someone died in commit? */
        if (!(flags & TDB_LOCK_NOCHECK)
            && tdb->file->num_lockrecs == 0) {
                tdb_bool_err berr = tdb_needs_recovery(tdb);
                if (berr != false) {
                        tdb_brunlock(tdb, ltype, offset, 1);

                        if (berr < 0)
                                return TDB_OFF_TO_ERR(berr);
                        ecode = tdb_lock_and_recover(tdb);
                        if (ecode == TDB_SUCCESS) {
                                ecode = tdb_brlock(tdb, ltype, offset, 1,
                                                   flags);
                        }
                        if (ecode != TDB_SUCCESS) {
                                return ecode;
                        }
                }
        }

        tdb->file->lockrecs[tdb->file->num_lockrecs].owner = tdb;
        tdb->file->lockrecs[tdb->file->num_lockrecs].off = offset;
        tdb->file->lockrecs[tdb->file->num_lockrecs].count = 1;
        tdb->file->lockrecs[tdb->file->num_lockrecs].ltype = ltype;
        tdb->file->num_lockrecs++;

        return TDB_SUCCESS;
}

enum TDB_ERROR tdb_nest_unlock(struct tdb_context *tdb,
                               tdb_off_t off, int ltype)
{
        struct tdb_lock *lck;
        enum TDB_ERROR ecode;

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        lck = find_nestlock(tdb, off, tdb);
        if ((lck == NULL) || (lck->count == 0)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_nest_unlock: no lock for %zu",
                                  (size_t)off);
        }

        if (lck->count > 1) {
                lck->count--;
                return TDB_SUCCESS;
        }

        /*
         * This lock has count==1 left, so we need to unlock it in the
         * kernel. We don't bother with decrementing the in-memory array
         * element, we're about to overwrite it with the last array element
         * anyway.
         */
        ecode = tdb_brunlock(tdb, ltype, off, 1);

        /*
         * Shrink the array by overwriting the element just unlocked with the
         * last array element.
         */
        *lck = tdb->file->lockrecs[--tdb->file->num_lockrecs];

        return ecode;
}

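/* A sketch of the nesting semantics implemented above (not code from this
 * file; "off" is any valid lock offset):
 *
 *      tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT);   fcntl lock taken
 *      tdb_nest_lock(tdb, off, F_RDLCK, TDB_LOCK_WAIT);   count bumped to 2
 *      tdb_nest_unlock(tdb, off, F_RDLCK);                count back to 1
 *      tdb_nest_unlock(tdb, off, F_RDLCK);                fcntl unlock
 *
 * Only the first lock and the last unlock reach the kernel, because POSIX
 * fcntl locks don't stack within a process. */
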
/*
  get the transaction lock
*/
enum TDB_ERROR tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
        return tdb_nest_lock(tdb, TDB_TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  release the transaction lock
*/
void tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_TRANSACTION_LOCK, ltype);
}

/* We only need to lock individual bytes, but Linux merges consecutive locks
 * so we lock in contiguous ranges. */
enum TDB_ERROR tdb_lock_gradual(struct tdb_context *tdb,
                                int ltype, enum tdb_lock_flags flags,
                                tdb_off_t off, tdb_off_t len)
{
        enum TDB_ERROR ecode;
        enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);

        if (len <= 1) {
                /* 0 would mean to end-of-file... */
                assert(len != 0);
                /* Single hash.  Just do blocking lock. */
                return tdb_brlock(tdb, ltype, off, len, flags);
        }

        /* First we try non-blocking. */
        ecode = tdb_brlock(tdb, ltype, off, len, nb_flags);
        if (ecode != TDB_ERR_LOCK) {
                return ecode;
        }

        /* Try locking first half, then second. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, off, len / 2);
        if (ecode != TDB_SUCCESS)
                return ecode;

        ecode = tdb_lock_gradual(tdb, ltype, flags,
                                 off + len / 2, len - len / 2);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, off, len / 2);
        }
        return ecode;
}

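/* Worked example of the recursion above (a sketch): asked to lock 64 bytes
 * while another process holds byte 40, we fail the non-blocking attempt on
 * [off, off+64), split into [off, off+32) and [off+32, off+64), take the
 * uncontended half without blocking, and keep halving the contended half
 * until len <= 1.  The only blocking wait is therefore ever on a single
 * byte, so a waiter never sits blocked while hoarding a large range. */
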
/* lock/unlock entire database.  It can only be upgradable if you have some
 * other way of guaranteeing exclusivity (ie. transaction write lock). */
enum TDB_ERROR tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
                                  enum tdb_lock_flags flags, bool upgradable)
{
        enum TDB_ERROR ecode;
        tdb_bool_err berr;

        if (tdb->flags & TDB_VERSION1) {
                if (tdb1_allrecord_lock(tdb, ltype, flags, upgradable) == -1)
                        return tdb->last_error;
                return TDB_SUCCESS;
        }

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        if (!check_lock_pid(tdb, "tdb_allrecord_lock", true)) {
                return TDB_ERR_LOCK;
        }

        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.owner != tdb) {
                        return owner_conflict(tdb, "tdb_allrecord_lock");
                }

                if (ltype == F_RDLCK
                    || tdb->file->allrecord_lock.ltype == F_WRLCK) {
                        tdb->file->allrecord_lock.count++;
                        return TDB_SUCCESS;
                }

                /* a global lock of a different type exists */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock: already have %s lock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_hash_locks(tdb)) {
                /* can't combine global and chain locks */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_allrecord_lock:"
                                  " already have chain lock");
        }

        if (upgradable && ltype != F_RDLCK) {
                /* tdb error: you can't upgrade a write lock! */
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_allrecord_lock:"
                                  " can't upgrade a write lock");
        }

        tdb->stats.locks++;
again:
        /* Lock hashes, gradually. */
        ecode = tdb_lock_gradual(tdb, ltype, flags, TDB_HASH_LOCK_START,
                                 TDB_HASH_LOCK_RANGE);
        if (ecode != TDB_SUCCESS)
                return ecode;

        /* Lock free tables: there to end of file. */
        ecode = tdb_brlock(tdb, ltype,
                           TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE,
                           0, flags);
        if (ecode != TDB_SUCCESS) {
                tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START,
                             TDB_HASH_LOCK_RANGE);
                return ecode;
        }

        tdb->file->allrecord_lock.owner = tdb;
        tdb->file->allrecord_lock.count = 1;
        /* If it's upgradable, it's actually exclusive so we can treat
         * it as a write lock. */
        tdb->file->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
        tdb->file->allrecord_lock.off = upgradable;

        /* Now check for needing recovery. */
        if (flags & TDB_LOCK_NOCHECK)
                return TDB_SUCCESS;

        berr = tdb_needs_recovery(tdb);
        if (likely(berr == false))
                return TDB_SUCCESS;

        tdb_allrecord_unlock(tdb, ltype);
        if (berr < 0)
                return TDB_OFF_TO_ERR(berr);
        ecode = tdb_lock_and_recover(tdb);
        if (ecode != TDB_SUCCESS) {
                return ecode;
        }
        goto again;
}

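/* How the upgradable path is meant to be used (a sketch; the real caller is
 * the transaction code, and the start offset passed to the upgrade is
 * assumed here to be TDB_HASH_LOCK_START):
 *
 *      tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true);
 *      ...readers coexist while we prepare...
 *      tdb_allrecord_upgrade(tdb, TDB_HASH_LOCK_START);
 *      ...now exclusive; commit...
 *      tdb_allrecord_unlock(tdb, F_WRLCK);
 *
 * Note that allrecord_lock.off doubles as the "upgradable" flag: 1 until
 * the upgrade happens, then 0 (see tdb_allrecord_upgrade above). */
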
enum TDB_ERROR tdb_lock_open(struct tdb_context *tdb,
                             int ltype, enum tdb_lock_flags flags)
{
        return tdb_nest_lock(tdb, TDB_OPEN_LOCK, ltype, flags);
}

void tdb_unlock_open(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_OPEN_LOCK, ltype);
}

bool tdb_has_open_lock(struct tdb_context *tdb)
{
        return !(tdb->flags & TDB_NOLOCK)
                && find_nestlock(tdb, TDB_OPEN_LOCK, tdb) != NULL;
}

enum TDB_ERROR tdb_lock_expand(struct tdb_context *tdb, int ltype)
{
        /* Lock doesn't protect data, so don't check (we recurse if we do!) */
        return tdb_nest_lock(tdb, TDB_EXPANSION_LOCK, ltype,
                             TDB_LOCK_WAIT | TDB_LOCK_NOCHECK);
}

void tdb_unlock_expand(struct tdb_context *tdb, int ltype)
{
        tdb_nest_unlock(tdb, TDB_EXPANSION_LOCK, ltype);
}

/* unlock entire db */
void tdb_allrecord_unlock(struct tdb_context *tdb, int ltype)
{
        if (tdb->flags & TDB_VERSION1) {
                tdb1_allrecord_unlock(tdb, ltype);
                return;
        }

        if (tdb->flags & TDB_NOLOCK)
                return;

        if (tdb->file->allrecord_lock.count == 0) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked!");
                return;
        }

        if (tdb->file->allrecord_lock.owner != tdb) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                           "tdb_allrecord_unlock: not locked by us!");
                return;
        }

        /* Upgradable locks are marked as write locks. */
        if (tdb->file->allrecord_lock.ltype != ltype
            && (!tdb->file->allrecord_lock.off || ltype != F_RDLCK)) {
                tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                           "tdb_allrecord_unlock: have %s lock",
                           tdb->file->allrecord_lock.ltype == F_RDLCK
                           ? "read" : "write");
                return;
        }

        if (tdb->file->allrecord_lock.count > 1) {
                tdb->file->allrecord_lock.count--;
                return;
        }

        tdb->file->allrecord_lock.count = 0;
        tdb->file->allrecord_lock.ltype = 0;

        tdb_brunlock(tdb, ltype, TDB_HASH_LOCK_START, 0);
}

bool tdb_has_expansion_lock(struct tdb_context *tdb)
{
        return find_nestlock(tdb, TDB_EXPANSION_LOCK, tdb) != NULL;
}

bool tdb_has_hash_locks(struct tdb_context *tdb)
{
        unsigned int i;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off >= TDB_HASH_LOCK_START
                    && tdb->file->lockrecs[i].off < (TDB_HASH_LOCK_START
                                                     + TDB_HASH_LOCK_RANGE))
                        return true;
        }
        return false;
}

static bool tdb_has_free_lock(struct tdb_context *tdb)
{
        unsigned int i;

        if (tdb->flags & TDB_NOLOCK)
                return false;

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].off
                    > TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE)
                        return true;
        }
        return false;
}

enum TDB_ERROR tdb_lock_hashes(struct tdb_context *tdb,
                               tdb_off_t hash_lock,
                               tdb_len_t hash_range,
                               int ltype, enum tdb_lock_flags waitflag)
{
        /* FIXME: Do this properly, using hlock_range */
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        /* an allrecord lock allows us to avoid per chain locks */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_hashes", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.owner != tdb)
                        return owner_conflict(tdb, "tdb_lock_hashes");
                if (ltype == tdb->file->allrecord_lock.ltype
                    || ltype == F_RDLCK) {
                        return TDB_SUCCESS;
                }

                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have %s allrecord lock",
                                  tdb->file->allrecord_lock.ltype == F_RDLCK
                                  ? "read" : "write");
        }

        if (tdb_has_free_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes: already have free lock");
        }

        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_hashes:"
                                  " already have expansion lock");
        }

        return tdb_nest_lock(tdb, l, ltype, waitflag);
}

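/* The mapping above takes the top TDB_HASH_LOCK_RANGE_BITS bits of the
 * 64-bit hash.  For example (assuming the 30-bit range described in the
 * comment before free_lock_off below), every key whose hash shares the
 * same top 30 bits contends on the single lock byte at
 * TDB_HASH_LOCK_START + (hash >> 34). */
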
enum TDB_ERROR tdb_unlock_hashes(struct tdb_context *tdb,
                                 tdb_off_t hash_lock,
                                 tdb_len_t hash_range, int ltype)
{
        unsigned l = TDB_HASH_LOCK_START
                + (hash_lock >> (64 - TDB_HASH_LOCK_RANGE_BITS));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* an allrecord lock allows us to avoid per chain locks */
        if (tdb->file->allrecord_lock.count) {
                if (tdb->file->allrecord_lock.ltype == F_RDLCK
                    && ltype == F_WRLCK) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                          "tdb_unlock_hashes RO allrecord!");
                }
                if (tdb->file->allrecord_lock.owner != tdb) {
                        return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_USE_ERROR,
                                          "tdb_unlock_hashes:"
                                          " not locked by us!");
                }
                return TDB_SUCCESS;
        }

        return tdb_nest_unlock(tdb, l, ltype);
}

/* Hash locks use TDB_HASH_LOCK_START + the next 30 bits.
 * Then we begin; bucket offsets are sizeof(tdb_len_t) apart, so we divide.
 * The result is that on 32 bit systems we don't use lock values > 2^31 on
 * files that are less than 4GB.
 */
static tdb_off_t free_lock_off(tdb_off_t b_off)
{
        return TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE
                + b_off / sizeof(tdb_off_t);
}

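/* Worked example (a sketch, assuming an 8-byte tdb_off_t): free-list
 * buckets at file offsets 16, 24 and 32 map to lock offsets base+2, base+3
 * and base+4, where base = TDB_HASH_LOCK_START + TDB_HASH_LOCK_RANGE, so
 * adjacent buckets get adjacent (and hence kernel-mergeable) lock bytes. */
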
enum TDB_ERROR tdb_lock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off,
                                    enum tdb_lock_flags waitflag)
{
        assert(b_off >= sizeof(struct tdb_header));

        if (tdb->flags & TDB_NOLOCK)
                return TDB_SUCCESS;

        /* an allrecord lock allows us to avoid per chain locks */
        if (tdb->file->allrecord_lock.count) {
                if (!check_lock_pid(tdb, "tdb_lock_free_bucket", true))
                        return TDB_ERR_LOCK;

                if (tdb->file->allrecord_lock.owner != tdb) {
                        return owner_conflict(tdb, "tdb_lock_free_bucket");
                }

                if (tdb->file->allrecord_lock.ltype == F_WRLCK)
                        return TDB_SUCCESS;
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket with"
                                  " read-only allrecord lock!");
        }

#if 0 /* FIXME */
        if (tdb_has_expansion_lock(tdb)) {
                return tdb_logerr(tdb, TDB_ERR_LOCK, TDB_LOG_ERROR,
                                  "tdb_lock_free_bucket:"
                                  " already have expansion lock");
        }
#endif

        return tdb_nest_lock(tdb, free_lock_off(b_off), F_WRLCK, waitflag);
}

void tdb_unlock_free_bucket(struct tdb_context *tdb, tdb_off_t b_off)
{
        if (tdb->file->allrecord_lock.count)
                return;

        tdb_nest_unlock(tdb, free_lock_off(b_off), F_WRLCK);
}

_PUBLIC_ enum TDB_ERROR tdb_lockall(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
}

_PUBLIC_ void tdb_unlockall(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_WRLCK);
}

_PUBLIC_ enum TDB_ERROR tdb_lockall_read(struct tdb_context *tdb)
{
        return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
}

_PUBLIC_ void tdb_unlockall_read(struct tdb_context *tdb)
{
        tdb_allrecord_unlock(tdb, F_RDLCK);
}

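/* Example use of the public wrappers above (a sketch, not part of the
 * original file; "db" is assumed to come from tdb_open and error handling
 * is elided): */
#if 0
static void bulk_read(struct tdb_context *db)
{
        if (tdb_lockall_read(db) == TDB_SUCCESS) {
                /* Traverse or bulk-read with a stable view of the file. */
                tdb_unlockall_read(db);
        }
}
#endif
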
void tdb_lock_cleanup(struct tdb_context *tdb)
{
        unsigned int i;

        /* We don't want to warn: they're allowed to close tdb after fork. */
        if (!check_lock_pid(tdb, "tdb_close", false))
                return;

        while (tdb->file->allrecord_lock.count
               && tdb->file->allrecord_lock.owner == tdb) {
                tdb_allrecord_unlock(tdb, tdb->file->allrecord_lock.ltype);
        }

        for (i=0; i<tdb->file->num_lockrecs; i++) {
                if (tdb->file->lockrecs[i].owner == tdb) {
                        tdb_nest_unlock(tdb,
                                        tdb->file->lockrecs[i].off,
                                        tdb->file->lockrecs[i].ltype);
                        /* tdb_nest_unlock() swapped the last entry into
                         * slot i, so revisit it. */
                        i--;
                }
        }
}