tdb: use tdb_nest_lock() for open lock.
lib/tdb/common/lock.c
/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Andrew Tridgell              1999-2005
   Copyright (C) Paul `Rusty' Russell          2000
   Copyright (C) Jeremy Allison               2000-2003

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "tdb_private.h"
/* The caller can register a pointer that a signal handler sets when an
   alarm fires; tdb_brlock() checks it to abandon a blocked fcntl lock. */
void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
{
	tdb->interrupt_sig_ptr = ptr;
}
/* Take a read or write fcntl lock on [off, off+len), blocking if waitflag
   is set. */
static int fcntl_lock(struct tdb_context *tdb,
		      int rw, off_t off, off_t len, bool waitflag)
{
	struct flock fl;

	fl.l_type = rw;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	if (waitflag)
		return fcntl(tdb->fd, F_SETLKW, &fl);
	else
		return fcntl(tdb->fd, F_SETLK, &fl);
}
/* Drop an fcntl lock on [off, off+len). */
static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
{
	struct flock fl;
#if 0 /* Check they matched up locks and unlocks correctly. */
	char line[80];
	FILE *locks;
	bool found = false;

	locks = fopen("/proc/locks", "r");

	while (fgets(line, 80, locks)) {
		char *p;
		int type, start, l;

		/* eg. 1: FLOCK  ADVISORY  WRITE 2440 08:01:2180826 0 EOF */
		p = strchr(line, ':') + 1;
		if (strncmp(p, " POSIX  ADVISORY  ", strlen(" POSIX  ADVISORY  ")))
			continue;
		p += strlen(" FLOCK  ADVISORY  ");
		if (strncmp(p, "READ  ", strlen("READ  ")) == 0)
			type = F_RDLCK;
		else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
			type = F_WRLCK;
		else
			abort();
		p += 6;
		if (atoi(p) != getpid())
			continue;
		p = strchr(strchr(p, ' ') + 1, ' ') + 1;
		start = atoi(p);
		p = strchr(p, ' ') + 1;
		if (strncmp(p, "EOF", 3) == 0)
			l = 0;
		else
			l = atoi(p) - start + 1;

		if (off == start) {
			if (len != l) {
				fprintf(stderr, "Len %u should be %u: %s",
					(int)len, l, line);
				abort();
			}
			if (type != rw) {
				fprintf(stderr, "Type %s wrong: %s",
					rw == F_RDLCK ? "READ" : "WRITE", line);
				abort();
			}
			found = true;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Unlock on %u@%u not found!\n",
			(int)off, (int)len);
		abort();
	}

	fclose(locks);
#endif

	fl.l_type = F_UNLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = off;
	fl.l_len = len;
	fl.l_pid = 0;

	return fcntl(tdb->fd, F_SETLKW, &fl);
}
/* a byte range locking function - return 0 on success
   this function locks/unlocks 1 byte at the specified offset.

   On error, errno is also set so that errors are passed back properly
   through tdb_open().

   note that a len of zero means lock to end of file
*/
int tdb_brlock(struct tdb_context *tdb,
	       int rw_type, tdb_off_t offset, size_t len,
	       enum tdb_lock_flags flags)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	if (flags & TDB_LOCK_MARK_ONLY) {
		return 0;
	}

	if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) {
		tdb->ecode = TDB_ERR_RDONLY;
		return -1;
	}

	do {
		ret = fcntl_lock(tdb, rw_type, offset, len,
				 flags & TDB_LOCK_WAIT);
		/* Check for a SIGALRM break. */
		if (ret == -1 && errno == EINTR &&
		    tdb->interrupt_sig_ptr &&
		    *tdb->interrupt_sig_ptr) {
			break;
		}
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		tdb->ecode = TDB_ERR_LOCK;
		/* Generic lock error. errno set by fcntl.
		 * EAGAIN is an expected return from non-blocking
		 * locks. */
		if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
			TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %d rw_type=%d flags=%d len=%d\n",
				 tdb->fd, offset, rw_type, flags, (int)len));
		}
		return -1;
	}
	return 0;
}
/* Undo a tdb_brlock(). */
int tdb_brunlock(struct tdb_context *tdb,
		 int rw_type, tdb_off_t offset, size_t len)
{
	int ret;

	if (tdb->flags & TDB_NOLOCK) {
		return 0;
	}

	do {
		ret = fcntl_unlock(tdb, rw_type, offset, len);
	} while (ret == -1 && errno == EINTR);

	if (ret == -1) {
		TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %d rw_type=%d len=%d\n",
			 tdb->fd, offset, rw_type, (int)len));
	}
	return ret;
}
/*
  upgrade a read lock to a write lock. This needs to be handled in a
  special way as some OSes (such as Solaris) have too conservative
  deadlock detection and claim a deadlock when progress can be
  made. For those OSes we may loop for a while.
*/
int tdb_brlock_upgrade(struct tdb_context *tdb, tdb_off_t offset, size_t len)
{
	int count = 1000;
	while (count--) {
		struct timeval tv;
		if (tdb_brlock(tdb, F_WRLCK, offset, len,
			       TDB_LOCK_WAIT|TDB_LOCK_PROBE) == 0) {
			return 0;
		}
		if (errno != EDEADLK) {
			break;
		}
		/* sleep for as short a time as we can - more portable than usleep() */
		tv.tv_sec = 0;
		tv.tv_usec = 1;
		select(0, NULL, NULL, NULL, &tv);
	}
	TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock_upgrade failed at offset %d\n", offset));
	return -1;
}
/* list -1 is the alloc list, otherwise a hash chain. */
static tdb_off_t lock_offset(int list)
{
	return FREELIST_TOP + 4*list;
}
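/*
 * Illustrative sketch (not part of the library): how the lock offsets
 * work out, expressed relative to FREELIST_TOP from tdb_private.h.  Each
 * list gets its own 4-byte-spaced lock offset (only 1 byte is actually
 * fcntl-locked there):
 *
 *	lock_offset(-1)            == FREELIST_TOP - 4		(alloc/free list)
 *	lock_offset(0)             == FREELIST_TOP		(first hash chain)
 *	lock_offset(hash_size - 1) == FREELIST_TOP + 4*(hash_size - 1)
 *
 * tdb_nest_lock() below rejects anything >= lock_offset(hash_size), and
 * the allrecord lock covers all the chain lock bytes by locking
 * 4*tdb->header.hash_size bytes starting at FREELIST_TOP.
 */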
/* Find the in-memory lock record for an offset we already hold, if any. */
static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
					   tdb_off_t offset)
{
	unsigned int i;

	for (i=0; i<tdb->num_lockrecs; i++) {
		if (tdb->lockrecs[i].off == offset) {
			return &tdb->lockrecs[i];
		}
	}
	return NULL;
}
/* lock an offset in the database. */
int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
		  enum tdb_lock_flags flags)
{
	struct tdb_lock_type *new_lck;

	if (offset >= lock_offset(tdb->header.hash_size)) {
		tdb->ecode = TDB_ERR_LOCK;
		TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n",
			 offset, ltype));
		return -1;
	}
	if (tdb->flags & TDB_NOLOCK)
		return 0;

	new_lck = find_nestlock(tdb, offset);
	if (new_lck) {
		/*
		 * Just increment the in-memory struct, posix locks
		 * don't stack.
		 */
		new_lck->count++;
		return 0;
	}

	new_lck = (struct tdb_lock_type *)realloc(
		tdb->lockrecs,
		sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
	if (new_lck == NULL) {
		errno = ENOMEM;
		return -1;
	}
	tdb->lockrecs = new_lck;

	/* Since fcntl locks don't nest, we do a lock for the first one,
	   and simply bump the count for future ones */
	if (tdb->methods->brlock(tdb, ltype, offset, 1, flags)) {
		return -1;
	}

	tdb->num_locks++;

	tdb->lockrecs[tdb->num_lockrecs].off = offset;
	tdb->lockrecs[tdb->num_lockrecs].count = 1;
	tdb->lockrecs[tdb->num_lockrecs].ltype = ltype;
	tdb->num_lockrecs += 1;

	return 0;
}
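/*
 * Illustrative sketch (not part of the library): what the nest counting
 * above means for a caller.  Only the first lock on a given offset takes
 * a kernel fcntl lock; repeats just bump the count, and the fcntl lock
 * is only dropped when the count returns to zero (the list number 7 here
 * is hypothetical):
 *
 *	tdb_lock(tdb, 7, F_WRLCK);	// fcntl lock taken, count == 1
 *	tdb_lock(tdb, 7, F_WRLCK);	// no fcntl call, count == 2
 *	tdb_unlock(tdb, 7, F_WRLCK);	// count == 1, still locked
 *	tdb_unlock(tdb, 7, F_WRLCK);	// count == 0, fcntl unlock
 */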
/* lock a list in the database. list -1 is the alloc list */
int tdb_lock(struct tdb_context *tdb, int list, int ltype)
{
	int ret;

	/* an allrecord lock allows us to avoid per chain locks */
	if (tdb->allrecord_lock.count &&
	    (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		tdb->ecode = TDB_ERR_LOCK;
		ret = -1;
	} else {
		ret = tdb_nest_lock(tdb, lock_offset(list), ltype,
				    TDB_LOCK_WAIT);
	}

	if (ret) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
			 "ltype=%d (%s)\n", list, ltype, strerror(errno)));
	}
	return ret;
}
/* lock a list in the database. list -1 is the alloc list. non-blocking lock */
int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype)
{
	/* an allrecord lock allows us to avoid per chain locks */
	if (tdb->allrecord_lock.count &&
	    (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	return tdb_nest_lock(tdb, lock_offset(list), ltype, TDB_LOCK_NOWAIT);
}
/* Drop a lock taken with tdb_nest_lock().  If mark_lock is set, only the
   in-memory bookkeeping is updated; the kernel fcntl lock is left alone. */
int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
		    bool mark_lock)
{
	int ret = -1;
	struct tdb_lock_type *lck;

	if (tdb->flags & TDB_NOLOCK)
		return 0;

	/* Sanity checks */
	if (offset >= lock_offset(tdb->header.hash_size)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->header.hash_size));
		return ret;
	}

	lck = find_nestlock(tdb, offset);
	if ((lck == NULL) || (lck->count == 0)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
		return -1;
	}

	if (lck->count > 1) {
		lck->count--;
		return 0;
	}

	/*
	 * This lock has count==1 left, so we need to unlock it in the
	 * kernel. We don't bother with decrementing the in-memory array
	 * element, we're about to overwrite it with the last array element
	 * anyway.
	 */

	if (mark_lock) {
		ret = 0;
	} else {
		ret = tdb->methods->brunlock(tdb, ltype, offset, 1);
	}
	tdb->num_locks--;

	/*
	 * Shrink the array by overwriting the element just unlocked with the
	 * last array element.
	 */

	if (tdb->num_lockrecs > 1) {
		*lck = tdb->lockrecs[tdb->num_lockrecs-1];
	}
	tdb->num_lockrecs -= 1;

	/*
	 * We don't bother with realloc when the array shrinks, but if we have
	 * a completely idle tdb we should get rid of the locked array.
	 */

	if (tdb->num_lockrecs == 0) {
		SAFE_FREE(tdb->lockrecs);
	}

	if (ret)
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n"));
	return ret;
}
int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
{
	/* a global lock allows us to avoid per chain locks */
	if (tdb->allrecord_lock.count &&
	    (ltype == tdb->allrecord_lock.ltype || ltype == F_RDLCK)) {
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	return tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
}
/*
  get the transaction lock
 */
int tdb_transaction_lock(struct tdb_context *tdb, int ltype)
{
	return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, TDB_LOCK_WAIT);
}

/*
  release the transaction lock
 */
int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
{
	return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
}
/* lock/unlock entire database */
static int _tdb_lockall(struct tdb_context *tdb, int ltype,
			enum tdb_lock_flags flags)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == ltype) {
		tdb->allrecord_lock.count++;
		return 0;
	}

	if (tdb->allrecord_lock.count) {
		/* a global lock of a different type exists */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb_have_extra_locks(tdb)) {
		/* can't combine global and chain locks */
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->methods->brlock(tdb, ltype,
				 FREELIST_TOP, 4*tdb->header.hash_size,
				 flags)) {
		if (flags & TDB_LOCK_WAIT) {
			TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lockall failed (%s)\n", strerror(errno)));
		}
		return -1;
	}

	tdb->allrecord_lock.count = 1;
	tdb->allrecord_lock.ltype = ltype;

	return 0;
}
/* unlock entire db */
static int _tdb_unlockall(struct tdb_context *tdb, int ltype, bool mark_lock)
{
	/* There are no locks on read-only dbs */
	if (tdb->read_only || tdb->traverse_read) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.ltype != ltype || tdb->allrecord_lock.count == 0) {
		tdb->ecode = TDB_ERR_LOCK;
		return -1;
	}

	if (tdb->allrecord_lock.count > 1) {
		tdb->allrecord_lock.count--;
		return 0;
	}

	if (!mark_lock &&
	    tdb->methods->brunlock(tdb, ltype,
				   FREELIST_TOP, 4*tdb->header.hash_size)) {
		TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed (%s)\n", strerror(errno)));
		return -1;
	}

	tdb->allrecord_lock.count = 0;
	tdb->allrecord_lock.ltype = 0;

	return 0;
}
/* lock entire database with write lock */
int tdb_lockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall");
	return _tdb_lockall(tdb, F_WRLCK, TDB_LOCK_WAIT);
}

/* lock entire database with write lock - mark only */
int tdb_lockall_mark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_mark");
	return _tdb_lockall(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY);
}

/* unlock entire database with write lock - unmark only */
int tdb_lockall_unmark(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_unmark");
	return _tdb_unlockall(tdb, F_WRLCK, true);
}

/* lock entire database with write lock - nonblocking variant */
int tdb_lockall_nonblock(struct tdb_context *tdb)
{
	int ret = _tdb_lockall(tdb, F_WRLCK, TDB_LOCK_NOWAIT);
	tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
	return ret;
}

/* unlock entire database with write lock */
int tdb_unlockall(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall");
	return _tdb_unlockall(tdb, F_WRLCK, false);
}

/* lock entire database with read lock */
int tdb_lockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_lockall_read");
	return _tdb_lockall(tdb, F_RDLCK, TDB_LOCK_WAIT);
}

/* lock entire database with read lock - nonblocking variant */
int tdb_lockall_read_nonblock(struct tdb_context *tdb)
{
	int ret = _tdb_lockall(tdb, F_RDLCK, TDB_LOCK_NOWAIT);
	tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
	return ret;
}

/* unlock entire database with read lock */
int tdb_unlockall_read(struct tdb_context *tdb)
{
	tdb_trace(tdb, "tdb_unlockall_read");
	return _tdb_unlockall(tdb, F_RDLCK, false);
}
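/*
 * Illustrative sketch (not part of the library): taking the allrecord
 * read lock around a batch of fetches so no writer can change the
 * database in between.  The handle and keys are hypothetical; error
 * handling and free() of the returned dptr buffers are omitted:
 *
 *	if (tdb_lockall_read(tdb) == 0) {
 *		TDB_DATA v1 = tdb_fetch(tdb, key1);
 *		TDB_DATA v2 = tdb_fetch(tdb, key2);
 *		tdb_unlockall_read(tdb);
 *	}
 *
 * While the allrecord lock is held, tdb_lock()/tdb_unlock() on individual
 * chains of a compatible type become no-ops, as the checks above show.
 */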
/* lock/unlock one hash chain. This is meant to be used to reduce
   contention - it cannot guarantee how many records will be locked */
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock", key);
	return ret;
}

/* lock/unlock one hash chain, non-blocking. This is meant to be used
   to reduce contention - it cannot guarantee how many records will be
   locked */
int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
	tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
	return ret;
}

/* mark a chain as locked without actually locking it. Warning! use with great caution! */
int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
{
	int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
				F_WRLCK, TDB_LOCK_MARK_ONLY);
	tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
	return ret;
}

/* unmark a chain as locked without actually unlocking it. Warning! use with great caution! */
int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
	return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
			       F_WRLCK, true);
}

/* release the write lock on one hash chain */
int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
}

/* read-lock one hash chain */
int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	int ret;
	ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
	tdb_trace_1rec(tdb, "tdb_chainlock_read", key);
	return ret;
}

/* release the read lock on one hash chain */
int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
{
	tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
	return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
}
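/*
 * Illustrative sketch (not part of the library): the usual
 * read-modify-write pattern protected by a chain lock.  The handle, key
 * and update_counter() helper are hypothetical:
 *
 *	if (tdb_chainlock(tdb, key) == 0) {
 *		TDB_DATA old = tdb_fetch(tdb, key);
 *		TDB_DATA val = update_counter(old);
 *		tdb_store(tdb, key, val, TDB_REPLACE);
 *		tdb_chainunlock(tdb, key);
 *	}
 *
 * Only the chain that key hashes to is locked, so unrelated keys can
 * still be updated by other processes in the meantime.
 */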
/* record lock stops delete underneath */
int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	if (tdb->allrecord_lock.count) {
		return 0;
	}
	return off ? tdb->methods->brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
}
/*
  Write locks override our own fcntl readlocks, so check it here.
  Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
  an error to fail to get the lock here.
*/
int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;
	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			return -1;
	return tdb->methods->brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
}
/* release a record write lock taken with tdb_write_lock_record() */
int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	return tdb->methods->brunlock(tdb, F_WRLCK, off, 1);
}
/* fcntl locks don't stack: avoid unlocking someone else's */
int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
{
	struct tdb_traverse_lock *i;
	uint32_t count = 0;

	if (tdb->allrecord_lock.count) {
		return 0;
	}

	if (off == 0)
		return 0;
	for (i = &tdb->travlocks; i; i = i->next)
		if (i->off == off)
			count++;
	return (count == 1 ? tdb->methods->brunlock(tdb, F_RDLCK, off, 1) : 0);
}
/* Do we hold any locks beyond what a transaction would expect? */
bool tdb_have_extra_locks(struct tdb_context *tdb)
{
	unsigned int extra = tdb->num_lockrecs;

	if (tdb->allrecord_lock.count) {
		return true;
	}

	/* In a transaction, we expect to hold the transaction lock */
	if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) {
		extra--;
	}
	return extra;
}
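/*
 * Illustrative sketch (not part of the library): why _tdb_lockall()
 * checks tdb_have_extra_locks().  A caller that still holds a chain lock
 * cannot take the allrecord lock on top of it (handle and key are
 * hypothetical):
 *
 *	tdb_chainlock(tdb, key);	// one entry in tdb->lockrecs
 *	tdb_lockall(tdb);		// fails with TDB_ERR_LOCK: can't
 *					// combine global and chain locks
 *	tdb_chainunlock(tdb, key);
 *	tdb_lockall(tdb);		// now succeeds
 */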
/* The transaction code uses this to remove all locks.  Note that this
   may include OPEN_LOCK. */
void tdb_release_extra_locks(struct tdb_context *tdb)
{
	unsigned int i, extra = 0;

	if (tdb->allrecord_lock.count != 0) {
		tdb_brunlock(tdb, tdb->allrecord_lock.ltype,
			     FREELIST_TOP, 4*tdb->header.hash_size);
		tdb->allrecord_lock.count = 0;
	}

	for (i=0;i<tdb->num_lockrecs;i++) {
		struct tdb_lock_type *lck = &tdb->lockrecs[i];

		/* Keep the transaction lock; drop everything else. */
		if (tdb->transaction && lck->off == TRANSACTION_LOCK) {
			tdb->lockrecs[extra++] = *lck;
		} else {
			tdb_brunlock(tdb, lck->ltype, lck->off, 1);
		}
	}
	tdb->num_locks = extra;
	tdb->num_lockrecs = extra;
	if (tdb->num_lockrecs == 0) {
		SAFE_FREE(tdb->lockrecs);
	}
}