/* 
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000
   
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
   
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
   
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
			i,
			(unsigned int)pls->context.smbpid,
			(unsigned int)pls->context.tid,
			(unsigned int)procid_to_pid(&pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

BOOL brl_same_context(const struct lock_context *ctx1,
			const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smbpid == ctx2->smbpid) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
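
/* Worked example for the extra check above: two identical locks starting
   at 0xFFFFFFFFFFFFFFF0 with size 0x20 wrap past the end of 64 bit file
   space, so start + size overflows to 0x10 and the generic interval test
   would wrongly report no overlap. The identical start/size comparison
   catches that case first. */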

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
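
/* In summary: an incoming READ lock may stack on an existing WRITE lock
   only when both come from the same context and the same fnum, while an
   incoming WRITE lock conflicts with any overlapping lock, even one held
   by the same context on the same fnum. */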

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1,
				const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static BOOL brl_conflict1(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
					lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
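
/* Example: an unlock of [10, 20) overlaps a pending lock starting at 15
   via the first test. An unlock starting at 25 against a pending lock
   covering [15, 25) is also reported as overlapping by the second test;
   the <= there means waiters that merely abut the unlocked range get
   woken as well, which appears to be intentional (waking too often is
   harmless, missing a waiter is not). */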

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
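
/* Example of the resulting behaviour: the first failed non-blocking lock
   attempt on a range returns NT_STATUS_LOCK_NOT_GRANTED and is remembered
   in fsp->last_lock_failure; an immediate retry of the same failing lock
   by the same owner then returns NT_STATUS_FILE_LOCK_CONFLICT, matching
   what w2k3 does. */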

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
	if (tdb) {
		return;
	}
	tdb = tdb_open_log(lock_path("brlock.tdb"),
			lp_open_files_db_hash_size(),
			TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
			read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!tdb) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}

	/* Activate the per-hashchain freelist */
	tdb_set_max_dead(tdb, 5);
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
	if (!tdb) {
		return;
	}
	tdb_close(tdb);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
			const struct lock_struct *plock, BOOL blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {
			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				return map_nt_error_from_unix(errno);
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		return NT_STATUS_NO_MEMORY;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					const struct lock_struct *ex,		/* existing lock. */
					const struct lock_struct *plock,	/* proposed lock. */
					BOOL *lock_was_added)
{
	BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                             +---------+
                                             | ex      |
                                             +---------+
                              +-------+
                              | plock |
                              +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
			(plock->start > (ex->start + ex->size))) {
		/* No overlap with this lock - copy existing. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {
		memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
		*lock_was_added = True;
		return 1;
	}

/*********************************************
                +-----------------------+
                |          ex           |
                +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR....
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   ex                  | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start <= plock->start + plock->size) &&
			(ex->start + ex->size > plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock before the old. */

		if (lock_types_differ) {
			/* Add new. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			/* Set new start and size. */
			lck_arr[0].start = plock->start;
			lck_arr[0].size = (ex->start + ex->size) - plock->start;
			return 1;
		}
	}

/*********************************************
   +-----------------------+
   |          ex           |
   +-----------------------+
           +---------------+
           |   plock       |
           +---------------+
OR....
   +-------+
   |  ex   |
   +-------+
           +---------------+
           |   plock       |
           +---------------+

BECOMES....
   +-------+---------------+
   | ex    |   plock       | - different lock types
   +-------+---------------+
OR.... (merge)
   +-----------------------+
   |      ex               | - same lock type.
   +-----------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock after the old. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = (plock->start + plock->size) - ex->start;
			return 1;
		}
	}

/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
		*lock_was_added = True;

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[2].start = plock->start + plock->size;
			lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 3;
		} else {
			/* Just eat plock. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge\n");
	/* Notreached. */
	abort();
	/* Keep some compilers happy. */
	return 0;
}
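
/* Worked example: with an existing WRITE lock ex = [100, 200) and a
   proposed READ lock plock = [150, 250) from the same context, the fourth
   case above applies. The lock types differ, so the output array becomes
   ex = [100, 150) followed by plock = [150, 250) and the function returns
   2. If plock were also a WRITE lock the two ranges would instead merge
   into a single ex = [100, 250) entry and the function would return 1. */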

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
			const struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	BOOL lock_was_added = False;
	BOOL signal_pending_read = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = 0;
	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added);
		}
	}

	if (!lock_was_added) {
		memcpy(&tp[count], plock, sizeof(struct lock_struct));
		count++;
	}

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {
			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				SAFE_FREE(tp);
				return map_nt_error_from_unix(errno);
			}
		}
	}

	/* Realloc so we don't leak entries per lock call. */
	tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				message_send_pid(pend_lock->context.pid,
						MSG_SMB_UNLOCK,
						NULL, 0, True);
			}
		}
	}

	return NT_STATUS_OK;
}
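
/* Sizing note for the malloc above: same-context POSIX entries are kept
   non-overlapping, so at most one existing entry can properly contain the
   proposed range, and only that case grows a single entry into three. For
   example, locking [40, 60) inside an existing [0, 100) of a different
   type yields [0, 40), [40, 60), [60, 100) - hence num_locks + 2 entries
   always suffice. */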

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		BOOL blocking_lock)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = brl_lock_windows(br_lck, &lock, blocking_lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

	return ret;
}
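
/* A minimal (hypothetical) caller sketch, assuming br_lck was obtained
   from brl_get_locks() and smbpid/offset/count come from the client
   request:

	NTSTATUS status = brl_lock(br_lck,
				smbpid,
				procid_self(),
				offset,
				count,
				WRITE_LOCK,
				WINDOWS_LOCK,
				False);

   The final False means this is not a blocking lock request. The tid and
   fnum parts of the lock context are filled in here from br_lck->fsp, so
   callers only supply the owner (smbpid/pid) and the range. */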

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
		}
	}

	return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	BOOL overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		struct lock_struct tmp_lock[3];
		BOOL lock_was_added = False;
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
				!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

		if (tmp_count == 1) {
			/* Either the locks didn't overlap, or the unlock completely
			   overlapped this lock. If it didn't overlap, then there's
			   no change in the locks. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				/* No change in this lock. */
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				count++;
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				overlap_found = True;
			}
			continue;
		} else if (tmp_count == 2) {
			/* The unlock overlapped an existing lock. Copy the truncated
			   lock into the lock array. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				if (tmp_lock[0].size != locks[i].size) {
					overlap_found = True;
				}
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
				memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
				if (tmp_lock[1].start != locks[i].start) {
					overlap_found = True;
				}
			}
			count++;
			continue;
		} else {
			/* tmp_count == 3 - (we split a lock range in two). */
			SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
			SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
			SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

			memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
			count++;
			memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
			count++;

			overlap_found = True;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */
			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				&plock->context,
				tp,
				count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
		}
	}

	return True;
}
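
/* Worked example for the tmp_count == 3 case above: unlocking [40, 60)
   out of a single held lock [0, 100) leaves two entries, [0, 40) and
   [60, 100). Because same-context POSIX locks never overlap each other,
   no later entry can also overlap the unlock range, which is why the
   loop can copy the remainder of the array and break. */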

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return brl_unlock_windows(br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	BOOL ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
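
/* A hypothetical caller sketch - testing whether a read of
   [offset, offset+count) would be blocked by another owner's lock:

	if (!brl_locktest(br_lck, smbpid, procid_self(),
			offset, count, READ_LOCK, WINDOWS_LOCK)) {
		... fail the read with NT_STATUS_FILE_LOCK_CONFLICT ...
	}

   Conflicts are checked with brl_conflict_other(), so the caller's own
   locks on the same fnum never block it. */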

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint32 *psmbpid,
		struct process_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = *psmbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		BOOL conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmbpid = exlock->context.smbpid;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		if (ret) {
			/* Hmmm. No clue what to set smbpid to - use -1. */
			*psmbpid = 0xFFFF;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_lock_cancel(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_context context;

	context.smbpid = smbpid;
	context.pid = pid;
	context.tid = br_lck->fsp->conn->cnum;

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &context) &&
				lock->fnum == br_lck->fsp->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == lock_flav &&
				lock->start == start &&
				lock->size == size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
 If this process owns Windows locks on this dev/ino pair via other fnums,
 each lock is released individually so the underlying system POSIX locks
 stay correct.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = br_lck->lock_data;
	struct process_id pid = procid_self();
	BOOL unlock_individually = False;

	if(lp_posix_locking(fsp->conn->params)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			if (br_lck->num_locks) {
				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
				if (!locks_copy) {
					smb_panic("brl_close_fnum: talloc fail.\n");
				}
			} else {
				locks_copy = NULL;
			}

			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(br_lck,
						lock->context.smbpid,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		BOOL del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
						procid_equal(&pend_lock->context.pid, &pid) &&
						pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					message_send_pid(pend_lock->context.pid,
							MSG_SMB_UNLOCK,
							NULL, 0, True);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}

/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/

static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!process_exists(lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smbpid &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
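
/* Note on the second pass above: ZERO_STRUCTP() zeroes a dead entry in
   place, and the copy loop then recognises live entries by their nonzero
   smbpid and tid. This appears to rely on a real lock context never
   having both of those fields zero. */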

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
	struct lock_struct *locks;
	struct lock_key *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(dbuf.dptr, dbuf.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct lock_key *)kbuf.dptr;
	orig_num_locks = num_locks = dbuf.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		dbuf.dptr = (uint8 *)locks;
		dbuf.dsize = num_locks * sizeof(*locks);

		if (dbuf.dsize) {
			tdb_store(ttdb, kbuf, dbuf, TDB_REPLACE);
		} else {
			tdb_delete(ttdb, kbuf);
		}
	}

	for ( i=0; i<num_locks; i++) {
		traverse_callback(key->device,
				  key->inode,
				  locks[i].context.pid,
				  locks[i].lock_type,
				  locks[i].lock_flav,
				  locks[i].start,
				  locks[i].size);
	}

	SAFE_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(BRLOCK_FN(fn))
{
	if (!tdb) {
		return 0;
	}
	return tdb_traverse(tdb, traverse_fn, (void *)fn);
}

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	TDB_DATA key;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct lock_key);

	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		if (tdb_delete(tdb, key) == -1) {
			smb_panic("Could not delete byte range lock entry\n");
		}
	} else {
		TDB_DATA data;
		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
			smb_panic("Could not store byte range mode entry\n");
		}
	}

 done:

	if (!br_lck->read_only) {
		tdb_chainunlock(tdb, key);
	}
	SAFE_FREE(br_lck->lock_data);
	return 0;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, BOOL read_only)
{
	TDB_DATA key;
	TDB_DATA data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	memset(&br_lck->key, '\0', sizeof(struct lock_key));
	br_lck->key.device = fsp->dev;
	br_lck->key.inode = fsp->inode;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct lock_key);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		read_only = False;
	}

	if (read_only) {
		br_lck->read_only = True;
	} else {
		if (tdb_chainlock(tdb, key) != 0) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->read_only = False;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	data = tdb_fetch(tdb, key);
	br_lck->lock_data = (struct lock_struct *)data.dptr;
	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (!fsp->lockdb_clean) {

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		if (!validate_lock_entries(&br_lck->num_locks,
					   &br_lck->lock_data)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on dev=%.0f, inode=%.0f\n",
			br_lck->num_locks,
			(double)fsp->dev, (double)fsp->inode ));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}
	return br_lck;
}

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, True);
}