smbd: Invalidate the session correctly.
[Samba/wip.git] / source3 / locking / brlock.c
blob b5eebc8e0401984d1d618c99e20585b08690ad2f
/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "system/filesys.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"
#include "util_tdb.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0
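
/*
 * ZERO_ZERO is a compile-time switch, disabled here, that enables the
 * special-case handling of zero-offset/zero-length (0/0) locks guarded
 * by the #if ZERO_ZERO blocks below (brl_conflict1, lock_compare and
 * the sorted lock list in brl_lock).
 */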

/* The open brlock.tdb database. */

static struct db_context *brlock_db;

struct byte_range_lock {
	struct files_struct *fsp;
	unsigned int num_locks;
	bool modified;
	bool have_read_oplocks;
	struct lock_struct *lock_data;
	struct db_record *record;
};

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
			i,
			(unsigned long long)pls->context.smblctx,
			(unsigned int)pls->context.tid,
			server_id_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %llu, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		(unsigned long long)pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}

unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}

struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}

bool brl_have_read_oplocks(const struct byte_range_lock *brl)
{
	return brl->have_read_oplocks;
}

void brl_set_have_read_oplocks(struct byte_range_lock *brl,
			       bool have_read_oplocks)
{
	DEBUG(10, ("Setting have_read_oplocks to %s\n",
		   have_read_oplocks ? "true" : "false"));
	SMB_ASSERT(brl->record != NULL); /* otherwise we're readonly */
	brl->have_read_oplocks = have_read_oplocks;
	brl->modified = true;
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
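
/*
 * For example (illustrative values only): lck1 = { start=0, size=10 }
 * and lck2 = { start=10, size=5 } do not overlap, because
 * lck2->start >= lck1->start + lck1->size. The equal start/size test
 * above runs first because start + size can wrap around the 64-bit
 * offset space for locks that extend beyond the end of 64-bit file
 * space, which would make the range arithmetic below unreliable.
 */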

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	 * context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
		    lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
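
/*
 * Note the asymmetry above: the second test uses <=, so an unlock whose
 * start sits exactly at pend_lock->start + pend_lock->size still counts
 * as overlapping. E.g. (illustrative values) lock = { start=15, size=5 }
 * against pend_lock = { start=5, size=10 } matches the second test, so
 * callers treat the pending waiter as affected and wake it up to
 * re-evaluate its lock request.
 */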

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp,
				const struct lock_struct *lock,
				bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
	    lock->context.tid == fsp->last_lock_failure.context.tid &&
	    lock->fnum == fsp->last_lock_failure.fnum &&
	    lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(), tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 lock_path("brlock.tdb")));
		return;
	}
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
			plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                +---------+
                                | ex      |
                                +---------+
                 +-------+
                 | plock |
                 +-------+
OR....
        +---------+
        | ex      |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        | ex                        |
        +---------------------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             | ex            |
             +---------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        | ex    |
                        +-------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        | ex    |
        +-------+
                +---------------+
                | plock         |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        | ex                    |
        +-----------------------+
        +---------------+
        | plock         |
        +---------------+
OR
               +----------------+
               | ex             |
               +----------------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        | ex                    |
        +-----------------------+
                +---------------+
                | plock         |
                +---------------+
OR
        +-------------+
        | ex          |
        +-------------+
                +---------------+
                | plock         |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        | ex                        |
        +---------------------------+
                +---------+
                | plock   |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    | plock   | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        | plock                     | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
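
/*
 * A worked example (illustrative values only): with an existing WRITE
 * lock ex = { start=10, size=10 } and an incoming WRITE lock
 * plock = { start=15, size=10 }, the "Overlap before" case matches.
 * The lock types are the same, so the ranges merge: plock becomes
 * { start=10, size=15 } and 0 is returned, meaning ex is discarded and
 * the (possibly grown) plock carries on to be merged against the
 * remaining locks in the caller's loop.
 */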

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}

NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
						blocking_lock, blr);
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

#ifdef DEVELOPER
	/* Quieten valgrind on test. */
	ZERO_STRUCT(lock);
#endif

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
					       &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
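
/*
 * A typical call (sketch with hypothetical values; the real call sites
 * live in do_lock() in source3/locking/locking.c) looks roughly like:
 *
 *	uint64_t blocker;
 *	status = brl_lock(msg_ctx, br_lck, smblctx,
 *			  messaging_server_id(msg_ctx),
 *			  0, 100, WRITE_LOCK, WINDOWS_LOCK,
 *			  false, &blocker, NULL);
 *
 * On failure, blocker holds the smblctx of the lock that blocked us, or
 * (uint64_t)-1 when a raw POSIX lock outside Samba was the cause.
 */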

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
				struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
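
/*
 * Note that a Windows-flavour unlock must match an existing lock's
 * context, fnum, start and size exactly; there is no partial unlock or
 * range splitting as in the POSIX-flavour path below.
 */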

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}

bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
						  plock);
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
						  br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for %s file %s\n",
			  (double)start, (double)size, ret ? "locked" : "unlocked",
			  fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint64_t *psmblctx,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for %s file %s\n",
			  (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			  fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}

bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock,
				     struct blocking_lock_record *blr)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock, blr);
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/
bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav,
		struct blocking_lock_record *blr)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
						 &lock, blr);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}

bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    IS_PENDING_LOCK(lock->lock_type) &&
		    lock->lock_flav == plock->lock_flav &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
		if (!locks_copy) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
				(lock->fnum == fnum)) {
			brl_unlock(msg_ctx,
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}

bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx = fsp->op->global->open_persistent_id;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!serverid_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
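
/*
 * brl_mark_disconnected() above and brl_reconnect_disconnected() below
 * are the two halves of durable-handle support: on disconnect every
 * lock owned by the handle is tagged with a disconnected server_id and
 * invalid tid/fnum; on a durable reconnect the same entries are
 * reclaimed by writing the new owner back in. Any unexpected entry
 * makes either function bail out and report failure.
 */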

bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx = fsp->op->global->open_persistent_id;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */
	fsp->lockdb_clean = true;

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}

/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/
static bool validate_lock_entries(TALLOC_CTX *mem_ctx,
				  unsigned int *pnum_entries, struct lock_struct **pplocks,
				  bool keep_disconnected)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;
	TALLOC_CTX *frame = talloc_stackframe();
	struct server_id *ids;
	bool *exists;

	ids = talloc_array(frame, struct server_id, *pnum_entries);
	if (ids == NULL) {
		DEBUG(0, ("validate_lock_entries: "
			  "talloc_array(struct server_id, %u) failed\n",
			  *pnum_entries));
		talloc_free(frame);
		return false;
	}

	exists = talloc_array(frame, bool, *pnum_entries);
	if (exists == NULL) {
		DEBUG(0, ("validate_lock_entries: "
			  "talloc_array(bool, %u) failed\n",
			  *pnum_entries));
		talloc_free(frame);
		return false;
	}

	for (i = 0; i < *pnum_entries; i++) {
		ids[i] = locks[i].context.pid;
	}

	if (!serverids_exist(ids, *pnum_entries, exists)) {
		DEBUG(3, ("validate_lock_entries: serverids_exist failed\n"));
		talloc_free(frame);
		return false;
	}

	for (i = 0; i < *pnum_entries; i++) {
		if (exists[i]) {
			num_valid_entries++;
			continue;
		}

		if (keep_disconnected &&
		    server_id_is_disconnected(&ids[i]))
		{
			num_valid_entries++;
			continue;
		}

		/* This process no longer exists - mark this
		   entry as invalid by zeroing it. */
		ZERO_STRUCTP(&locks[i]);
	}
	TALLOC_FREE(frame);

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = talloc_array(
				mem_ctx, struct lock_struct,
				num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smblctx &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		TALLOC_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
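
/*
 * The two-pass scheme above first zeroes dead entries in place
 * (ZERO_STRUCTP) and then compacts the array, treating an entry as
 * valid only if both context.smblctx and context.tid are nonzero. This
 * relies on no legitimate lock record ever carrying a zero smblctx or
 * a zero tid.
 */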

struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	orig_num_locks = num_locks = value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(talloc_tos(), &num_locks, &locks, true)) {
		TALLOC_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			dbwrap_record_store(rec, data, TDB_REPLACE);
		} else {
			dbwrap_record_delete(rec);
		}
	}

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	size_t data_len;
	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	if (br_lck->have_read_oplocks) {
		data_len += 1;
	}

	DEBUG(10, ("data_len=%d\n", (int)data_len));

	if (data_len == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dsize = data_len;
		data.dptr = talloc_array(talloc_tos(), uint8_t, data_len);
		SMB_ASSERT(data.dptr != NULL);

		memcpy(data.dptr, br_lck->lock_data,
		       br_lck->num_locks * sizeof(struct lock_struct));

		if (br_lck->have_read_oplocks) {
			data.dptr[data_len-1] = 1;
		}

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		TALLOC_FREE(data.dptr);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
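
/*
 * On-disk record layout implied by the code above and by brl_get_locks()
 * below: an array of struct lock_struct entries, optionally followed by
 * a single trailing byte holding the have_read_oplocks flag, i.e.
 *
 *	dsize = num_locks * sizeof(struct lock_struct) [+ 1]
 *
 * which is why readers test (dsize % sizeof(struct lock_struct)) == 1.
 */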
1955 /*******************************************************************
1956 Fetch a set of byte range lock data from the database.
1957 Leave the record locked.
1958 TALLOC_FREE(brl) will release the lock in the destructor.
1959 ********************************************************************/
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = talloc(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->have_read_oplocks = false;
	br_lck->modified = false;

	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = talloc_array(
			br_lck, struct lock_struct, br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("talloc_array failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr,
		       talloc_get_size(br_lck->lock_data));
	}

	DEBUG(10, ("data.dsize=%d\n", (int)data.dsize));

	if ((data.dsize % sizeof(struct lock_struct)) == 1) {
		br_lck->have_read_oplocks = (data.dptr[data.dsize-1] == 1);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/*
		 * This is the first time we access the byte range lock
		 * record with this fsp. Go through and ensure all entries
		 * are valid - remove any that are not.
		 * This makes the lockdb self cleaning at low cost.
		 *
		 * Note: Disconnected entries belong to disconnected
		 * durable handles. So at this point, we have a new
		 * handle on the file and the disconnected durable has
		 * already been closed (we are not a durable reconnect).
		 * So we need to clean the disconnected brl entry.
		 */

		if (!validate_lock_entries(br_lck, &br_lck->num_locks,
					   &br_lck->lock_data, false)) {
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = true;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = true;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10, ("brl_get_locks: %u current locks on file_id %s\n",
			   br_lck->num_locks,
			   file_id_string_tos(&fsp->file_id)));
		for (i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
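
/*
 * Typical caller pattern (illustrative sketch only, not a caller in
 * this file; the NT_STATUS_NO_MEMORY return is hypothetical):
 *
 *	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
 *	if (br_lck == NULL) {
 *		return NT_STATUS_NO_MEMORY;
 *	}
 *	...inspect or modify the locks, which sets br_lck->modified...
 *	TALLOC_FREE(br_lck);
 *
 * The final TALLOC_FREE runs byte_range_lock_destructor, which flushes
 * any modifications back to brlock.tdb and releases the locked record.
 */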
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};
static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lock;

	br_lock = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lock == NULL) {
		*state->br_lock = NULL;
		return;
	}
	br_lock->lock_data = (struct lock_struct *)talloc_memdup(
		br_lock, data.dptr, data.dsize);
	br_lock->num_locks = data.dsize / sizeof(struct lock_struct);

	/* talloc does not zero the object, so initialize the flag in
	 * case the record carries no trailing flag byte. */
	br_lock->have_read_oplocks = false;

	if ((data.dsize % sizeof(struct lock_struct)) == 1) {
		br_lock->have_read_oplocks = (data.dptr[data.dsize-1] == 1);
	}

	DEBUG(10, ("Got %d bytes, have_read_oplocks: %s\n", (int)data.dsize,
		   br_lock->have_read_oplocks ? "true" : "false"));

	*state->br_lock = br_lock;
}
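
/*
 * The pool size handed to talloc_pooled_object above is data.dsize,
 * so the talloc_memdup of the lock array is carved out of the same
 * allocation; parsing a record costs a single malloc.
 */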
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct byte_range_lock *rw = NULL;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	if (!fsp->lockdb_clean) {
		/*
		 * Fetch the record in R/W mode to give validate_lock_entries
		 * a chance to kick in once.
		 */
		rw = brl_get_locks(talloc_tos(), fsp);
		if (rw == NULL) {
			return NULL;
		}
		fsp->lockdb_clean = true;
	}

	if (rw != NULL) {
		size_t lock_data_size;

		/*
		 * Make a copy of the already retrieved and sanitized rw record
		 */
		lock_data_size = rw->num_locks * sizeof(struct lock_struct);
		br_lock = talloc_pooled_object(
			fsp, struct byte_range_lock, 1, lock_data_size);
		if (br_lock == NULL) {
			goto fail;
		}
		br_lock->have_read_oplocks = rw->have_read_oplocks;
		br_lock->num_locks = rw->num_locks;
		br_lock->lock_data = (struct lock_struct *)talloc_memdup(
			br_lock, rw->lock_data, lock_data_size);
	} else {
		struct brl_get_locks_readonly_state state;
		NTSTATUS status;

		/*
		 * Parse the record fresh from the database
		 */
		state.mem_ctx = fsp;
		state.br_lock = &br_lock;

		status = dbwrap_parse_record(
			brlock_db,
			make_tdb_data((uint8_t *)&fsp->file_id,
				      sizeof(fsp->file_id)),
			brl_get_locks_readonly_parser, &state);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(3, ("Could not parse byte range lock record: "
				  "%s\n", nt_errstr(status)));
			goto fail;
		}
		if (br_lock == NULL) {
			goto fail;
		}
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	if (lp_clustering()) {
		/*
		 * In the cluster case we can't cache the brlock struct
		 * because dbwrap_get_seqnum does not work reliably over
		 * ctdb. Thus we have to throw away the brlock struct soon.
		 */
		talloc_steal(talloc_tos(), br_lock);
	} else {
		/*
		 * Cache the brlock struct, invalidated when the dbwrap_seqnum
		 * changes. See beginning of this routine.
		 */
		TALLOC_FREE(fsp->brlock_rec);
		fsp->brlock_rec = br_lock;
		fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);
	}

fail:
	TALLOC_FREE(rw);
	return br_lock;
}
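
/*
 * Cache behaviour sketch (illustration only): in the non-clustered
 * case, two consecutive read-only fetches without an intervening
 * database change return the same cached struct:
 *
 *	struct byte_range_lock *a = brl_get_locks_readonly(fsp);
 *	struct byte_range_lock *b = brl_get_locks_readonly(fsp);
 *	a == b holds, since dbwrap_get_seqnum() has not moved
 *
 * A store through byte_range_lock_flush() bumps the seqnum, so the
 * next read-only fetch re-parses the record.
 */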
struct brl_revalidate_state {
	ssize_t array_size;
	uint32_t num_pids;
	struct server_id *pids;
};
/*
 * Collect PIDs of all processes with pending entries
 */
static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}
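
/*
 * add_to_large_array signals allocation failure by setting array_size
 * to -1; brl_revalidate() checks for that once the brl_forall()
 * traversal has finished rather than aborting it half-way through.
 */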
/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (const struct server_id *)p1;
	const struct server_id *i2 = (const struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
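
/*
 * Note: this compares the pid field only.  In clustered setups a
 * struct server_id also carries a vnn, so the sort groups equal pids
 * from different nodes together; the duplicate suppression below
 * still uses the full serverid_equal, and a stray duplicate would at
 * worst trigger a harmless extra retry message.
 */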
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node
 * has died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in
 * an array, then qsort that array and only send to non-dupes.
 */
void brl_revalidate(struct messaging_context *msg_ctx,
		    void *private_data,
		    uint32_t msg_type,
		    struct server_id server_id,
		    DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32_t i;
	struct server_id last_pid;

	if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (serverid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

done:
	TALLOC_FREE(state);
	return;
}
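
/*
 * Remove byte range locks left behind by a disconnected durable
 * handle.  The cleanup is all-or-nothing: the record is deleted only
 * if every entry belongs to a disconnected server and carries the
 * expected persistent open id; any live or foreign entry aborts the
 * cleanup and the function returns false.
 */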
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	NTSTATUS status;

	key = make_tdb_data((void *)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record "
			  "for file %s\n", file_id_string(frame, &fid)));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct *)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for "
			   "file %s\n", file_id_string(frame, &fid)));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s used by server %s, do not cleanup\n",
				  file_id_string(frame, &fid),
				  server_id_str(frame, &ctx->pid)));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s expected smblctx %llu but found %llu"
				  ", do not cleanup\n",
				  file_id_string(frame, &fid),
				  (unsigned long long)open_persistent_id,
				  (unsigned long long)ctx->smblctx));
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to delete record "
			  "for file %s from %s, open %llu: %s\n",
			  file_id_string(frame, &fid), dbwrap_name(brlock_db),
			  (unsigned long long)open_persistent_id,
			  nt_errstr(status)));
		goto done;
	}

	DEBUG(10, ("brl_cleanup_disconnected: "
		   "file %s cleaned up %u entries from open %llu\n",
		   file_id_string(frame, &fid), num,
		   (unsigned long long)open_persistent_id));

	ret = true;
done:
	talloc_free(frame);
	return ret;
}