1 /*
2 Unix SMB/CIFS implementation.
3 byte range locking code
4 Updated to handle range splits/merges.
6 Copyright (C) Andrew Tridgell 1992-2000
7 Copyright (C) Jeremy Allison 1992-2000
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 /* This module implements a tdb based byte range locking service,
24 replacing the fcntl() based byte range locking previously
25 used. This allows us to provide the same semantics as NT */
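/* A rough sketch of how callers drive this module (illustrative only -
   see the definitions below for the authoritative signatures):

       brl_init(false);                          at smbd startup
       br_lck = brl_get_locks(talloc_tos(), fsp);
       status = brl_lock(msg_ctx, br_lck, smblctx, pid, start, size,
                         WRITE_LOCK, WINDOWS_LOCK, blocking_lock,
                         &blocker_smblctx, blr);
       TALLOC_FREE(br_lck);                      destructor stores + unlocks

   Lock records live in brlock.tdb, keyed by the file's file_id. */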
27 #include "includes.h"
28 #include "librpc/gen_ndr/messaging.h"
29 #include "smbd/globals.h"
31 #undef DBGC_CLASS
32 #define DBGC_CLASS DBGC_LOCKING
34 #define ZERO_ZERO 0
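/* ZERO_ZERO selects alternative handling of 0/0 (start == 0, size == 0)
   lock ranges: the brl_conflict1() check, a sorted lock list and the
   "delete write locks by preference" unlock path below. It is compiled
   out by default. */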
36 /* The open brlock.tdb database. */
38 static struct db_context *brlock_db;
40 /****************************************************************************
41 Debug info at level 10 for lock struct.
42 ****************************************************************************/
44 static void print_lock_struct(unsigned int i, struct lock_struct *pls)
46 DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
48 (unsigned long long)pls->context.smblctx,
49 (unsigned int)pls->context.tid,
50 procid_str(talloc_tos(), &pls->context.pid) ));
52 DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
53 (double)pls->start,
54 (double)pls->size,
55 pls->fnum,
56 lock_type_name(pls->lock_type),
57 lock_flav_name(pls->lock_flav) ));
60 /****************************************************************************
61 See if two locking contexts are equal.
62 ****************************************************************************/
64 bool brl_same_context(const struct lock_context *ctx1,
65 const struct lock_context *ctx2)
67 return (procid_equal(&ctx1->pid, &ctx2->pid) &&
68 (ctx1->smblctx == ctx2->smblctx) &&
69 (ctx1->tid == ctx2->tid));
72 /****************************************************************************
73 See if lck1 and lck2 overlap.
74 ****************************************************************************/
76 static bool brl_overlap(const struct lock_struct *lck1,
77 const struct lock_struct *lck2)
79 /* XXX Remove for Win7 compatibility. */
80 /* this extra check is not redundant - it copes with locks
81 that go beyond the end of 64 bit file space */
82 if (lck1->size != 0 &&
83 lck1->start == lck2->start &&
84 lck1->size == lck2->size) {
85 return True;
88 if (lck1->start >= (lck2->start+lck2->size) ||
89 lck2->start >= (lck1->start+lck1->size)) {
90 return False;
92 return True;
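/* For example, a lock at start 10 with size 5 covers bytes 10..14: it does
   not overlap a lock starting at byte 15, but does overlap one starting at
   byte 14. The equality test above catches ranges whose start+size wraps
   past the end of 64-bit file space and so can't use this arithmetic. */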
95 /****************************************************************************
96 See if lock2 can be added when lock1 is in place.
97 ****************************************************************************/
99 static bool brl_conflict(const struct lock_struct *lck1,
100 const struct lock_struct *lck2)
102 /* Ignore PENDING locks. */
103 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
104 return False;
106 /* Read locks never conflict. */
107 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
108 return False;
111 /* A READ lock can stack on top of a WRITE lock if they have the same
112 * context & fnum. */
113 if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
114 brl_same_context(&lck1->context, &lck2->context) &&
115 lck1->fnum == lck2->fnum) {
116 return False;
119 return brl_overlap(lck1, lck2);
122 /****************************************************************************
123 See if lock2 can be added when lock1 is in place - when both locks are POSIX
124 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
125 know already match.
126 ****************************************************************************/
128 static bool brl_conflict_posix(const struct lock_struct *lck1,
129 const struct lock_struct *lck2)
131 #if defined(DEVELOPER)
132 SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
133 SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
134 #endif
136 /* Ignore PENDING locks. */
137 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
138 return False;
140 /* Read locks never conflict. */
141 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
142 return False;
145 /* Locks on the same context don't conflict. Ignore fnum. */
146 if (brl_same_context(&lck1->context, &lck2->context)) {
147 return False;
150 /* One is read, the other write, or the context is different,
151 do they overlap ? */
152 return brl_overlap(lck1, lck2);
155 #if ZERO_ZERO
156 static bool brl_conflict1(const struct lock_struct *lck1,
157 const struct lock_struct *lck2)
159 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
160 return False;
162 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
163 return False;
166 if (brl_same_context(&lck1->context, &lck2->context) &&
167 lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
168 return False;
171 if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
172 return True;
175 if (lck1->start >= (lck2->start + lck2->size) ||
176 lck2->start >= (lck1->start + lck1->size)) {
177 return False;
180 return True;
182 #endif
184 /****************************************************************************
185 Check to see if this lock conflicts, but ignore our own locks on the
186 same fnum only. This is the read/write lock check code path.
187 This is never used in the POSIX lock case.
188 ****************************************************************************/
190 static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
192 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
193 return False;
195 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
196 return False;
198 /* POSIX flavour locks never conflict here - this is only called
199 in the read/write path. */
201 if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
202 return False;
205 * Incoming WRITE locks conflict with existing READ locks even
206 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
209 if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
210 if (brl_same_context(&lck1->context, &lck2->context) &&
211 lck1->fnum == lck2->fnum)
212 return False;
215 return brl_overlap(lck1, lck2);
218 /****************************************************************************
219 Check if an unlock overlaps a pending lock.
220 ****************************************************************************/
222 static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
224 if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
225 return True;
226 if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
227 return True;
228 return False;
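/* Note this errs on the generous side: an unlock that covers the pending
   lock's start byte, or whose own start lies anywhere inside (or one byte
   past the end of) the pending range, counts as an overlap, so callers
   wake the pending waiter with MSG_SMB_UNLOCK. */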
231 /****************************************************************************
232 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
233 is the same as this one and changes its error code. I wonder if any
234 app depends on this ?
235 ****************************************************************************/
237 NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
239 if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
240 /* amazing the little things you learn with a test
241 suite. Locks beyond this offset (as a 64 bit
242 number!) always generate the conflict error code,
243 unless the top bit is set */
244 if (!blocking_lock) {
245 fsp->last_lock_failure = *lock;
247 return NT_STATUS_FILE_LOCK_CONFLICT;
250 if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
251 lock->context.tid == fsp->last_lock_failure.context.tid &&
252 lock->fnum == fsp->last_lock_failure.fnum &&
253 lock->start == fsp->last_lock_failure.start) {
254 return NT_STATUS_FILE_LOCK_CONFLICT;
257 if (!blocking_lock) {
258 fsp->last_lock_failure = *lock;
260 return NT_STATUS_LOCK_NOT_GRANTED;
263 /****************************************************************************
264 Open up the brlock.tdb database.
265 ****************************************************************************/
267 void brl_init(bool read_only)
269 int tdb_flags;
271 if (brlock_db) {
272 return;
275 tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;
277 if (!lp_clustering()) {
279 * We can't use the SEQNUM trick to cache brlock
280 * entries in the clustering case because ctdb seqnum
281 * propagation has a delay.
283 tdb_flags |= TDB_SEQNUM;
286 brlock_db = db_open(NULL, lock_path("brlock.tdb"),
287 lp_open_files_db_hash_size(), tdb_flags,
288 read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
289 if (!brlock_db) {
290 DEBUG(0,("Failed to open byte range locking database %s\n",
291 lock_path("brlock.tdb")));
292 return;
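/* The TDB_SEQNUM flag set above (non-clustered case only) is what lets
   brl_get_locks_readonly() below cache a read-only copy of the lock data
   on the files_struct and cheaply revalidate it against the database
   sequence number. */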
296 /****************************************************************************
297 Close down the brlock.tdb database.
298 ****************************************************************************/
300 void brl_shutdown(void)
302 TALLOC_FREE(brlock_db);
305 #if ZERO_ZERO
306 /****************************************************************************
307 Compare two locks for sorting.
308 ****************************************************************************/
310 static int lock_compare(const struct lock_struct *lck1,
311 const struct lock_struct *lck2)
313 if (lck1->start != lck2->start) {
314 return (lck1->start - lck2->start);
316 if (lck2->size != lck1->size) {
317 return ((int)lck1->size - (int)lck2->size);
319 return 0;
321 #endif
323 /****************************************************************************
324 Lock a range of bytes - Windows lock semantics.
325 ****************************************************************************/
327 NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
328 struct lock_struct *plock, bool blocking_lock)
330 unsigned int i;
331 files_struct *fsp = br_lck->fsp;
332 struct lock_struct *locks = br_lck->lock_data;
333 NTSTATUS status;
335 SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);
337 if ((plock->start + plock->size - 1 < plock->start) &&
338 plock->size != 0) {
339 return NT_STATUS_INVALID_LOCK_RANGE;
342 for (i=0; i < br_lck->num_locks; i++) {
343 /* Do any Windows or POSIX locks conflict ? */
344 if (brl_conflict(&locks[i], plock)) {
345 /* Remember who blocked us. */
346 plock->context.smblctx = locks[i].context.smblctx;
347 return brl_lock_failed(fsp,plock,blocking_lock);
349 #if ZERO_ZERO
350 if (plock->start == 0 && plock->size == 0 &&
351 locks[i].size == 0) {
352 break;
354 #endif
357 if (!IS_PENDING_LOCK(plock->lock_type)) {
358 contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
361 /* We can get the Windows lock, now see if it needs to
362 be mapped into a lower level POSIX one, and if so can
363 we get it ? */
365 if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
366 int errno_ret;
367 if (!set_posix_lock_windows_flavour(fsp,
368 plock->start,
369 plock->size,
370 plock->lock_type,
371 &plock->context,
372 locks,
373 br_lck->num_locks,
374 &errno_ret)) {
376 /* We don't know who blocked us. */
377 plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
379 if (errno_ret == EACCES || errno_ret == EAGAIN) {
380 status = NT_STATUS_FILE_LOCK_CONFLICT;
381 goto fail;
382 } else {
383 status = map_nt_error_from_unix(errno);
384 goto fail;
389 /* no conflicts - add it to the list of locks */
390 locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
391 if (!locks) {
392 status = NT_STATUS_NO_MEMORY;
393 goto fail;
396 memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
397 br_lck->num_locks += 1;
398 br_lck->lock_data = locks;
399 br_lck->modified = True;
401 return NT_STATUS_OK;
402 fail:
403 if (!IS_PENDING_LOCK(plock->lock_type)) {
404 contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
406 return status;
409 /****************************************************************************
410 Cope with POSIX range splits and merges.
411 ****************************************************************************/
413 static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */
414 struct lock_struct *ex, /* existing lock. */
415 struct lock_struct *plock) /* proposed lock. */
417 bool lock_types_differ = (ex->lock_type != plock->lock_type);
419 /* We can't merge non-conflicting locks on different contexts - ignore fnum. */
421 if (!brl_same_context(&ex->context, &plock->context)) {
422 /* Just copy. */
423 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
424 return 1;
427 /* We now know we have the same context. */
429 /* Did we overlap ? */
431 /*********************************************
432 +---------+
433 | ex |
434 +---------+
435 +-------+
436 | plock |
437 +-------+
438 OR....
439 +---------+
440 | ex |
441 +---------+
442 **********************************************/
444 if ( (ex->start > (plock->start + plock->size)) ||
445 (plock->start > (ex->start + ex->size))) {
447 /* No overlap with this lock - copy existing. */
449 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
450 return 1;
453 /*********************************************
454 +---------------------------+
455 | ex |
456 +---------------------------+
457 +---------------------------+
458 | plock | -> replace with plock.
459 +---------------------------+
461 +---------------+
462 | ex |
463 +---------------+
464 +---------------------------+
465 | plock | -> replace with plock.
466 +---------------------------+
468 **********************************************/
470 if ( (ex->start >= plock->start) &&
471 (ex->start + ex->size <= plock->start + plock->size) ) {
473 /* Replace - discard existing lock. */
475 return 0;
478 /*********************************************
479 Adjacent after.
480 +-------+
481 | ex |
482 +-------+
483 +---------------+
484 | plock |
485 +---------------+
487 BECOMES....
488 +---------------+-------+
489 | plock | ex | - different lock types.
490 +---------------+-------+
491 OR.... (merge)
492 +-----------------------+
493 | plock | - same lock type.
494 +-----------------------+
495 **********************************************/
497 if (plock->start + plock->size == ex->start) {
499 /* If the lock types are the same, we merge, if different, we
500 add the remainder of the old lock. */
502 if (lock_types_differ) {
503 /* Add existing. */
504 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
505 return 1;
506 } else {
507 /* Merge - adjust incoming lock as we may have more
508 * merging to come. */
509 plock->size += ex->size;
510 return 0;
514 /*********************************************
515 Adjacent before.
516 +-------+
517 | ex |
518 +-------+
519 +---------------+
520 | plock |
521 +---------------+
522 BECOMES....
523 +-------+---------------+
524 | ex | plock | - different lock types
525 +-------+---------------+
527 OR.... (merge)
528 +-----------------------+
529 | plock | - same lock type.
530 +-----------------------+
532 **********************************************/
534 if (ex->start + ex->size == plock->start) {
536 /* If the lock types are the same, we merge, if different, we
537 add the existing lock. */
539 if (lock_types_differ) {
540 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
541 return 1;
542 } else {
543 /* Merge - adjust incoming lock as we may have more
544 * merging to come. */
545 plock->start = ex->start;
546 plock->size += ex->size;
547 return 0;
551 /*********************************************
552 Overlap after.
553 +-----------------------+
554 | ex |
555 +-----------------------+
556 +---------------+
557 | plock |
558 +---------------+
560 +----------------+
561 | ex |
562 +----------------+
563 +---------------+
564 | plock |
565 +---------------+
567 BECOMES....
568 +---------------+-------+
569 | plock | ex | - different lock types.
570 +---------------+-------+
571 OR.... (merge)
572 +-----------------------+
573 | plock | - same lock type.
574 +-----------------------+
575 **********************************************/
577 if ( (ex->start >= plock->start) &&
578 (ex->start <= plock->start + plock->size) &&
579 (ex->start + ex->size > plock->start + plock->size) ) {
581 /* If the lock types are the same, we merge, if different, we
582 add the remainder of the old lock. */
584 if (lock_types_differ) {
585 /* Add remaining existing. */
586 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
587 /* Adjust existing start and size. */
588 lck_arr[0].start = plock->start + plock->size;
589 lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
590 return 1;
591 } else {
592 /* Merge - adjust incoming lock as we may have more
593 * merging to come. */
594 plock->size += (ex->start + ex->size) - (plock->start + plock->size);
595 return 0;
599 /*********************************************
600 Overlap before.
601 +-----------------------+
602 | ex |
603 +-----------------------+
604 +---------------+
605 | plock |
606 +---------------+
608 +-------------+
609 | ex |
610 +-------------+
611 +---------------+
612 | plock |
613 +---------------+
615 BECOMES....
616 +-------+---------------+
617 | ex | plock | - different lock types
618 +-------+---------------+
620 OR.... (merge)
621 +-----------------------+
622 | plock | - same lock type.
623 +-----------------------+
625 **********************************************/
627 if ( (ex->start < plock->start) &&
628 (ex->start + ex->size >= plock->start) &&
629 (ex->start + ex->size <= plock->start + plock->size) ) {
631 /* If the lock types are the same, we merge, if different, we
632 add the truncated old lock. */
634 if (lock_types_differ) {
635 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
636 /* Adjust existing size. */
637 lck_arr[0].size = plock->start - ex->start;
638 return 1;
639 } else {
640 /* Merge - adjust incoming lock as we may have more
641 * merging to come. MUST ADJUST plock SIZE FIRST ! */
642 plock->size += (plock->start - ex->start);
643 plock->start = ex->start;
644 return 0;
648 /*********************************************
649 Complete overlap.
650 +---------------------------+
651 | ex |
652 +---------------------------+
653 +---------+
654 | plock |
655 +---------+
656 BECOMES.....
657 +-------+---------+---------+
658 | ex | plock | ex | - different lock types.
659 +-------+---------+---------+
661 +---------------------------+
662 | plock | - same lock type.
663 +---------------------------+
664 **********************************************/
666 if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
668 if (lock_types_differ) {
670 /* We have to split ex into two locks here. */
672 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
673 memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
675 /* Adjust first existing size. */
676 lck_arr[0].size = plock->start - ex->start;
678 /* Adjust second existing start and size. */
679 lck_arr[1].start = plock->start + plock->size;
680 lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
681 return 2;
682 } else {
683 /* Just eat the existing locks, merge them into plock. */
684 plock->start = ex->start;
685 plock->size = ex->size;
686 return 0;
690 /* Never get here. */
691 smb_panic("brlock_posix_split_merge");
692 /* Notreached. */
694 /* Keep some compilers happy. */
695 return 0;
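/* Worked example: given an existing same-context POSIX lock ex with
   start 100, size 100 and a proposed plock with start 140, size 20 of a
   different lock type, the "complete overlap" case above returns 2 - ex
   is split into (start 100, size 40) and (start 160, size 40) either side
   of plock. This is why brl_lock_posix() allocates two spare entries and
   brl_unlock_posix() one. */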
698 /****************************************************************************
699 Lock a range of bytes - POSIX lock semantics.
700 We must cope with range splits and merges.
701 ****************************************************************************/
703 static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
704 struct byte_range_lock *br_lck,
705 struct lock_struct *plock)
707 unsigned int i, count, posix_count;
708 struct lock_struct *locks = br_lck->lock_data;
709 struct lock_struct *tp;
710 bool signal_pending_read = False;
711 bool break_oplocks = false;
712 NTSTATUS status;
714 /* No zero-zero locks for POSIX. */
715 if (plock->start == 0 && plock->size == 0) {
716 return NT_STATUS_INVALID_PARAMETER;
719 /* Don't allow 64-bit lock wrap. */
720 if (plock->start + plock->size - 1 < plock->start) {
721 return NT_STATUS_INVALID_PARAMETER;
724 /* The worst case scenario here is we have to split an
725 existing POSIX lock range into two, and add our lock,
726 so we need at most 2 more entries. */
728 tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
729 if (!tp) {
730 return NT_STATUS_NO_MEMORY;
733 count = posix_count = 0;
735 for (i=0; i < br_lck->num_locks; i++) {
736 struct lock_struct *curr_lock = &locks[i];
738 /* If we have a pending read lock, a lock downgrade should
739 trigger a lock re-evaluation. */
740 if (curr_lock->lock_type == PENDING_READ_LOCK &&
741 brl_pending_overlap(plock, curr_lock)) {
742 signal_pending_read = True;
745 if (curr_lock->lock_flav == WINDOWS_LOCK) {
746 /* Do any Windows flavour locks conflict ? */
747 if (brl_conflict(curr_lock, plock)) {
748 /* No games with error messages. */
749 SAFE_FREE(tp);
750 /* Remember who blocked us. */
751 plock->context.smblctx = curr_lock->context.smblctx;
752 return NT_STATUS_FILE_LOCK_CONFLICT;
754 /* Just copy the Windows lock into the new array. */
755 memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
756 count++;
757 } else {
758 unsigned int tmp_count = 0;
760 /* POSIX conflict semantics are different. */
761 if (brl_conflict_posix(curr_lock, plock)) {
762 /* Can't block ourselves with POSIX locks. */
763 /* No games with error messages. */
764 SAFE_FREE(tp);
765 /* Remember who blocked us. */
766 plock->context.smblctx = curr_lock->context.smblctx;
767 return NT_STATUS_FILE_LOCK_CONFLICT;
770 /* Work out overlaps. */
771 tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
772 posix_count += tmp_count;
773 count += tmp_count;
778 * Break oplocks while we hold a brl. Since lock() and unlock() calls
779 * are not symmetric with POSIX semantics, we cannot guarantee our
780 * contend_level2_oplocks_begin/end calls will be acquired and
781 * released one-for-one as with Windows semantics. Therefore we only
782 * call contend_level2_oplocks_begin if this is the first POSIX brl on
783 * the file.
785 break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
786 posix_count == 0);
787 if (break_oplocks) {
788 contend_level2_oplocks_begin(br_lck->fsp,
789 LEVEL2_CONTEND_POSIX_BRL);
792 /* Try and add the lock in order, sorted by lock start. */
793 for (i=0; i < count; i++) {
794 struct lock_struct *curr_lock = &tp[i];
796 if (curr_lock->start <= plock->start) {
797 continue;
801 if (i < count) {
802 memmove(&tp[i+1], &tp[i],
803 (count - i)*sizeof(struct lock_struct));
805 memcpy(&tp[i], plock, sizeof(struct lock_struct));
806 count++;
808 /* We can get the POSIX lock, now see if it needs to
809 be mapped into a lower level POSIX one, and if so can
810 we get it ? */
812 if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
813 int errno_ret;
815 /* The lower layer just needs to attempt to
816 get the system POSIX lock. We've weeded out
817 any conflicts above. */
819 if (!set_posix_lock_posix_flavour(br_lck->fsp,
820 plock->start,
821 plock->size,
822 plock->lock_type,
823 &errno_ret)) {
825 /* We don't know who blocked us. */
826 plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;
828 if (errno_ret == EACCES || errno_ret == EAGAIN) {
829 SAFE_FREE(tp);
830 status = NT_STATUS_FILE_LOCK_CONFLICT;
831 goto fail;
832 } else {
833 SAFE_FREE(tp);
834 status = map_nt_error_from_unix(errno);
835 goto fail;
840 /* If we didn't use all the allocated size,
841 * Realloc so we don't leak entries per lock call. */
842 if (count < br_lck->num_locks + 2) {
843 tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
844 if (!tp) {
845 status = NT_STATUS_NO_MEMORY;
846 goto fail;
850 br_lck->num_locks = count;
851 SAFE_FREE(br_lck->lock_data);
852 br_lck->lock_data = tp;
853 locks = tp;
854 br_lck->modified = True;
856 /* A successful downgrade from write to read lock can trigger a lock
857 re-evaluation where waiting readers can now proceed.
859 if (signal_pending_read) {
860 /* Send unlock messages to any pending read waiters that overlap. */
861 for (i=0; i < br_lck->num_locks; i++) {
862 struct lock_struct *pend_lock = &locks[i];
864 /* Ignore non-pending locks. */
865 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
866 continue;
869 if (pend_lock->lock_type == PENDING_READ_LOCK &&
870 brl_pending_overlap(plock, pend_lock)) {
871 DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
872 procid_str_static(&pend_lock->context.pid )));
874 messaging_send(msg_ctx, pend_lock->context.pid,
875 MSG_SMB_UNLOCK, &data_blob_null);
880 return NT_STATUS_OK;
881 fail:
882 if (break_oplocks) {
883 contend_level2_oplocks_end(br_lck->fsp,
884 LEVEL2_CONTEND_POSIX_BRL);
886 return status;
889 NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
890 struct byte_range_lock *br_lck,
891 struct lock_struct *plock,
892 bool blocking_lock,
893 struct blocking_lock_record *blr)
895 VFS_FIND(brl_lock_windows);
896 return handle->fns->brl_lock_windows(handle, br_lck, plock,
897 blocking_lock, blr);
900 /****************************************************************************
901 Lock a range of bytes.
902 ****************************************************************************/
904 NTSTATUS brl_lock(struct messaging_context *msg_ctx,
905 struct byte_range_lock *br_lck,
906 uint64_t smblctx,
907 struct server_id pid,
908 br_off start,
909 br_off size,
910 enum brl_type lock_type,
911 enum brl_flavour lock_flav,
912 bool blocking_lock,
913 uint64_t *psmblctx,
914 struct blocking_lock_record *blr)
916 NTSTATUS ret;
917 struct lock_struct lock;
919 #if !ZERO_ZERO
920 if (start == 0 && size == 0) {
921 DEBUG(0,("client sent 0/0 lock - please report this\n"));
923 #endif
925 #ifdef DEVELOPER
926 /* Quieten valgrind on test. */
927 memset(&lock, '\0', sizeof(lock));
928 #endif
930 lock.context.smblctx = smblctx;
931 lock.context.pid = pid;
932 lock.context.tid = br_lck->fsp->conn->cnum;
933 lock.start = start;
934 lock.size = size;
935 lock.fnum = br_lck->fsp->fnum;
936 lock.lock_type = lock_type;
937 lock.lock_flav = lock_flav;
939 if (lock_flav == WINDOWS_LOCK) {
940 ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
941 &lock, blocking_lock, blr);
942 } else {
943 ret = brl_lock_posix(msg_ctx, br_lck, &lock);
946 #if ZERO_ZERO
947 /* sort the lock list */
948 TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
949 #endif
951 /* If we're returning an error, return who blocked us. */
952 if (!NT_STATUS_IS_OK(ret) && psmblctx) {
953 *psmblctx = lock.context.smblctx;
955 return ret;
958 /****************************************************************************
959 Unlock a range of bytes - Windows semantics.
960 ****************************************************************************/
962 bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
963 struct byte_range_lock *br_lck,
964 const struct lock_struct *plock)
966 unsigned int i, j;
967 struct lock_struct *locks = br_lck->lock_data;
968 enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */
970 SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);
972 #if ZERO_ZERO
973 /* Delete write locks by preference... The lock list
974 is sorted in the zero zero case. */
976 for (i = 0; i < br_lck->num_locks; i++) {
977 struct lock_struct *lock = &locks[i];
979 if (lock->lock_type == WRITE_LOCK &&
980 brl_same_context(&lock->context, &plock->context) &&
981 lock->fnum == plock->fnum &&
982 lock->lock_flav == WINDOWS_LOCK &&
983 lock->start == plock->start &&
984 lock->size == plock->size) {
986 /* found it - delete it */
987 deleted_lock_type = lock->lock_type;
988 break;
992 if (i != br_lck->num_locks) {
993 /* We found it - don't search again. */
994 goto unlock_continue;
996 #endif
998 for (i = 0; i < br_lck->num_locks; i++) {
999 struct lock_struct *lock = &locks[i];
1001 if (IS_PENDING_LOCK(lock->lock_type)) {
1002 continue;
1005 /* Only remove our own locks that match in start, size, and flavour. */
1006 if (brl_same_context(&lock->context, &plock->context) &&
1007 lock->fnum == plock->fnum &&
1008 lock->lock_flav == WINDOWS_LOCK &&
1009 lock->start == plock->start &&
1010 lock->size == plock->size ) {
1011 deleted_lock_type = lock->lock_type;
1012 break;
1016 if (i == br_lck->num_locks) {
1017 /* we didn't find it */
1018 return False;
1021 #if ZERO_ZERO
1022 unlock_continue:
1023 #endif
1025 /* Actually delete the lock. */
1026 if (i < br_lck->num_locks - 1) {
1027 memmove(&locks[i], &locks[i+1],
1028 sizeof(*locks)*((br_lck->num_locks-1) - i));
1031 br_lck->num_locks -= 1;
1032 br_lck->modified = True;
1034 /* Unlock the underlying POSIX regions. */
1035 if(lp_posix_locking(br_lck->fsp->conn->params)) {
1036 release_posix_lock_windows_flavour(br_lck->fsp,
1037 plock->start,
1038 plock->size,
1039 deleted_lock_type,
1040 &plock->context,
1041 locks,
1042 br_lck->num_locks);
1045 /* Send unlock messages to any pending waiters that overlap. */
1046 for (j=0; j < br_lck->num_locks; j++) {
1047 struct lock_struct *pend_lock = &locks[j];
1049 /* Ignore non-pending locks. */
1050 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
1051 continue;
1054 /* We could send specific lock info here... */
1055 if (brl_pending_overlap(plock, pend_lock)) {
1056 DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
1057 procid_str_static(&pend_lock->context.pid )));
1059 messaging_send(msg_ctx, pend_lock->context.pid,
1060 MSG_SMB_UNLOCK, &data_blob_null);
1064 contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
1065 return True;
1068 /****************************************************************************
1069 Unlock a range of bytes - POSIX semantics.
1070 ****************************************************************************/
1072 static bool brl_unlock_posix(struct messaging_context *msg_ctx,
1073 struct byte_range_lock *br_lck,
1074 struct lock_struct *plock)
1076 unsigned int i, j, count;
1077 struct lock_struct *tp;
1078 struct lock_struct *locks = br_lck->lock_data;
1079 bool overlap_found = False;
1081 /* No zero-zero locks for POSIX. */
1082 if (plock->start == 0 && plock->size == 0) {
1083 return False;
1086 /* Don't allow 64-bit lock wrap. */
1087 if (plock->start + plock->size < plock->start ||
1088 plock->start + plock->size < plock->size) {
1089 DEBUG(10,("brl_unlock_posix: lock wrap\n"));
1090 return False;
1093 /* The worst case scenario here is we have to split an
1094 existing POSIX lock range into two, so we need at most
1095 1 more entry. */
1097 tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
1098 if (!tp) {
1099 DEBUG(10,("brl_unlock_posix: malloc fail\n"));
1100 return False;
1103 count = 0;
1104 for (i = 0; i < br_lck->num_locks; i++) {
1105 struct lock_struct *lock = &locks[i];
1106 unsigned int tmp_count;
1108 /* Only remove our own locks - ignore fnum. */
1109 if (IS_PENDING_LOCK(lock->lock_type) ||
1110 !brl_same_context(&lock->context, &plock->context)) {
1111 memcpy(&tp[count], lock, sizeof(struct lock_struct));
1112 count++;
1113 continue;
1116 if (lock->lock_flav == WINDOWS_LOCK) {
1117 /* Do any Windows flavour locks conflict ? */
1118 if (brl_conflict(lock, plock)) {
1119 SAFE_FREE(tp);
1120 return false;
1122 /* Just copy the Windows lock into the new array. */
1123 memcpy(&tp[count], lock, sizeof(struct lock_struct));
1124 count++;
1125 continue;
1128 /* Work out overlaps. */
1129 tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);
1131 if (tmp_count == 0) {
1132 /* plock overlapped the existing lock completely,
1133 or replaced it. Don't copy the existing lock. */
1134 overlap_found = true;
1135 } else if (tmp_count == 1) {
1136 /* Either no overlap, (simple copy of existing lock) or
1137 * an overlap of an existing lock. */
1138 /* If the lock changed size, we had an overlap. */
1139 if (tp[count].size != lock->size) {
1140 overlap_found = true;
1142 count += tmp_count;
1143 } else if (tmp_count == 2) {
1144 /* We split a lock range in two. */
1145 overlap_found = true;
1146 count += tmp_count;
1148 /* Optimisation... */
1149 /* We know we're finished here as we can't overlap any
1150 more POSIX locks. Copy the rest of the lock array. */
1152 if (i < br_lck->num_locks - 1) {
1153 memcpy(&tp[count], &locks[i+1],
1154 sizeof(*locks)*((br_lck->num_locks-1) - i));
1155 count += ((br_lck->num_locks-1) - i);
1157 break;
1162 if (!overlap_found) {
1163 /* Just ignore - no change. */
1164 SAFE_FREE(tp);
1165 DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
1166 return True;
1169 /* Unlock any POSIX regions. */
1170 if(lp_posix_locking(br_lck->fsp->conn->params)) {
1171 release_posix_lock_posix_flavour(br_lck->fsp,
1172 plock->start,
1173 plock->size,
1174 &plock->context,
1176 count);
1179 /* Realloc so we don't leak entries per unlock call. */
1180 if (count) {
1181 tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
1182 if (!tp) {
1183 DEBUG(10,("brl_unlock_posix: realloc fail\n"));
1184 return False;
1186 } else {
1187 /* We deleted the last lock. */
1188 SAFE_FREE(tp);
1189 tp = NULL;
1192 contend_level2_oplocks_end(br_lck->fsp,
1193 LEVEL2_CONTEND_POSIX_BRL);
1195 br_lck->num_locks = count;
1196 SAFE_FREE(br_lck->lock_data);
1197 locks = tp;
1198 br_lck->lock_data = tp;
1199 br_lck->modified = True;
1201 /* Send unlock messages to any pending waiters that overlap. */
1203 for (j=0; j < br_lck->num_locks; j++) {
1204 struct lock_struct *pend_lock = &locks[j];
1206 /* Ignore non-pending locks. */
1207 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
1208 continue;
1211 /* We could send specific lock info here... */
1212 if (brl_pending_overlap(plock, pend_lock)) {
1213 DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
1214 procid_str_static(&pend_lock->context.pid )));
1216 messaging_send(msg_ctx, pend_lock->context.pid,
1217 MSG_SMB_UNLOCK, &data_blob_null);
1221 return True;
1224 bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
1225 struct messaging_context *msg_ctx,
1226 struct byte_range_lock *br_lck,
1227 const struct lock_struct *plock)
1229 VFS_FIND(brl_unlock_windows);
1230 return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
1233 /****************************************************************************
1234 Unlock a range of bytes.
1235 ****************************************************************************/
1237 bool brl_unlock(struct messaging_context *msg_ctx,
1238 struct byte_range_lock *br_lck,
1239 uint64_t smblctx,
1240 struct server_id pid,
1241 br_off start,
1242 br_off size,
1243 enum brl_flavour lock_flav)
1245 struct lock_struct lock;
1247 lock.context.smblctx = smblctx;
1248 lock.context.pid = pid;
1249 lock.context.tid = br_lck->fsp->conn->cnum;
1250 lock.start = start;
1251 lock.size = size;
1252 lock.fnum = br_lck->fsp->fnum;
1253 lock.lock_type = UNLOCK_LOCK;
1254 lock.lock_flav = lock_flav;
1256 if (lock_flav == WINDOWS_LOCK) {
1257 return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
1258 br_lck, &lock);
1259 } else {
1260 return brl_unlock_posix(msg_ctx, br_lck, &lock);
1264 /****************************************************************************
1265 Test if we could add a lock if we wanted to.
1266 Returns True if the region required is currently unlocked, False if locked.
1267 ****************************************************************************/
1269 bool brl_locktest(struct byte_range_lock *br_lck,
1270 uint64_t smblctx,
1271 struct server_id pid,
1272 br_off start,
1273 br_off size,
1274 enum brl_type lock_type,
1275 enum brl_flavour lock_flav)
1277 bool ret = True;
1278 unsigned int i;
1279 struct lock_struct lock;
1280 const struct lock_struct *locks = br_lck->lock_data;
1281 files_struct *fsp = br_lck->fsp;
1283 lock.context.smblctx = smblctx;
1284 lock.context.pid = pid;
1285 lock.context.tid = br_lck->fsp->conn->cnum;
1286 lock.start = start;
1287 lock.size = size;
1288 lock.fnum = fsp->fnum;
1289 lock.lock_type = lock_type;
1290 lock.lock_flav = lock_flav;
1292 /* Make sure existing locks don't conflict */
1293 for (i=0; i < br_lck->num_locks; i++) {
1295 * Our own locks don't conflict.
1297 if (brl_conflict_other(&locks[i], &lock)) {
1298 return False;
1303 * There is no lock held by an SMB daemon, check to
1304 * see if there is a POSIX lock from a UNIX or NFS process.
1305 * This only conflicts with Windows locks, not POSIX locks.
1308 if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
1309 ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);
1311 DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
1312 (double)start, (double)size, ret ? "locked" : "unlocked",
1313 fsp->fnum, fsp_str_dbg(fsp)));
1315 /* We need to return the inverse of is_posix_locked. */
1316 ret = !ret;
1319 /* no conflicts - we could have added it */
1320 return ret;
1323 /****************************************************************************
1324 Query for existing locks.
1325 ****************************************************************************/
1327 NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
1328 uint64_t *psmblctx,
1329 struct server_id pid,
1330 br_off *pstart,
1331 br_off *psize,
1332 enum brl_type *plock_type,
1333 enum brl_flavour lock_flav)
1335 unsigned int i;
1336 struct lock_struct lock;
1337 const struct lock_struct *locks = br_lck->lock_data;
1338 files_struct *fsp = br_lck->fsp;
1340 lock.context.smblctx = *psmblctx;
1341 lock.context.pid = pid;
1342 lock.context.tid = br_lck->fsp->conn->cnum;
1343 lock.start = *pstart;
1344 lock.size = *psize;
1345 lock.fnum = fsp->fnum;
1346 lock.lock_type = *plock_type;
1347 lock.lock_flav = lock_flav;
1349 /* Make sure existing locks don't conflict */
1350 for (i=0; i < br_lck->num_locks; i++) {
1351 const struct lock_struct *exlock = &locks[i];
1352 bool conflict = False;
1354 if (exlock->lock_flav == WINDOWS_LOCK) {
1355 conflict = brl_conflict(exlock, &lock);
1356 } else {
1357 conflict = brl_conflict_posix(exlock, &lock);
1360 if (conflict) {
1361 *psmblctx = exlock->context.smblctx;
1362 *pstart = exlock->start;
1363 *psize = exlock->size;
1364 *plock_type = exlock->lock_type;
1365 return NT_STATUS_LOCK_NOT_GRANTED;
1370 * There is no lock held by an SMB daemon, check to
1371 * see if there is a POSIX lock from a UNIX or NFS process.
1374 if(lp_posix_locking(fsp->conn->params)) {
1375 bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
1377 DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
1378 (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
1379 fsp->fnum, fsp_str_dbg(fsp)));
1381 if (ret) {
1382 /* Hmmm. No clue what to set smblctx to - use -1. */
1383 *psmblctx = 0xFFFFFFFFFFFFFFFFLL;
1384 return NT_STATUS_LOCK_NOT_GRANTED;
1388 return NT_STATUS_OK;
1392 bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
1393 struct byte_range_lock *br_lck,
1394 struct lock_struct *plock,
1395 struct blocking_lock_record *blr)
1397 VFS_FIND(brl_cancel_windows);
1398 return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
1401 /****************************************************************************
1402 Remove a particular pending lock.
1403 ****************************************************************************/
1404 bool brl_lock_cancel(struct byte_range_lock *br_lck,
1405 uint64_t smblctx,
1406 struct server_id pid,
1407 br_off start,
1408 br_off size,
1409 enum brl_flavour lock_flav,
1410 struct blocking_lock_record *blr)
1412 bool ret;
1413 struct lock_struct lock;
1415 lock.context.smblctx = smblctx;
1416 lock.context.pid = pid;
1417 lock.context.tid = br_lck->fsp->conn->cnum;
1418 lock.start = start;
1419 lock.size = size;
1420 lock.fnum = br_lck->fsp->fnum;
1421 lock.lock_flav = lock_flav;
1422 /* lock.lock_type doesn't matter */
1424 if (lock_flav == WINDOWS_LOCK) {
1425 ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
1426 &lock, blr);
1427 } else {
1428 ret = brl_lock_cancel_default(br_lck, &lock);
1431 return ret;
1434 bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
1435 struct lock_struct *plock)
1437 unsigned int i;
1438 struct lock_struct *locks = br_lck->lock_data;
1440 SMB_ASSERT(plock);
1442 for (i = 0; i < br_lck->num_locks; i++) {
1443 struct lock_struct *lock = &locks[i];
1445 /* For pending locks we *always* care about the fnum. */
1446 if (brl_same_context(&lock->context, &plock->context) &&
1447 lock->fnum == plock->fnum &&
1448 IS_PENDING_LOCK(lock->lock_type) &&
1449 lock->lock_flav == plock->lock_flav &&
1450 lock->start == plock->start &&
1451 lock->size == plock->size) {
1452 break;
1456 if (i == br_lck->num_locks) {
1457 /* Didn't find it. */
1458 return False;
1461 if (i < br_lck->num_locks - 1) {
1462 /* Found this particular pending lock - delete it */
1463 memmove(&locks[i], &locks[i+1],
1464 sizeof(*locks)*((br_lck->num_locks-1) - i));
1467 br_lck->num_locks -= 1;
1468 br_lck->modified = True;
1469 return True;
1472 /****************************************************************************
1473 Remove any locks associated with an open file.
1474 We return True if this process owns any other Windows locks on this
1475 fd and so we should not immediately close the fd.
1476 ****************************************************************************/
1478 void brl_close_fnum(struct messaging_context *msg_ctx,
1479 struct byte_range_lock *br_lck)
1481 files_struct *fsp = br_lck->fsp;
1482 uint16 tid = fsp->conn->cnum;
1483 int fnum = fsp->fnum;
1484 unsigned int i, j, dcount=0;
1485 int num_deleted_windows_locks = 0;
1486 struct lock_struct *locks = br_lck->lock_data;
1487 struct server_id pid = sconn_server_id(fsp->conn->sconn);
1488 bool unlock_individually = False;
1489 bool posix_level2_contention_ended = false;
1491 if(lp_posix_locking(fsp->conn->params)) {
1493 /* Check if there are any Windows locks associated with this dev/ino
1494 pair that are not this fnum. If so we need to call unlock on each
1495 one in order to release the system POSIX locks correctly. */
1497 for (i=0; i < br_lck->num_locks; i++) {
1498 struct lock_struct *lock = &locks[i];
1500 if (!procid_equal(&lock->context.pid, &pid)) {
1501 continue;
1504 if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
1505 continue; /* Ignore pending. */
1508 if (lock->context.tid != tid || lock->fnum != fnum) {
1509 unlock_individually = True;
1510 break;
1514 if (unlock_individually) {
1515 struct lock_struct *locks_copy;
1516 unsigned int num_locks_copy;
1518 /* Copy the current lock array. */
1519 if (br_lck->num_locks) {
1520 locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
1521 if (!locks_copy) {
1522 smb_panic("brl_close_fnum: talloc failed");
1524 } else {
1525 locks_copy = NULL;
1528 num_locks_copy = br_lck->num_locks;
1530 for (i=0; i < num_locks_copy; i++) {
1531 struct lock_struct *lock = &locks_copy[i];
1533 if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
1534 (lock->fnum == fnum)) {
1535 brl_unlock(msg_ctx,
1536 br_lck,
1537 lock->context.smblctx,
1538 pid,
1539 lock->start,
1540 lock->size,
1541 lock->lock_flav);
1544 return;
1548 /* We can bulk delete - any POSIX locks will be removed when the fd closes. */
1550 /* Remove any existing locks for this fnum (or any fnum if they're POSIX). */
1552 for (i=0; i < br_lck->num_locks; i++) {
1553 struct lock_struct *lock = &locks[i];
1554 bool del_this_lock = False;
1556 if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
1557 if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
1558 del_this_lock = True;
1559 num_deleted_windows_locks++;
1560 contend_level2_oplocks_end(br_lck->fsp,
1561 LEVEL2_CONTEND_WINDOWS_BRL);
1562 } else if (lock->lock_flav == POSIX_LOCK) {
1563 del_this_lock = True;
1565 /* Only end level2 contention once for posix */
1566 if (!posix_level2_contention_ended) {
1567 posix_level2_contention_ended = true;
1568 contend_level2_oplocks_end(br_lck->fsp,
1569 LEVEL2_CONTEND_POSIX_BRL);
1574 if (del_this_lock) {
1575 /* Send unlock messages to any pending waiters that overlap. */
1576 for (j=0; j < br_lck->num_locks; j++) {
1577 struct lock_struct *pend_lock = &locks[j];
1579 /* Ignore our own or non-pending locks. */
1580 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
1581 continue;
1584 /* Optimisation - don't send to this fnum as we're
1585 closing it. */
1586 if (pend_lock->context.tid == tid &&
1587 procid_equal(&pend_lock->context.pid, &pid) &&
1588 pend_lock->fnum == fnum) {
1589 continue;
1592 /* We could send specific lock info here... */
1593 if (brl_pending_overlap(lock, pend_lock)) {
1594 messaging_send(msg_ctx, pend_lock->context.pid,
1595 MSG_SMB_UNLOCK, &data_blob_null);
1599 /* found it - delete it */
1600 if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
1601 memmove(&locks[i], &locks[i+1],
1602 sizeof(*locks)*((br_lck->num_locks-1) - i));
1604 br_lck->num_locks--;
1605 br_lck->modified = True;
1606 i--;
1607 dcount++;
1611 if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
1612 /* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
1613 reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
1617 /****************************************************************************
1618 Ensure this set of lock entries is valid.
1619 ****************************************************************************/
1620 static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
1622 unsigned int i;
1623 unsigned int num_valid_entries = 0;
1624 struct lock_struct *locks = *pplocks;
1626 for (i = 0; i < *pnum_entries; i++) {
1627 struct lock_struct *lock_data = &locks[i];
1628 if (!serverid_exists(&lock_data->context.pid)) {
1629 /* This process no longer exists - mark this
1630 entry as invalid by zeroing it. */
1631 ZERO_STRUCTP(lock_data);
1632 } else {
1633 num_valid_entries++;
1637 if (num_valid_entries != *pnum_entries) {
1638 struct lock_struct *new_lock_data = NULL;
1640 if (num_valid_entries) {
1641 new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
1642 if (!new_lock_data) {
1643 DEBUG(3, ("malloc fail\n"));
1644 return False;
1647 num_valid_entries = 0;
1648 for (i = 0; i < *pnum_entries; i++) {
1649 struct lock_struct *lock_data = &locks[i];
1650 if (lock_data->context.smblctx &&
1651 lock_data->context.tid) {
1652 /* Valid (nonzero) entry - copy it. */
1653 memcpy(&new_lock_data[num_valid_entries],
1654 lock_data, sizeof(struct lock_struct));
1655 num_valid_entries++;
1660 SAFE_FREE(*pplocks);
1661 *pplocks = new_lock_data;
1662 *pnum_entries = num_valid_entries;
1665 return True;
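/* The compaction above relies on a zeroed (dead) entry never being copied
   back: only entries with a non-zero smblctx and tid are treated as valid. */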
1668 struct brl_forall_cb {
1669 void (*fn)(struct file_id id, struct server_id pid,
1670 enum brl_type lock_type,
1671 enum brl_flavour lock_flav,
1672 br_off start, br_off size,
1673 void *private_data);
1674 void *private_data;
1677 /****************************************************************************
1678 Traverse the whole database with this function, calling traverse_callback
1679 on each lock.
1680 ****************************************************************************/
1682 static int traverse_fn(struct db_record *rec, void *state)
1684 struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
1685 struct lock_struct *locks;
1686 struct file_id *key;
1687 unsigned int i;
1688 unsigned int num_locks = 0;
1689 unsigned int orig_num_locks = 0;
1691 /* In a traverse function we must make a copy of
1692 dbuf before modifying it. */
1694 locks = (struct lock_struct *)memdup(rec->value.dptr,
1695 rec->value.dsize);
1696 if (!locks) {
1697 return -1; /* Terminate traversal. */
1700 key = (struct file_id *)rec->key.dptr;
1701 orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);
1703 /* Ensure the lock db is clean of entries from invalid processes. */
1705 if (!validate_lock_entries(&num_locks, &locks)) {
1706 SAFE_FREE(locks);
1707 return -1; /* Terminate traversal */
1710 if (orig_num_locks != num_locks) {
1711 if (num_locks) {
1712 TDB_DATA data;
1713 data.dptr = (uint8_t *)locks;
1714 data.dsize = num_locks*sizeof(struct lock_struct);
1715 rec->store(rec, data, TDB_REPLACE);
1716 } else {
1717 rec->delete_rec(rec);
1721 if (cb->fn) {
1722 for ( i=0; i<num_locks; i++) {
1723 cb->fn(*key,
1724 locks[i].context.pid,
1725 locks[i].lock_type,
1726 locks[i].lock_flav,
1727 locks[i].start,
1728 locks[i].size,
1729 cb->private_data);
1733 SAFE_FREE(locks);
1734 return 0;
1737 /*******************************************************************
1738 Call the specified function on each lock in the database.
1739 ********************************************************************/
1741 int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
1742 enum brl_type lock_type,
1743 enum brl_flavour lock_flav,
1744 br_off start, br_off size,
1745 void *private_data),
1746 void *private_data)
1748 struct brl_forall_cb cb;
1750 if (!brlock_db) {
1751 return 0;
1753 cb.fn = fn;
1754 cb.private_data = private_data;
1755 return brlock_db->traverse(brlock_db, traverse_fn, &cb);
1758 /*******************************************************************
1759 Store a potentially modified set of byte range lock data back into
1760 the database.
1761 Unlock the record.
1762 ********************************************************************/
1764 static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
1766 if (br_lck->read_only) {
1767 SMB_ASSERT(!br_lck->modified);
1770 if (!br_lck->modified) {
1771 goto done;
1774 if (br_lck->num_locks == 0) {
1775 /* No locks - delete this entry. */
1776 NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
1777 if (!NT_STATUS_IS_OK(status)) {
1778 DEBUG(0, ("delete_rec returned %s\n",
1779 nt_errstr(status)));
1780 smb_panic("Could not delete byte range lock entry");
1782 } else {
1783 TDB_DATA data;
1784 NTSTATUS status;
1786 data.dptr = (uint8 *)br_lck->lock_data;
1787 data.dsize = br_lck->num_locks * sizeof(struct lock_struct);
1789 status = br_lck->record->store(br_lck->record, data,
1790 TDB_REPLACE);
1791 if (!NT_STATUS_IS_OK(status)) {
1792 DEBUG(0, ("store returned %s\n", nt_errstr(status)));
1793 smb_panic("Could not store byte range mode entry");
1797 done:
1799 SAFE_FREE(br_lck->lock_data);
1800 TALLOC_FREE(br_lck->record);
1801 return 0;
1804 /*******************************************************************
1805 Fetch a set of byte range lock data from the database.
1806 Leave the record locked.
1807 TALLOC_FREE(brl) will release the lock in the destructor.
1808 ********************************************************************/
1810 static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
1811 files_struct *fsp, bool read_only)
1813 TDB_DATA key, data;
1814 struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
1816 if (br_lck == NULL) {
1817 return NULL;
1820 br_lck->fsp = fsp;
1821 br_lck->num_locks = 0;
1822 br_lck->modified = False;
1823 br_lck->key = fsp->file_id;
1825 key.dptr = (uint8 *)&br_lck->key;
1826 key.dsize = sizeof(struct file_id);
1828 if (!fsp->lockdb_clean) {
1829 /* We must be read/write to clean
1830 the dead entries. */
1831 read_only = False;
1834 if (read_only) {
1835 if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
1836 DEBUG(3, ("Could not fetch byte range lock record\n"));
1837 TALLOC_FREE(br_lck);
1838 return NULL;
1840 br_lck->record = NULL;
1842 else {
1843 br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);
1845 if (br_lck->record == NULL) {
1846 DEBUG(3, ("Could not lock byte range lock entry\n"));
1847 TALLOC_FREE(br_lck);
1848 return NULL;
1851 data = br_lck->record->value;
1854 br_lck->read_only = read_only;
1855 br_lck->lock_data = NULL;
1857 talloc_set_destructor(br_lck, byte_range_lock_destructor);
1859 br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
1861 if (br_lck->num_locks != 0) {
1862 br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
1863 br_lck->num_locks);
1864 if (br_lck->lock_data == NULL) {
1865 DEBUG(0, ("malloc failed\n"));
1866 TALLOC_FREE(br_lck);
1867 return NULL;
1870 memcpy(br_lck->lock_data, data.dptr, data.dsize);
1873 if (!fsp->lockdb_clean) {
1874 int orig_num_locks = br_lck->num_locks;
1876 /* This is the first time we've accessed this. */
1877 /* Go through and ensure all entries exist - remove any that don't. */
1878 /* Makes the lockdb self cleaning at low cost. */
1880 if (!validate_lock_entries(&br_lck->num_locks,
1881 &br_lck->lock_data)) {
1882 SAFE_FREE(br_lck->lock_data);
1883 TALLOC_FREE(br_lck);
1884 return NULL;
1887 /* Ensure invalid locks are cleaned up in the destructor. */
1888 if (orig_num_locks != br_lck->num_locks) {
1889 br_lck->modified = True;
1892 /* Mark the lockdb as "clean" as seen from this open file. */
1893 fsp->lockdb_clean = True;
1896 if (DEBUGLEVEL >= 10) {
1897 unsigned int i;
1898 struct lock_struct *locks = br_lck->lock_data;
1899 DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
1900 br_lck->num_locks,
1901 file_id_string_tos(&fsp->file_id)));
1902 for( i = 0; i < br_lck->num_locks; i++) {
1903 print_lock_struct(i, &locks[i]);
1906 return br_lck;
1909 struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
1910 files_struct *fsp)
1912 return brl_get_locks_internal(mem_ctx, fsp, False);
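/****************************************************************************
 Fetch a read-only view of the byte range locks for this fsp.
 Outside clustering the result is cached on the files_struct and revalidated
 against the brlock_db sequence number (see TDB_SEQNUM in brl_init()).
****************************************************************************/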
1915 struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
1917 struct byte_range_lock *br_lock;
1919 if (lp_clustering()) {
1920 return brl_get_locks_internal(talloc_tos(), fsp, true);
1923 if ((fsp->brlock_rec != NULL)
1924 && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
1925 return fsp->brlock_rec;
1928 TALLOC_FREE(fsp->brlock_rec);
1930 br_lock = brl_get_locks_internal(talloc_tos(), fsp, false);
1931 if (br_lock == NULL) {
1932 return NULL;
1934 fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);
1936 fsp->brlock_rec = talloc_zero(fsp, struct byte_range_lock);
1937 if (fsp->brlock_rec == NULL) {
1938 goto fail;
1940 fsp->brlock_rec->fsp = fsp;
1941 fsp->brlock_rec->num_locks = br_lock->num_locks;
1942 fsp->brlock_rec->read_only = true;
1943 fsp->brlock_rec->key = br_lock->key;
1945 fsp->brlock_rec->lock_data = (struct lock_struct *)
1946 talloc_memdup(fsp->brlock_rec, br_lock->lock_data,
1947 sizeof(struct lock_struct) * br_lock->num_locks);
1948 if (fsp->brlock_rec->lock_data == NULL) {
1949 goto fail;
1952 TALLOC_FREE(br_lock);
1953 return fsp->brlock_rec;
1954 fail:
1955 TALLOC_FREE(br_lock);
1956 TALLOC_FREE(fsp->brlock_rec);
1957 return NULL;
1960 struct brl_revalidate_state {
1961 ssize_t array_size;
1962 uint32 num_pids;
1963 struct server_id *pids;
1967 * Collect PIDs of all processes with pending entries
1970 static void brl_revalidate_collect(struct file_id id, struct server_id pid,
1971 enum brl_type lock_type,
1972 enum brl_flavour lock_flav,
1973 br_off start, br_off size,
1974 void *private_data)
1976 struct brl_revalidate_state *state =
1977 (struct brl_revalidate_state *)private_data;
1979 if (!IS_PENDING_LOCK(lock_type)) {
1980 return;
1983 add_to_large_array(state, sizeof(pid), (void *)&pid,
1984 &state->pids, &state->num_pids,
1985 &state->array_size);
1989 * qsort callback to sort the processes
1992 static int compare_procids(const void *p1, const void *p2)
1994 const struct server_id *i1 = (struct server_id *)p1;
1995 const struct server_id *i2 = (struct server_id *)p2;
1997 if (i1->pid < i2->pid) return -1;
1998 if (i1->pid > i2->pid) return 1;
1999 return 0;
2003 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
2004 * locks so that they retry. Mainly used in the cluster code after a node has
2005 * died.
2007 * Done in two steps to avoid double-sends: First we collect all entries in an
2008 * array, then qsort that array and only send to non-dupes.
2011 static void brl_revalidate(struct messaging_context *msg_ctx,
2012 void *private_data,
2013 uint32_t msg_type,
2014 struct server_id server_id,
2015 DATA_BLOB *data)
2017 struct brl_revalidate_state *state;
2018 uint32 i;
2019 struct server_id last_pid;
2021 if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
2022 DEBUG(0, ("talloc failed\n"));
2023 return;
2026 brl_forall(brl_revalidate_collect, state);
2028 if (state->array_size == -1) {
2029 DEBUG(0, ("talloc failed\n"));
2030 goto done;
2033 if (state->num_pids == 0) {
2034 goto done;
2037 TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);
2039 ZERO_STRUCT(last_pid);
2041 for (i=0; i<state->num_pids; i++) {
2042 if (procid_equal(&last_pid, &state->pids[i])) {
2044 * We've seen that one already
2046 continue;
2049 messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
2050 &data_blob_null);
2051 last_pid = state->pids[i];
2054 done:
2055 TALLOC_FREE(state);
2056 return;
2059 void brl_register_msgs(struct messaging_context *msg_ctx)
2061 messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
2062 brl_revalidate);