/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "system/filesys.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
        DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
                        i,
                        (unsigned long long)pls->context.smblctx,
                        (unsigned int)pls->context.tid,
                        server_id_str(talloc_tos(), &pls->context.pid) ));

        DEBUG(10,("start = %.0f, size = %.0f, fnum = %llu, %s %s\n",
                (double)pls->start,
                (double)pls->size,
                (unsigned long long)pls->fnum,
                lock_type_name(pls->lock_type),
                lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

bool brl_same_context(const struct lock_context *ctx1,
                      const struct lock_context *ctx2)
{
        return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
                (ctx1->smblctx == ctx2->smblctx) &&
                (ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        /* XXX Remove for Win7 compatibility. */
        /* this extra check is not redundant - it copes with locks
           that go beyond the end of 64 bit file space */
        if (lck1->size != 0 &&
            lck1->start == lck2->start &&
            lck1->size == lck2->size) {
                return True;
        }

        if (lck1->start >= (lck2->start+lck2->size) ||
            lck2->start >= (lck1->start+lck1->size)) {
                return False;
        }
        return True;
}
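
/*
 * Worked example (illustrative only, not from the original source): with
 * lck1 = start 100, size 50 and lck2 = start 140, size 20, neither start
 * lies at or past the other lock's end (140 < 150 and 100 < 160), so
 * brl_overlap() returns True. The equal-range special case above matters
 * for locks that wrap 64-bit file space: for start 0xFFFFFFFFFFFFFFFF,
 * size 1, start+size overflows to 0, so the generic end-point test alone
 * would wrongly report two identical such locks as non-overlapping.
 */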

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        /* Ignore PENDING locks. */
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* A READ lock can stack on top of a WRITE lock if they have the same
         * context & fnum. */
        if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
            brl_same_context(&lck1->context, &lck2->context) &&
            lck1->fnum == lck2->fnum) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}
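
/*
 * Summary of the Windows-flavour rules above (illustrative note): pending
 * locks never conflict; READ vs READ never conflicts; a READ over our own
 * WRITE on the same context and fnum stacks rather than conflicts; every
 * other combination conflicts exactly when brl_overlap() says the byte
 * ranges intersect.
 */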

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
                               const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
        SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
        SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

        /* Ignore PENDING locks. */
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* Locks on the same context don't conflict. Ignore fnum. */
        if (brl_same_context(&lck1->context, &lck2->context)) {
                return False;
        }

        /* One is read, the other write, or the context is different,
           do they overlap ? */
        return brl_overlap(lck1, lck2);
}
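
/*
 * Illustrative contrast with brl_conflict() above: POSIX ownership is the
 * (pid, tid, smblctx) context alone, with fnum ignored. So, for example, a
 * context holding WRITE on bytes 0-9 may take READ on bytes 5-14 through a
 * different fnum without conflict here, where the Windows-flavour check
 * would have required the fnums to match.
 */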

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
                          const struct lock_struct *lck2)
{
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
                return False;

        /* POSIX flavour locks never conflict here - this is only called
           in the read/write path. */

        if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
                return False;

        /*
         * Incoming WRITE locks conflict with existing READ locks even
         * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
         */

        if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
                if (brl_same_context(&lck1->context, &lck2->context) &&
                    lck1->fnum == lck2->fnum)
                        return False;
        }

        return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
        if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
                return True;
        if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
                return True;
        return False;
}
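
/*
 * Worked example (illustrative): for pend_lock = start 15, size 5 (bytes
 * 15..19), an unlock of start 18, size 5 passes the second test
 * (18 >= 15 and 18 <= 20), so the waiter is woken. An unlock starting
 * exactly at 20 also qualifies, as the second comparison includes the
 * pending lock's end. Waking a waiter that still cannot lock is safe - it
 * simply re-evaluates and blocks again - so this test errs towards waking.
 */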

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
        if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
                /* amazing the little things you learn with a test
                   suite. Locks beyond this offset (as a 64 bit
                   number!) always generate the conflict error code,
                   unless the top bit is set */
                if (!blocking_lock) {
                        fsp->last_lock_failure = *lock;
                }
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
            lock->context.tid == fsp->last_lock_failure.context.tid &&
            lock->fnum == fsp->last_lock_failure.fnum &&
            lock->start == fsp->last_lock_failure.start) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        if (!blocking_lock) {
                fsp->last_lock_failure = *lock;
        }
        return NT_STATUS_LOCK_NOT_GRANTED;
}

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
        int tdb_flags;

        if (brlock_db) {
                return;
        }

        tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

        if (!lp_clustering()) {
                /*
                 * We can't use the SEQNUM trick to cache brlock
                 * entries in the clustering case because ctdb seqnum
                 * propagation has a delay.
                 */
                tdb_flags |= TDB_SEQNUM;
        }

        brlock_db = db_open(NULL, lock_path("brlock.tdb"),
                            lp_open_files_db_hash_size(), tdb_flags,
                            read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
                            DBWRAP_LOCK_ORDER_2);
        if (!brlock_db) {
                DEBUG(0,("Failed to open byte range locking database %s\n",
                         lock_path("brlock.tdb")));
                return;
        }
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
        TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) {
                return (lck1->start - lck2->start);
        }
        if (lck2->size != lck1->size) {
                return ((int)lck1->size - (int)lck2->size);
        }
        return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
                                  struct lock_struct *plock, bool blocking_lock)
{
        unsigned int i;
        files_struct *fsp = br_lck->fsp;
        struct lock_struct *locks = br_lck->lock_data;
        NTSTATUS status;

        SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

        if ((plock->start + plock->size - 1 < plock->start) &&
            plock->size != 0) {
                return NT_STATUS_INVALID_LOCK_RANGE;
        }

        for (i=0; i < br_lck->num_locks; i++) {
                /* Do any Windows or POSIX locks conflict ? */
                if (brl_conflict(&locks[i], plock)) {
                        /* Remember who blocked us. */
                        plock->context.smblctx = locks[i].context.smblctx;
                        return brl_lock_failed(fsp,plock,blocking_lock);
                }
#if ZERO_ZERO
                if (plock->start == 0 && plock->size == 0 &&
                    locks[i].size == 0) {
                        break;
                }
#endif
        }

        if (!IS_PENDING_LOCK(plock->lock_type)) {
                contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        }

        /* We can get the Windows lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
                int errno_ret;
                if (!set_posix_lock_windows_flavour(fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks,
                                &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                status = NT_STATUS_FILE_LOCK_CONFLICT;
                                goto fail;
                        } else {
                                status = map_nt_error_from_unix(errno);
                                goto fail;
                        }
                }
        }

        /* no conflicts - add it to the list of locks */
        locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
        if (!locks) {
                status = NT_STATUS_NO_MEMORY;
                goto fail;
        }

        memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
        br_lck->num_locks += 1;
        br_lck->lock_data = locks;
        br_lck->modified = True;

        return NT_STATUS_OK;
 fail:
        if (!IS_PENDING_LOCK(plock->lock_type)) {
                contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        }
        return status;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,   /* Output array. */
                                             struct lock_struct *ex,        /* existing lock. */
                                             struct lock_struct *plock)     /* proposed lock. */
{
        bool lock_types_differ = (ex->lock_type != plock->lock_type);

        /* We can't merge non-conflicting locks on different context - ignore fnum. */

        if (!brl_same_context(&ex->context, &plock->context)) {
                /* Just copy. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

        /* We now know we have the same context. */

        /* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

        if ( (ex->start > (plock->start + plock->size)) ||
             (plock->start > (ex->start + ex->size))) {

                /* No overlap with this lock - copy existing. */

                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR....
                +---------------+
                |       ex      |
                +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

        if ( (ex->start >= plock->start) &&
             (ex->start + ex->size <= plock->start + plock->size) ) {

                /* Replace - discard existing lock. */

                return 0;
        }

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

        if (plock->start + plock->size == ex->start) {

                /* If the lock types are the same, we merge; if different, we
                   add the remainder of the old lock. */

                if (lock_types_differ) {
                        /* Add existing. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->size += ex->size;
                        return 0;
                }
        }

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        |  ex   |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

        if (ex->start + ex->size == plock->start) {

                /* If the lock types are the same, we merge; if different, we
                   add the existing lock. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->start = ex->start;
                        plock->size += ex->size;
                        return 0;
                }
        }

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+

               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
             (ex->start <= plock->start + plock->size) &&
             (ex->start + ex->size > plock->start + plock->size) ) {

                /* If the lock types are the same, we merge; if different, we
                   add the remainder of the old lock. */

                if (lock_types_differ) {
                        /* Add remaining existing. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing start and size. */
                        lck_arr[0].start = plock->start + plock->size;
                        lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. */
                        plock->size += (ex->start + ex->size) - (plock->start + plock->size);
                        return 0;
                }
        }

/*********************************************
Overlap before.
        +-----------------------+
        |          ex           |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+

        +-------------+
        |     ex      |
        +-------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        |  ex   |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

        if ( (ex->start < plock->start) &&
             (ex->start + ex->size >= plock->start) &&
             (ex->start + ex->size <= plock->start + plock->size) ) {

                /* If the lock types are the same, we merge; if different, we
                   add the truncated old lock. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = plock->start - ex->start;
                        return 1;
                } else {
                        /* Merge - adjust incoming lock as we may have more
                         * merging to come. MUST ADJUST plock SIZE FIRST ! */
                        plock->size += (plock->start - ex->start);
                        plock->start = ex->start;
                        return 0;
                }
        }

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+

        +---------------------------+
        |      plock                | - same lock type.
        +---------------------------+
**********************************************/

        if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

                if (lock_types_differ) {

                        /* We have to split ex into two locks here. */

                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

                        /* Adjust first existing size. */
                        lck_arr[0].size = plock->start - ex->start;

                        /* Adjust second existing start and size. */
                        lck_arr[1].start = plock->start + plock->size;
                        lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 2;
                } else {
                        /* Just eat the existing locks, merge them into plock. */
                        plock->start = ex->start;
                        plock->size = ex->size;
                        return 0;
                }
        }

        /* Never get here. */
        smb_panic("brlock_posix_split_merge");
        /* Notreached. */

        /* Keep some compilers happy. */
        return 0;
}
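
/*
 * Worked example (illustrative): ex = READ lock, start 0, size 100 and
 * plock = WRITE lock, start 40, size 20 from the same context hit the
 * "complete overlap" case with differing lock types. The existing lock is
 * split around plock: lck_arr[0] = READ start 0, size 40 and
 * lck_arr[1] = READ start 60, size 40, and 2 is returned. The caller then
 * adds plock itself on top, which is why callers of this routine allocate
 * room for num_locks + 2 entries.
 */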

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
                               struct byte_range_lock *br_lck,
                               struct lock_struct *plock)
{
        unsigned int i, count, posix_count;
        struct lock_struct *locks = br_lck->lock_data;
        struct lock_struct *tp;
        bool signal_pending_read = False;
        bool break_oplocks = false;
        NTSTATUS status;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size - 1 < plock->start) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, and add our lock,
           so we need at most 2 more entries. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        count = posix_count = 0;

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *curr_lock = &locks[i];

                /* If we have a pending read lock, a lock downgrade should
                   trigger a lock re-evaluation. */
                if (curr_lock->lock_type == PENDING_READ_LOCK &&
                    brl_pending_overlap(plock, curr_lock)) {
                        signal_pending_read = True;
                }

                if (curr_lock->lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(curr_lock, plock)) {
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smblctx = curr_lock->context.smblctx;
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
                        count++;
                } else {
                        unsigned int tmp_count = 0;

                        /* POSIX conflict semantics are different. */
                        if (brl_conflict_posix(curr_lock, plock)) {
                                /* Can't block ourselves with POSIX locks. */
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smblctx = curr_lock->context.smblctx;
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /* Work out overlaps. */
                        tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
                        posix_count += tmp_count;
                        count += tmp_count;
                }
        }

        /*
         * Break oplocks while we hold a brl. Since lock() and unlock() calls
         * are not symmetric with POSIX semantics, we cannot guarantee our
         * contend_level2_oplocks_begin/end calls will be acquired and
         * released one-for-one as with Windows semantics. Therefore we only
         * call contend_level2_oplocks_begin if this is the first POSIX brl on
         * the file.
         */
        break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
                         posix_count == 0);
        if (break_oplocks) {
                contend_level2_oplocks_begin(br_lck->fsp,
                                             LEVEL2_CONTEND_POSIX_BRL);
        }

        /* Try and add the lock in order, sorted by lock start. */
        for (i=0; i < count; i++) {
                struct lock_struct *curr_lock = &tp[i];

                if (curr_lock->start <= plock->start) {
                        continue;
                }
                /* Stop at the first lock that starts past plock, so the
                   insertion below really keeps the array sorted. */
                break;
        }

        if (i < count) {
                memmove(&tp[i+1], &tp[i],
                        (count - i)*sizeof(struct lock_struct));
        }
        memcpy(&tp[i], plock, sizeof(struct lock_struct));
        count++;

        /* We can get the POSIX lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
                int errno_ret;

                /* The lower layer just needs to attempt to
                   get the system POSIX lock. We've weeded out
                   any conflicts above. */

                if (!set_posix_lock_posix_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                SAFE_FREE(tp);
                                status = NT_STATUS_FILE_LOCK_CONFLICT;
                                goto fail;
                        } else {
                                SAFE_FREE(tp);
                                status = map_nt_error_from_unix(errno);
                                goto fail;
                        }
                }
        }

        /* If we didn't use all the allocated size,
         * Realloc so we don't leak entries per lock call. */
        if (count < br_lck->num_locks + 2) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        status = NT_STATUS_NO_MEMORY;
                        goto fail;
                }
        }

        br_lck->num_locks = count;
        SAFE_FREE(br_lck->lock_data);
        br_lck->lock_data = tp;
        locks = tp;
        br_lck->modified = True;

        /* A successful downgrade from write to read lock can trigger a lock
           re-evaluation where waiting readers can now proceed. */

        if (signal_pending_read) {
                /* Send unlock messages to any pending read waiters that overlap. */
                for (i=0; i < br_lck->num_locks; i++) {
                        struct lock_struct *pend_lock = &locks[i];

                        /* Ignore non-pending locks. */
                        if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                                continue;
                        }

                        if (pend_lock->lock_type == PENDING_READ_LOCK &&
                            brl_pending_overlap(plock, pend_lock)) {
                                DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
                                          procid_str_static(&pend_lock->context.pid )));

                                messaging_send(msg_ctx, pend_lock->context.pid,
                                               MSG_SMB_UNLOCK, &data_blob_null);
                        }
                }
        }

        return NT_STATUS_OK;
 fail:
        if (break_oplocks) {
                contend_level2_oplocks_end(br_lck->fsp,
                                           LEVEL2_CONTEND_POSIX_BRL);
        }
        return status;
}

NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
                                       struct byte_range_lock *br_lck,
                                       struct lock_struct *plock,
                                       bool blocking_lock,
                                       struct blocking_lock_record *blr)
{
        VFS_FIND(brl_lock_windows);
        return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
                                                blocking_lock, blr);
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
                  struct byte_range_lock *br_lck,
                  uint64_t smblctx,
                  struct server_id pid,
                  br_off start,
                  br_off size,
                  enum brl_type lock_type,
                  enum brl_flavour lock_flav,
                  bool blocking_lock,
                  uint64_t *psmblctx,
                  struct blocking_lock_record *blr)
{
        NTSTATUS ret;
        struct lock_struct lock;

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

#ifdef DEVELOPER
        /* Quieten valgrind on test. */
        memset(&lock, '\0', sizeof(lock));
#endif

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
                                               &lock, blocking_lock, blr);
        } else {
                ret = brl_lock_posix(msg_ctx, br_lck, &lock);
        }

#if ZERO_ZERO
        /* sort the lock list */
        TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

        /* If we're returning an error, return who blocked us. */
        if (!NT_STATUS_IS_OK(ret) && psmblctx) {
                *psmblctx = lock.context.smblctx;
        }
        return ret;
}
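
#if 0
/*
 * Illustrative caller sketch only (kept out of the build): how an smbd
 * caller might request a Windows-flavour WRITE lock of 100 bytes at offset
 * 0 and report the blocker on failure. The helper name and the literal
 * smblctx value are invented for the example; real callers pass values
 * from the current SMB request.
 */
static NTSTATUS example_lock_range(struct messaging_context *msg_ctx,
                                   struct byte_range_lock *br_lck,
                                   struct server_id self)
{
        uint64_t blocker_smblctx = 0;
        NTSTATUS status;

        status = brl_lock(msg_ctx, br_lck,
                          1,            /* smblctx of the requester */
                          self,         /* our server_id */
                          0,            /* start */
                          100,          /* size */
                          WRITE_LOCK,
                          WINDOWS_LOCK,
                          false,        /* not a blocking lock */
                          &blocker_smblctx,
                          NULL);        /* no blocking_lock_record */
        if (!NT_STATUS_IS_OK(status)) {
                DEBUG(10, ("lock blocked by smblctx %llu\n",
                           (unsigned long long)blocker_smblctx));
        }
        return status;
}
#endif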

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
                                struct byte_range_lock *br_lck,
                                const struct lock_struct *plock)
{
        unsigned int i, j;
        struct lock_struct *locks = br_lck->lock_data;
        enum brl_type deleted_lock_type = READ_LOCK;    /* shut the compiler up.... */

        SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
        /* Delete write locks by preference... The lock list
           is sorted in the zero zero case. */

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {

                        /* found it - delete it */
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i != br_lck->num_locks) {
                /* We found it - don't search again. */
                goto unlock_continue;
        }
#endif

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                if (IS_PENDING_LOCK(lock->lock_type)) {
                        continue;
                }

                /* Only remove our own locks that match in start, size, and flavour. */
                if (brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size ) {
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* we didn't find it */
                return False;
        }

#if ZERO_ZERO
  unlock_continue:
#endif

        /* Actually delete the lock. */
        if (i < br_lck->num_locks - 1) {
                memmove(&locks[i], &locks[i+1],
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;

        /* Unlock the underlying POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_windows_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                deleted_lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks);
        }

        /* Send unlock messages to any pending waiters that overlap. */
        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(plock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                  procid_str_static(&pend_lock->context.pid )));

                        messaging_send(msg_ctx, pend_lock->context.pid,
                                       MSG_SMB_UNLOCK, &data_blob_null);
                }
        }

        contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
        return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
                             struct byte_range_lock *br_lck,
                             struct lock_struct *plock)
{
        unsigned int i, j, count;
        struct lock_struct *tp;
        struct lock_struct *locks = br_lck->lock_data;
        bool overlap_found = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return False;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
            plock->start + plock->size < plock->size) {
                DEBUG(10,("brl_unlock_posix: lock wrap\n"));
                return False;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, so we need at most
           1 more entry. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
        if (!tp) {
                DEBUG(10,("brl_unlock_posix: malloc fail\n"));
                return False;
        }

        count = 0;
        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                unsigned int tmp_count;

                /* Only remove our own locks - ignore fnum. */
                if (IS_PENDING_LOCK(lock->lock_type) ||
                    !brl_same_context(&lock->context, &plock->context)) {
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                if (lock->lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(lock, plock)) {
                                SAFE_FREE(tp);
                                return false;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                /* Work out overlaps. */
                tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

                if (tmp_count == 0) {
                        /* plock overlapped the existing lock completely,
                           or replaced it. Don't copy the existing lock. */
                        overlap_found = true;
                } else if (tmp_count == 1) {
                        /* Either no overlap, (simple copy of existing lock) or
                         * an overlap of an existing lock. */
                        /* If the lock changed size, we had an overlap. */
                        if (tp[count].size != lock->size) {
                                overlap_found = true;
                        }
                        count += tmp_count;
                } else if (tmp_count == 2) {
                        /* We split a lock range in two. */
                        overlap_found = true;
                        count += tmp_count;

                        /* Optimisation... */
                        /* We know we're finished here as we can't overlap any
                           more POSIX locks. Copy the rest of the lock array. */

                        if (i < br_lck->num_locks - 1) {
                                memcpy(&tp[count], &locks[i+1],
                                       sizeof(*locks)*((br_lck->num_locks-1) - i));
                                count += ((br_lck->num_locks-1) - i);
                        }
                        break;
                }
        }

        if (!overlap_found) {
                /* Just ignore - no change. */
                SAFE_FREE(tp);
                DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
                return True;
        }

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_posix_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                &plock->context,
                                tp,
                                count);
        }

        /* Realloc so we don't leak entries per unlock call. */
        if (count) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        DEBUG(10,("brl_unlock_posix: realloc fail\n"));
                        return False;
                }
        } else {
                /* We deleted the last lock. */
                SAFE_FREE(tp);
                tp = NULL;
        }

        contend_level2_oplocks_end(br_lck->fsp,
                                   LEVEL2_CONTEND_POSIX_BRL);

        br_lck->num_locks = count;
        SAFE_FREE(br_lck->lock_data);
        locks = tp;
        br_lck->lock_data = tp;
        br_lck->modified = True;

        /* Send unlock messages to any pending waiters that overlap. */

        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(plock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                  procid_str_static(&pend_lock->context.pid )));

                        messaging_send(msg_ctx, pend_lock->context.pid,
                                       MSG_SMB_UNLOCK, &data_blob_null);
                }
        }

        return True;
}

bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
                                     struct messaging_context *msg_ctx,
                                     struct byte_range_lock *br_lck,
                                     const struct lock_struct *plock)
{
        VFS_FIND(brl_unlock_windows);
        return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
                                                  plock);
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
                struct byte_range_lock *br_lck,
                uint64_t smblctx,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        struct lock_struct lock;

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = UNLOCK_LOCK;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
                                                  br_lck, &lock);
        } else {
                return brl_unlock_posix(msg_ctx, br_lck, &lock);
        }
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
                  uint64_t smblctx,
                  struct server_id pid,
                  br_off start,
                  br_off size,
                  enum brl_type lock_type,
                  enum brl_flavour lock_flav)
{
        bool ret = True;
        unsigned int i;
        struct lock_struct lock;
        const struct lock_struct *locks = br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                /*
                 * Our own locks don't conflict.
                 */
                if (brl_conflict_other(&locks[i], &lock)) {
                        return False;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         * This only conflicts with Windows locks, not POSIX locks.
         */

        if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
                ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

                DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for %s file %s\n",
                          (double)start, (double)size, ret ? "locked" : "unlocked",
                          fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

                /* We need to return the inverse of is_posix_locked. */
                ret = !ret;
        }

        /* no conflicts - we could have added it */
        return ret;
}
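
/*
 * Illustrative note: brl_locktest() records nothing and takes no lock; it
 * only answers whether the range would be grantable right now. Our own
 * locks never fail the probe (hence brl_conflict_other above), and with
 * POSIX locking enabled the kernel is consulted too, since a UNIX or NFS
 * process may hold an fcntl lock that Samba's database knows nothing about.
 */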

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
                       uint64_t *psmblctx,
                       struct server_id pid,
                       br_off *pstart,
                       br_off *psize,
                       enum brl_type *plock_type,
                       enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct lock;
        const struct lock_struct *locks = br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smblctx = *psmblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = *pstart;
        lock.size = *psize;
        lock.fnum = fsp->fnum;
        lock.lock_type = *plock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                const struct lock_struct *exlock = &locks[i];
                bool conflict = False;

                if (exlock->lock_flav == WINDOWS_LOCK) {
                        conflict = brl_conflict(exlock, &lock);
                } else {
                        conflict = brl_conflict_posix(exlock, &lock);
                }

                if (conflict) {
                        *psmblctx = exlock->context.smblctx;
                        *pstart = exlock->start;
                        *psize = exlock->size;
                        *plock_type = exlock->lock_type;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         */

        if(lp_posix_locking(fsp->conn->params)) {
                bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

                DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for %s file %s\n",
                          (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
                          fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

                if (ret) {
                        /* Hmmm. No clue what to set smblctx to - use -1. */
                        *psmblctx = 0xFFFFFFFFFFFFFFFFLL;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}

bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
                                     struct byte_range_lock *br_lck,
                                     struct lock_struct *plock,
                                     struct blocking_lock_record *blr)
{
        VFS_FIND(brl_cancel_windows);
        return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock, blr);
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

bool brl_lock_cancel(struct byte_range_lock *br_lck,
                     uint64_t smblctx,
                     struct server_id pid,
                     br_off start,
                     br_off size,
                     enum brl_flavour lock_flav,
                     struct blocking_lock_record *blr)
{
        bool ret;
        struct lock_struct lock;

        lock.context.smblctx = smblctx;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_flav = lock_flav;
        /* lock.lock_type doesn't matter */

        if (lock_flav == WINDOWS_LOCK) {
                ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
                                                 &lock, blr);
        } else {
                ret = brl_lock_cancel_default(br_lck, &lock);
        }

        return ret;
}

bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
                             struct lock_struct *plock)
{
        unsigned int i;
        struct lock_struct *locks = br_lck->lock_data;

        SMB_ASSERT(plock);

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                /* For pending locks we *always* care about the fnum. */
                if (brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    IS_PENDING_LOCK(lock->lock_type) &&
                    lock->lock_flav == plock->lock_flav &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* Didn't find it. */
                return False;
        }

        if (i < br_lck->num_locks - 1) {
                /* Found this particular pending lock - delete it */
                memmove(&locks[i], &locks[i+1],
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;
        return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
 All locks owned by this process context on this fnum are unlocked here,
 after which the fd can safely be closed.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
                    struct byte_range_lock *br_lck)
{
        files_struct *fsp = br_lck->fsp;
        uint32_t tid = fsp->conn->cnum;
        uint64_t fnum = fsp->fnum;
        unsigned int i;
        struct lock_struct *locks = br_lck->lock_data;
        struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
        struct lock_struct *locks_copy;
        unsigned int num_locks_copy;

        /* Copy the current lock array. */
        if (br_lck->num_locks) {
                locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
                if (!locks_copy) {
                        smb_panic("brl_close_fnum: talloc failed");
                }
        } else {
                locks_copy = NULL;
        }

        num_locks_copy = br_lck->num_locks;

        for (i=0; i < num_locks_copy; i++) {
                struct lock_struct *lock = &locks_copy[i];

                if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
                    (lock->fnum == fnum)) {
                        brl_unlock(msg_ctx,
                                   br_lck,
                                   lock->context.smblctx,
                                   pid,
                                   lock->start,
                                   lock->size,
                                   lock->lock_flav);
                }
        }
}

bool brl_mark_disconnected(struct files_struct *fsp)
{
        uint32_t tid = fsp->conn->cnum;
        uint64_t smblctx = fsp->op->global->open_persistent_id;
        uint64_t fnum = fsp->fnum;
        unsigned int i;
        struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
        struct byte_range_lock *br_lck = NULL;

        if (!fsp->op->global->durable) {
                return false;
        }

        if (fsp->current_lock_count == 0) {
                return true;
        }

        br_lck = brl_get_locks(talloc_tos(), fsp);
        if (br_lck == NULL) {
                return false;
        }

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &br_lck->lock_data[i];

                /*
                 * as this is a durable handle, we only expect locks
                 * of the current file handle!
                 */

                if (lock->context.smblctx != smblctx) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                if (lock->context.tid != tid) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                if (!serverid_equal(&lock->context.pid, &self)) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                if (lock->fnum != fnum) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                server_id_set_disconnected(&lock->context.pid);
                lock->context.tid = TID_FIELD_INVALID;
                lock->fnum = FNUM_FIELD_INVALID;
        }

        br_lck->modified = true;
        TALLOC_FREE(br_lck);
        return true;
}

bool brl_reconnect_disconnected(struct files_struct *fsp)
{
        uint32_t tid = fsp->conn->cnum;
        uint64_t smblctx = fsp->op->global->open_persistent_id;
        uint64_t fnum = fsp->fnum;
        unsigned int i;
        struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
        struct byte_range_lock *br_lck = NULL;

        if (!fsp->op->global->durable) {
                return false;
        }

        /* we want to validate ourselves */
        fsp->lockdb_clean = true;

        br_lck = brl_get_locks(talloc_tos(), fsp);
        if (br_lck == NULL) {
                return false;
        }

        if (br_lck->num_locks == 0) {
                TALLOC_FREE(br_lck);
                return true;
        }

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &br_lck->lock_data[i];

                /*
                 * as this is a durable handle we only expect locks
                 * of the current file handle!
                 */

                if (lock->context.smblctx != smblctx) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                if (lock->context.tid != TID_FIELD_INVALID) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                if (!server_id_is_disconnected(&lock->context.pid)) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                if (lock->fnum != FNUM_FIELD_INVALID) {
                        TALLOC_FREE(br_lck);
                        return false;
                }

                lock->context.pid = self;
                lock->context.tid = tid;
                lock->fnum = fnum;
        }

        fsp->current_lock_count = br_lck->num_locks;
        br_lck->modified = true;
        TALLOC_FREE(br_lck);
        return true;
}

/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/

static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
        unsigned int i;
        unsigned int num_valid_entries = 0;
        struct lock_struct *locks = *pplocks;

        for (i = 0; i < *pnum_entries; i++) {
                struct lock_struct *lock_data = &locks[i];
                if (!serverid_exists(&lock_data->context.pid)) {
                        /* This process no longer exists - mark this
                           entry as invalid by zeroing it. */
                        ZERO_STRUCTP(lock_data);
                } else {
                        num_valid_entries++;
                }
        }

        if (num_valid_entries != *pnum_entries) {
                struct lock_struct *new_lock_data = NULL;

                if (num_valid_entries) {
                        new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
                        if (!new_lock_data) {
                                DEBUG(3, ("malloc fail\n"));
                                return False;
                        }

                        num_valid_entries = 0;
                        for (i = 0; i < *pnum_entries; i++) {
                                struct lock_struct *lock_data = &locks[i];
                                if (lock_data->context.smblctx &&
                                    lock_data->context.tid) {
                                        /* Valid (nonzero) entry - copy it. */
                                        memcpy(&new_lock_data[num_valid_entries],
                                               lock_data, sizeof(struct lock_struct));
                                        num_valid_entries++;
                                }
                        }
                }

                SAFE_FREE(*pplocks);
                *pplocks = new_lock_data;
                *pnum_entries = num_valid_entries;
        }

        return True;
}

struct brl_forall_cb {
        void (*fn)(struct file_id id, struct server_id pid,
                   enum brl_type lock_type,
                   enum brl_flavour lock_flav,
                   br_off start, br_off size,
                   void *private_data);
        void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int brl_traverse_fn(struct db_record *rec, void *state)
{
        struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
        struct lock_struct *locks;
        struct file_id *key;
        unsigned int i;
        unsigned int num_locks = 0;
        unsigned int orig_num_locks = 0;
        TDB_DATA dbkey;
        TDB_DATA value;

        dbkey = dbwrap_record_get_key(rec);
        value = dbwrap_record_get_value(rec);

        /* In a traverse function we must make a copy of
           dbuf before modifying it. */

        locks = (struct lock_struct *)memdup(value.dptr, value.dsize);
        if (!locks) {
                return -1; /* Terminate traversal. */
        }

        key = (struct file_id *)dbkey.dptr;
        orig_num_locks = num_locks = value.dsize/sizeof(*locks);

        /* Ensure the lock db is clean of entries from invalid processes. */

        if (!validate_lock_entries(&num_locks, &locks)) {
                SAFE_FREE(locks);
                return -1; /* Terminate traversal */
        }

        if (orig_num_locks != num_locks) {
                if (num_locks) {
                        TDB_DATA data;
                        data.dptr = (uint8_t *)locks;
                        data.dsize = num_locks*sizeof(struct lock_struct);
                        dbwrap_record_store(rec, data, TDB_REPLACE);
                } else {
                        dbwrap_record_delete(rec);
                }
        }

        if (cb->fn) {
                for ( i=0; i<num_locks; i++) {
                        cb->fn(*key,
                               locks[i].context.pid,
                               locks[i].lock_type,
                               locks[i].lock_flav,
                               locks[i].start,
                               locks[i].size,
                               cb->private_data);
                }
        }

        SAFE_FREE(locks);
        return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
                          enum brl_type lock_type,
                          enum brl_flavour lock_flav,
                          br_off start, br_off size,
                          void *private_data),
               void *private_data)
{
        struct brl_forall_cb cb;
        NTSTATUS status;
        int count = 0;

        if (!brlock_db) {
                return 0;
        }
        cb.fn = fn;
        cb.private_data = private_data;
        status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

        if (!NT_STATUS_IS_OK(status)) {
                return -1;
        } else {
                return count;
        }
}
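
#if 0
/*
 * Illustrative sketch only (kept out of the build): counting every lock in
 * brlock.tdb via brl_forall(). The callback signature matches the
 * declaration above; the helper names are invented for the example.
 */
static void example_count_cb(struct file_id id, struct server_id pid,
                             enum brl_type lock_type,
                             enum brl_flavour lock_flav,
                             br_off start, br_off size,
                             void *private_data)
{
        unsigned int *counter = (unsigned int *)private_data;
        (*counter)++;
}

static int example_count_locks(void)
{
        unsigned int counter = 0;
        if (brl_forall(example_count_cb, &counter) < 0) {
                return -1;
        }
        return (int)counter;
}
#endif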

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
        if (br_lck->read_only) {
                SMB_ASSERT(!br_lck->modified);
        }

        if (!br_lck->modified) {
                goto done;
        }

        if (br_lck->num_locks == 0) {
                /* No locks - delete this entry. */
                NTSTATUS status = dbwrap_record_delete(br_lck->record);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(0, ("delete_rec returned %s\n",
                                  nt_errstr(status)));
                        smb_panic("Could not delete byte range lock entry");
                }
        } else {
                TDB_DATA data;
                NTSTATUS status;

                data.dptr = (uint8 *)br_lck->lock_data;
                data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

                status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(0, ("store returned %s\n", nt_errstr(status)));
                        smb_panic("Could not store byte range mode entry");
                }
        }

 done:

        br_lck->read_only = true;
        br_lck->modified = false;

        TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
        byte_range_lock_flush(br_lck);
        SAFE_FREE(br_lck->lock_data);
        return 0;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
                                                      files_struct *fsp, bool read_only)
{
        TDB_DATA key, data;
        struct byte_range_lock *br_lck = talloc(mem_ctx, struct byte_range_lock);
        bool do_read_only = read_only;

        if (br_lck == NULL) {
                return NULL;
        }

        br_lck->fsp = fsp;
        br_lck->num_locks = 0;
        br_lck->modified = False;
        br_lck->key = fsp->file_id;

        key.dptr = (uint8 *)&br_lck->key;
        key.dsize = sizeof(struct file_id);

        if (!fsp->lockdb_clean) {
                /* We must be read/write to clean
                   the dead entries. */
                do_read_only = false;
        }

        if (do_read_only) {
                NTSTATUS status;
                status = dbwrap_fetch(brlock_db, br_lck, key, &data);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(3, ("Could not fetch byte range lock record\n"));
                        TALLOC_FREE(br_lck);
                        return NULL;
                }
                br_lck->record = NULL;
        } else {
                br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

                if (br_lck->record == NULL) {
                        DEBUG(3, ("Could not lock byte range lock entry\n"));
                        TALLOC_FREE(br_lck);
                        return NULL;
                }

                data = dbwrap_record_get_value(br_lck->record);
        }

        br_lck->read_only = do_read_only;
        br_lck->lock_data = NULL;

        talloc_set_destructor(br_lck, byte_range_lock_destructor);

        br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

        if (br_lck->num_locks != 0) {
                br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
                                                     br_lck->num_locks);
                if (br_lck->lock_data == NULL) {
                        DEBUG(0, ("malloc failed\n"));
                        TALLOC_FREE(br_lck);
                        return NULL;
                }

                memcpy(br_lck->lock_data, data.dptr, data.dsize);
        }

        if (!fsp->lockdb_clean) {
                int orig_num_locks = br_lck->num_locks;

                /* This is the first time we've accessed this. */
                /* Go through and ensure all entries exist - remove any that don't. */
                /* Makes the lockdb self cleaning at low cost. */

                if (!validate_lock_entries(&br_lck->num_locks,
                                           &br_lck->lock_data)) {
                        SAFE_FREE(br_lck->lock_data);
                        TALLOC_FREE(br_lck);
                        return NULL;
                }

                /* Ensure invalid locks are cleaned up in the destructor. */
                if (orig_num_locks != br_lck->num_locks) {
                        br_lck->modified = True;
                }

                /* Mark the lockdb as "clean" as seen from this open file. */
                fsp->lockdb_clean = True;
        }

        if (DEBUGLEVEL >= 10) {
                unsigned int i;
                struct lock_struct *locks = br_lck->lock_data;
                DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
                          br_lck->num_locks,
                          file_id_string_tos(&fsp->file_id)));
                for( i = 0; i < br_lck->num_locks; i++) {
                        print_lock_struct(i, &locks[i]);
                }
        }

        if (do_read_only != read_only) {
                /*
                 * this stores the record and gets rid of
                 * the write lock that is needed for a cleanup
                 */
                byte_range_lock_flush(br_lck);
        }

        return br_lck;
}

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
                                      files_struct *fsp)
{
        return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
        struct byte_range_lock *br_lock;

        if (lp_clustering()) {
                return brl_get_locks_internal(talloc_tos(), fsp, true);
        }

        if ((fsp->brlock_rec != NULL)
            && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
                return fsp->brlock_rec;
        }

        TALLOC_FREE(fsp->brlock_rec);

        br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
        if (br_lock == NULL) {
                return NULL;
        }
        fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

        fsp->brlock_rec = talloc_move(fsp, &br_lock);

        return fsp->brlock_rec;
}
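
/*
 * Illustrative note: the cache above is only valid because brlock.tdb is
 * opened with TDB_SEQNUM when not clustering (see brl_init). Every write
 * to the database bumps the tdb sequence number, so a cached
 * fsp->brlock_rec may be reused exactly as long as dbwrap_get_seqnum()
 * still returns the value recorded in fsp->brlock_seqnum.
 */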

struct brl_revalidate_state {
        ssize_t array_size;
        uint32 num_pids;
        struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
                                   enum brl_type lock_type,
                                   enum brl_flavour lock_flav,
                                   br_off start, br_off size,
                                   void *private_data)
{
        struct brl_revalidate_state *state =
                (struct brl_revalidate_state *)private_data;

        if (!IS_PENDING_LOCK(lock_type)) {
                return;
        }

        add_to_large_array(state, sizeof(pid), (void *)&pid,
                           &state->pids, &state->num_pids,
                           &state->array_size);
}

/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
        const struct server_id *i1 = (const struct server_id *)p1;
        const struct server_id *i2 = (const struct server_id *)p2;

        if (i1->pid < i2->pid) return -1;
        if (i1->pid > i2->pid) return 1;
        return 0;
}

/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

void brl_revalidate(struct messaging_context *msg_ctx,
                    void *private_data,
                    uint32_t msg_type,
                    struct server_id server_id,
                    DATA_BLOB *data)
{
        struct brl_revalidate_state *state;
        uint32 i;
        struct server_id last_pid;

        if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
                DEBUG(0, ("talloc failed\n"));
                return;
        }

        brl_forall(brl_revalidate_collect, state);

        if (state->array_size == -1) {
                DEBUG(0, ("talloc failed\n"));
                goto done;
        }

        if (state->num_pids == 0) {
                goto done;
        }

        TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

        ZERO_STRUCT(last_pid);

        for (i=0; i<state->num_pids; i++) {
                if (serverid_equal(&last_pid, &state->pids[i])) {
                        /*
                         * We've seen that one already
                         */
                        continue;
                }

                messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
                               &data_blob_null);
                last_pid = state->pids[i];
        }

 done:
        TALLOC_FREE(state);
        return;
}