/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"

#define DBGC_CLASS DBGC_LOCKING

/* The open brlock.tdb database. */

static struct db_context *brlock_db;

struct byte_range_lock {
	struct files_struct *fsp;
	TALLOC_CTX *req_mem_ctx;
	const struct GUID *req_guid;
	unsigned int num_locks;
	bool modified;
	struct lock_struct *lock_data;
	struct db_record *record;
};

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	struct server_id_buf tmp;

	DBG_DEBUG("[%u]: smblctx = %"PRIu64", tid = %"PRIu32", pid = %s, "
		  "start = %"PRIu64", size = %"PRIu64", fnum = %"PRIu64", "
		  "%s %s\n",
		  i,
		  pls->context.smblctx,
		  pls->context.tid,
		  server_id_str_buf(pls->context.pid, &tmp),
		  pls->start,
		  pls->size,
		  pls->fnum,
		  lock_type_name(pls->lock_type),
		  lock_flav_name(pls->lock_flav));
}

unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}

struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}

TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
{
	if (brl->req_mem_ctx == NULL) {
		return talloc_get_type_abort(brl, struct byte_range_lock);
	}

	return brl->req_mem_ctx;
}

const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
{
	if (brl->req_guid == NULL) {
		static const struct GUID brl_zero_req_guid;
		return &brl_zero_req_guid;
	}

	return brl->req_guid;
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (server_id_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}

bool byte_range_valid(uint64_t ofs, uint64_t len)
{
	uint64_t max_len = UINT64_MAX - ofs;
	uint64_t effective_len;

	/*
	 * [MS-FSA] specifies this:
	 *
	 * If (((FileOffset + Length - 1) < FileOffset) && Length != 0) {
	 *   return STATUS_INVALID_LOCK_RANGE
	 * }
	 *
	 * We avoid integer wrapping and calculate
	 * max and effective len instead.
	 */

	if (len == 0) {
		return true;
	}

	effective_len = len - 1;
	if (effective_len <= max_len) {
		return true;
	}

	return false;
}
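
/*
 * Worked example for byte_range_valid() (illustrative values only):
 *
 *   byte_range_valid(0, 0)                -> true  (len 0 is always valid)
 *   byte_range_valid(10, UINT64_MAX - 9)  -> true  (last byte is UINT64_MAX)
 *   byte_range_valid(10, UINT64_MAX - 8)  -> false (last byte would wrap)
 *
 * i.e. a range is valid iff ofs + len - 1 fits into 64 bits, which is
 * what the max_len/effective_len comparison checks without wrapping.
 */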

bool byte_range_overlap(uint64_t ofs1,
			uint64_t len1,
			uint64_t ofs2,
			uint64_t len2)
{
	uint64_t last1;
	uint64_t last2;
	bool valid;

	/*
	 * This is based on [MS-FSA] 2.1.4.10
	 * Algorithm for Determining If a Range Access
	 * Conflicts with Byte-Range Locks
	 */

	/*
	 * The {0, 0} range doesn't conflict with any byte-range lock
	 */
	if (ofs1 == 0 && len1 == 0) {
		return false;
	}
	if (ofs2 == 0 && len2 == 0) {
		return false;
	}

	/*
	 * The caller should have checked that the ranges are
	 * valid. But currently we gracefully handle
	 * the overflow of a read/write check.
	 */
	valid = byte_range_valid(ofs1, len1);
	if (valid) {
		last1 = ofs1 + len1 - 1;
	} else {
		last1 = UINT64_MAX;
	}

	valid = byte_range_valid(ofs2, len2);
	if (valid) {
		last2 = ofs2 + len2 - 1;
	} else {
		last2 = UINT64_MAX;
	}

	/*
	 * If one range starts after the last
	 * byte of the other range, there's
	 * no overlap.
	 */
	if (ofs1 > last2) {
		return false;
	}
	if (ofs2 > last1) {
		return false;
	}

	return true;
}
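
/*
 * Worked example for byte_range_overlap() (illustrative values only):
 *
 *   byte_range_overlap(0, 10, 5, 10)  -> true   (bytes 5-9 are shared)
 *   byte_range_overlap(0, 10, 10, 5)  -> false  (merely adjacent ranges)
 *   byte_range_overlap(0, 0, 5, 10)   -> false  ({0, 0} never conflicts)
 */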

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	return byte_range_overlap(lck1->start,
				  lck1->size,
				  lck2->start,
				  lck2->size);
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return false;
	}

	return brl_overlap(lck1, lck2);
}
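
/*
 * Conflict summary for two overlapping Windows-flavour locks (derived
 * from the checks in brl_conflict() above):
 *
 *   existing \ proposed |  READ                         |  WRITE
 *   --------------------+-------------------------------+-----------
 *   READ                |  no conflict                  |  conflict
 *   WRITE               |  no conflict only if same     |  conflict
 *                       |  context and fnum (stacking)  |
 *
 * Non-overlapping ranges never conflict.
 */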

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return false;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return false;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return true;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return false;
	}

	return true;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lock,
			       const struct lock_struct *rw_probe)
{
	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
		return false;
	}

	if (lock->lock_flav == POSIX_LOCK &&
	    rw_probe->lock_flav == POSIX_LOCK) {
		/*
		 * POSIX flavour locks never conflict here - this is only
		 * called in the read/write path.
		 */
		return false;
	}

	if (!brl_overlap(lock, rw_probe)) {
		/*
		 * I/O can only conflict when overlapping a lock, thus let it
		 * pass.
		 */
		return false;
	}

	if (!brl_same_context(&lock->context, &rw_probe->context)) {
		/*
		 * Different process, conflict.
		 */
		return true;
	}

	if (lock->fnum != rw_probe->fnum) {
		/*
		 * Different file handle, conflict.
		 */
		return true;
	}

	if ((lock->lock_type == READ_LOCK) &&
	    (rw_probe->lock_type == WRITE_LOCK)) {
		/*
		 * Incoming WRITE locks conflict with existing READ locks even
		 * if the context is the same. JRA. See LOCKTEST7 in
		 * smbtorture.
		 */
		return true;
	}

	/*
	 * I/O request compatible with existing lock, let it pass without
	 * conflict.
	 */
	return false;
}

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;
	char *db_path;

	if (brlock_db) {
		return;
	}

	tdb_flags = SMBD_VOLATILE_TDB_FLAGS | TDB_SEQNUM;

	db_path = lock_path(talloc_tos(), "brlock.tdb");
	if (db_path == NULL) {
		DEBUG(0, ("out of memory!\n"));
		return;
	}

	brlock_db = db_open(NULL, db_path,
			    SMBD_VOLATILE_TDB_HASH_SIZE, tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 db_path));
		TALLOC_FREE(db_path);
		return;
	}
	TALLOC_FREE(db_path);
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}

/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return NUMERIC_CMP(lck1->start, lck2->start);
	}
	return NUMERIC_CMP(lck1->size, lck2->size);
}

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;
	bool valid;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	valid = byte_range_valid(plock->start, plock->size);
	if (!valid) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
		    locks[i].size == 0) {
			break;
		}
#endif
	}

	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(fsp->conn->params)) {
		int errno_ret;

		if (!set_posix_lock_windows_flavour(fsp,
						    plock->start,
						    plock->size,
						    plock->lock_type,
						    &plock->context,
						    locks,
						    br_lck->num_locks,
						    &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = true;

	return NT_STATUS_OK;
 fail:
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return status;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR....
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------+
        | plock   |
        +---------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------+
                | plock   |
                +---------+

BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+

BECOMES....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR.... (merge)
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");

	/* Keep some compilers happy. */
	return 0;
}
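
/*
 * Worked example for brlock_posix_split_merge(), same context, with an
 * illustrative existing READ lock ex = [0, 100) and a proposed WRITE
 * lock plock = [40, 60):
 *
 *   This is the "complete overlap" case with differing lock types, so
 *   ex is split into lck_arr[0] = [0, 40) and lck_arr[1] = [60, 100)
 *   and 2 is returned. The caller then inserts plock itself, leaving
 *   READ [0, 40), WRITE [40, 60), READ [60, 100).
 *
 * With a proposed READ lock instead, the same-type branch merges:
 * plock is widened to [0, 100) and 0 is returned.
 */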

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
						  plock->start,
						  plock->size,
						  plock->lock_type,
						  &plock->context,
						  &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	br_lck->modified = true;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
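
/*
 * Sizing note: the num_locks + 2 worst case in brl_lock_posix() comes
 * from the "complete overlap" split above. For example, one existing
 * READ lock [0, 100) plus an incoming same-context WRITE lock [40, 60)
 * ends up as three entries: [0, 40), [40, 60) and [60, 100).
 */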

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(
	struct byte_range_lock *br_lck,
	uint64_t smblctx,
	struct server_id pid,
	br_off start,
	br_off size,
	enum brl_type lock_type,
	enum brl_flavour lock_flav,
	struct server_id *blocker_pid,
	uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*blocker_pid = lock.context.pid;
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return false;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	ARRAY_DEL_ELEMENT(locks, i, br_lck->num_locks);
	br_lck->num_locks -= 1;
	br_lck->modified = true;

	/* Unlock the underlying POSIX regions. */
	if (lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
						   plock->start,
						   plock->size,
						   deleted_lock_type,
						   &plock->context,
						   locks,
						   br_lck->num_locks);
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return true;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = false;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return false;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return false;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return false;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
				       sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return true;
	}

	/* Unlock any POSIX regions. */
	if (lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						 plock->start,
						 plock->size,
						 &plock->context,
						 tp,
						 count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return false;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	br_lck->modified = true;

	return true;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = true;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly */
				return false;
			}

			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return false;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if (lp_posix_locking(fsp->conn->params) &&
	    (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */
		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		       uint64_t *psmblctx,
		       struct server_id pid,
		       br_off *pstart,
		       br_off *psize,
		       enum brl_type *plock_type,
		       enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = false;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if (lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)*pstart,
			   (uintmax_t)*psize, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}

/****************************************************************************
 Remove any locks associated with an open file.
 We return True if this process owns any other Windows locks on this
 fd and so we should not immediately close the fd.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(
			br_lck, locks,
			br_lck->num_locks * sizeof(struct lock_struct));
		if (locks_copy == NULL) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid &&
		    server_id_equal(&lock->context.pid, &pid) &&
		    (lock->fnum == fnum)) {
			brl_unlock(
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}

bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}

bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}

struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (locks == NULL) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (brlock_db == NULL) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	}
	return count;
}

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data = {
			.dsize = br_lck->num_locks * sizeof(struct lock_struct),
			.dptr = (uint8_t *)br_lck->lock_data,
		};
		NTSTATUS status;

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
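
/*
 * Note: because brl_get_locks() installs this destructor, dropping the
 * last talloc reference (TALLOC_FREE(br_lck)) both writes any modified
 * lock data back to brlock.tdb and releases the locked record, so
 * callers never store explicitly.
 */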

static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
{
	size_t data_len;

	if (data.dsize == 0) {
		return true;
	}
	if (data.dsize % sizeof(struct lock_struct) != 0) {
		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
		return false;
	}

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
	if (br_lck->lock_data == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	return true;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct file_id_buf buf;
		struct lock_struct *locks = br_lck->lock_data;
		DBG_DEBUG("%u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_str_buf(fsp->file_id, &buf));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
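
/*
 * Typical caller pattern (see e.g. brl_mark_disconnected() above):
 *
 *   struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
 *   if (br_lck == NULL) {
 *           return false;
 *   }
 *   ... inspect or modify br_lck->lock_data, setting br_lck->modified ...
 *   TALLOC_FREE(br_lck);  // destructor flushes and unlocks the record
 */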

struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
						  files_struct *fsp,
						  TALLOC_CTX *req_mem_ctx,
						  const struct GUID *req_guid)
{
	struct byte_range_lock *br_lck = NULL;

	br_lck = brl_get_locks(mem_ctx, fsp);
	if (br_lck == NULL) {
		return NULL;
	}
	SMB_ASSERT(req_mem_ctx != NULL);
	br_lck->req_mem_ctx = req_mem_ctx;
	SMB_ASSERT(req_guid != NULL);
	br_lck->req_guid = req_guid;

	return br_lck;
}

struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};

static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lck == NULL) {
		*state->br_lock = NULL;
		return;
	}
	*br_lck = (struct byte_range_lock) { 0 };
	if (!brl_parse_data(br_lck, data)) {
		*state->br_lock = NULL;
		return;
	}
	*state->br_lock = br_lck;
}

struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc_zero(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}
	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}

bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	struct file_id_buf buf;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DBG_INFO("failed to fetch record for file %s\n",
			 file_id_str_buf(fid, &buf));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct *)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DBG_DEBUG("no byte range locks for file %s\n",
			  file_id_str_buf(fid, &buf));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DBG_INFO("byte range lock "
				 "%s used by server %s, do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 server_id_str_buf(ctx->pid, &tmp));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DBG_INFO("byte range lock %s expected smblctx %"PRIu64" "
				 "but found %"PRIu64", do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 open_persistent_id, ctx->smblctx);
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_INFO("failed to delete record "
			 "for file %s from %s, open %"PRIu64": %s\n",
			 file_id_str_buf(fid, &buf),
			 dbwrap_name(brlock_db),
			 open_persistent_id, nt_errstr(status));
		goto done;
	}

	DBG_DEBUG("file %s cleaned up %u entries from open %"PRIu64"\n",
		  file_id_str_buf(fid, &buf),
		  num, open_persistent_id);

	ret = true;
done:
	talloc_free(frame);
	return ret;
}