/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
/* The open brlock.tdb database. */

static struct db_context *brlock_db;
struct byte_range_lock {
	struct files_struct *fsp;
	TALLOC_CTX *req_mem_ctx;
	const struct GUID *req_guid;
	unsigned int num_locks;
	bool modified;
	struct lock_struct *lock_data;
	struct db_record *record;
};
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/
static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	struct server_id_buf tmp;

	DBG_DEBUG("[%u]: smblctx = %"PRIu64", tid = %"PRIu32", pid = %s, "
		  "start = %"PRIu64", size = %"PRIu64", fnum = %"PRIu64", "
		  "%s %s\n",
		  i,
		  pls->context.smblctx,
		  pls->context.tid,
		  server_id_str_buf(pls->context.pid, &tmp),
		  pls->start,
		  pls->size,
		  pls->fnum,
		  lock_type_name(pls->lock_type),
		  lock_flav_name(pls->lock_flav));
}
unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}
struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}
TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
{
	if (brl->req_mem_ctx == NULL) {
		return talloc_get_type_abort(brl, struct byte_range_lock);
	}

	return brl->req_mem_ctx;
}
const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
{
	if (brl->req_guid == NULL) {
		static const struct GUID brl_zero_req_guid;
		return &brl_zero_req_guid;
	}

	return brl->req_guid;
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/
static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (server_id_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}
bool byte_range_valid(uint64_t ofs, uint64_t len)
{
	uint64_t max_len = UINT64_MAX - ofs;
	uint64_t effective_len;

	/*
	 * [MS-FSA] specifies this:
	 *
	 * If (((FileOffset + Length - 1) < FileOffset) && Length != 0) {
	 *   return STATUS_INVALID_LOCK_RANGE
	 * }
	 *
	 * We avoid integer wrapping and calculate
	 * max and effective len instead.
	 */

	if (len == 0) {
		return true;
	}

	effective_len = len - 1;
	if (effective_len <= max_len) {
		return true;
	}

	return false;
}
bool byte_range_overlap(uint64_t ofs1,
			uint64_t len1,
			uint64_t ofs2,
			uint64_t len2)
{
	uint64_t last1;
	uint64_t last2;
	bool valid;

	/*
	 * This is based on [MS-FSA] 2.1.4.10
	 * Algorithm for Determining If a Range Access
	 * Conflicts with Byte-Range Locks
	 */

	/*
	 * The {0, 0} range doesn't conflict with any byte-range lock
	 */
	if (ofs1 == 0 && len1 == 0) {
		return false;
	}
	if (ofs2 == 0 && len2 == 0) {
		return false;
	}

	/*
	 * The caller should have checked that the ranges are
	 * valid. But currently we gracefully handle
	 * the overflow of a read/write check.
	 */
	valid = byte_range_valid(ofs1, len1);
	if (valid) {
		last1 = ofs1 + len1 - 1;
	} else {
		last1 = UINT64_MAX;
	}
	valid = byte_range_valid(ofs2, len2);
	if (valid) {
		last2 = ofs2 + len2 - 1;
	} else {
		last2 = UINT64_MAX;
	}

	/*
	 * If one range starts after the last
	 * byte of the other range there's
	 * no conflict.
	 */
	if (ofs1 > last2) {
		return false;
	}
	if (ofs2 > last1) {
		return false;
	}

	return true;
}
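/*
 * Usage sketch (illustrative only, kept out of the build): ranges
 * [0,9] and [5,14] share bytes 5-9 and overlap; adjacent ranges do
 * not, and the {0,0} probe never overlaps anything.
 */
#if 0
static void byte_range_overlap_example(void)
{
	SMB_ASSERT(byte_range_overlap(0, 10, 5, 10));	/* bytes 5-9 shared */
	SMB_ASSERT(!byte_range_overlap(0, 5, 5, 5));	/* adjacent, no overlap */
	SMB_ASSERT(!byte_range_overlap(0, 0, 0, 100));	/* {0,0} never conflicts */
}
#endif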
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/
static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	return byte_range_overlap(lck1->start,
				  lck1->size,
				  lck2->start,
				  lck2->size);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/
static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know here.
****************************************************************************/
static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/
static bool brl_conflict_other(const struct lock_struct *lock,
			       const struct lock_struct *rw_probe)
{
	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
		return False;
	}

	if (lock->lock_flav == POSIX_LOCK &&
	    rw_probe->lock_flav == POSIX_LOCK) {
		/*
		 * POSIX flavour locks never conflict here - this is only called
		 * in the read/write path.
		 */
		return False;
	}

	if (!brl_overlap(lock, rw_probe)) {
		/*
		 * I/O can only conflict when overlapping a lock, thus let it
		 * pass
		 */
		return false;
	}

	if (!brl_same_context(&lock->context, &rw_probe->context)) {
		/*
		 * Different process, conflict
		 */
		return true;
	}

	if (lock->fnum != rw_probe->fnum) {
		/*
		 * Different file handle, conflict
		 */
		return true;
	}

	if ((lock->lock_type == READ_LOCK) &&
	    (rw_probe->lock_type == WRITE_LOCK)) {
		/*
		 * Incoming WRITE locks conflict with existing READ locks even
		 * if the context is the same. JRA. See LOCKTEST7 in
		 * smbtorture.
		 */
		return true;
	}

	/*
	 * I/O request compatible with existing lock, let it pass without
	 * conflict
	 */

	return false;
}
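/*
 * Usage sketch (illustrative only, kept out of the build): assuming two
 * WINDOWS_LOCK flavour entries over the same range, a WRITE probe from
 * the same context and fnum still conflicts with our own READ lock
 * (the LOCKTEST7 rule above), while a READ probe would pass.
 */
#if 0
static bool conflict_other_example(const struct lock_struct *my_read_lock)
{
	struct lock_struct probe = *my_read_lock;	/* same context/fnum/range */

	probe.lock_type = WRITE_LOCK;
	return brl_conflict_other(my_read_lock, &probe);	/* returns true */
}
#endif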
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/
void brl_init(bool read_only)
{
	int tdb_flags;
	char *db_path;

	if (brlock_db) {
		return;
	}

	tdb_flags =
		TDB_DEFAULT|
		TDB_VOLATILE|
		TDB_CLEAR_IF_FIRST|
		TDB_INCOMPATIBLE_HASH|
		TDB_SEQNUM;

	db_path = lock_path(talloc_tos(), "brlock.tdb");
	if (db_path == NULL) {
		DEBUG(0, ("out of memory!\n"));
		return;
	}

	brlock_db = db_open(NULL, db_path,
			    SMB_OPEN_DATABASE_TDB_HASH_SIZE, tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 db_path));
		TALLOC_FREE(db_path);
		return;
	}
	TALLOC_FREE(db_path);
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/
#if ZERO_ZERO
static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/
NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;
	bool valid;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	valid = byte_range_valid(plock->start, plock->size);
	if (!valid) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
		    locks[i].size == 0) {
			break;
		}
#endif
	}

	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/
static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         |  ex   | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                | plock         |
                +---------------+
BECOMES....
        +-------+---------------+
        |  ex   | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        | plock         |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        | plock         |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                | plock         |
                +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                | plock         |
                +---------------+

BECOMES....
        +-------+---------------+
        |  ex   | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                | plock   |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    | plock   | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        | plock                     | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
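/*
 * Usage sketch (illustrative only, kept out of the build): unlocking
 * the middle of a held range splits it. With the same locking context,
 * ex = {start=0, size=30, WRITE_LOCK} and plock = {start=10, size=10,
 * UNLOCK_LOCK} hit the "complete overlap" case: the function returns 2
 * and emits the fragments [0,10) and [20,30).
 */
#if 0
static void split_merge_example(struct lock_struct *ex,
				struct lock_struct *plock)
{
	struct lock_struct out[2];
	unsigned int n;

	n = brlock_posix_split_merge(out, ex, plock);
	/* n == 2, out[0] = {0,10}, out[1] = {20,30} for the values above */
}
#endif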
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/
static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
						  plock->start,
						  plock->size,
						  plock->lock_type,
						  &plock->context,
						  &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock);
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/
NTSTATUS brl_lock(struct byte_range_lock *br_lck,
		  uint64_t smblctx,
		  struct server_id pid,
		  br_off start,
		  br_off size,
		  enum brl_type lock_type,
		  enum brl_flavour lock_flav,
		  struct server_id *blocker_pid,
		  uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*blocker_pid = lock.context.pid;
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
static void brl_delete_lock_struct(struct lock_struct *locks,
				   unsigned num_locks,
				   unsigned del_idx)
{
	if (del_idx >= num_locks) {
		return;
	}
	memmove(&locks[del_idx], &locks[del_idx+1],
		sizeof(*locks) * (num_locks - del_idx - 1));
}
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/
bool brl_unlock_windows_default(struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	brl_delete_lock_struct(locks, br_lck->num_locks, i);
	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
						   plock->start,
						   plock->size,
						   deleted_lock_type,
						   &plock->context,
						   locks,
						   br_lck->num_locks);
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/
static bool brl_unlock_posix(struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		DEBUG(10,("brl_unlock_posix: lock is zero\n"));
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						 plock->start,
						 plock->size,
						 &plock->context,
						 tp,
						 count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	return True;
}
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows_fn(handle, br_lck, plock);
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/
bool brl_unlock(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/
bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly */
				return false;
			}

			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) &&
	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */

		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
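/*
 * Usage sketch (illustrative only, kept out of the build): probing
 * whether a Windows-flavour READ of [off, off+len) on fsp would be
 * granted. The probe construction mirrors how brl_lock() fills in
 * struct lock_struct above; the smblctx value here is a placeholder.
 */
#if 0
static bool could_read_example(files_struct *fsp, br_off off, br_off len)
{
	struct byte_range_lock *br_lck = brl_get_locks_readonly(fsp);
	struct lock_struct probe = {
		.context.smblctx = 0,	/* placeholder, real callers pass theirs */
		.context.pid = messaging_server_id(fsp->conn->sconn->msg_ctx),
		.context.tid = fsp->conn->cnum,
		.start = off,
		.size = len,
		.fnum = fsp->fnum,
		.lock_type = READ_LOCK,
		.lock_flav = WINDOWS_LOCK,
	};

	return (br_lck != NULL) && brl_locktest(br_lck, &probe);
}
#endif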
/****************************************************************************
 Query for existing locks.
****************************************************************************/
NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		       uint64_t *psmblctx,
		       struct server_id pid,
		       br_off *pstart,
		       br_off *psize,
		       enum brl_type *plock_type,
		       enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)*pstart,
			   (uintmax_t)*psize, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
/****************************************************************************
 Remove any locks associated with an open file.
****************************************************************************/
void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(
			br_lck, locks,
			br_lck->num_locks * sizeof(struct lock_struct));
		if (locks_copy == NULL) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid &&
		    server_id_equal(&lock->context.pid, &pid) &&
		    (lock->fnum == fnum)) {
			brl_unlock(
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}
bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/
static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/
int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/
static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data = {
			.dsize = br_lck->num_locks * sizeof(struct lock_struct),
			.dptr = (uint8_t *)br_lck->lock_data,
		};
		NTSTATUS status;

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
{
	size_t data_len;

	if (data.dsize == 0) {
		return true;
	}
	if (data.dsize % sizeof(struct lock_struct) != 0) {
		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
		return false;
	}

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
	if (br_lck->lock_data == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	return true;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct file_id_buf buf;
		struct lock_struct *locks = br_lck->lock_data;
		DBG_DEBUG("%u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_str_buf(fsp->file_id, &buf));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
						  files_struct *fsp,
						  TALLOC_CTX *req_mem_ctx,
						  const struct GUID *req_guid)
{
	struct byte_range_lock *br_lck = NULL;

	br_lck = brl_get_locks(mem_ctx, fsp);
	if (br_lck == NULL) {
		return NULL;
	}
	SMB_ASSERT(req_mem_ctx != NULL);
	br_lck->req_mem_ctx = req_mem_ctx;
	SMB_ASSERT(req_guid != NULL);
	br_lck->req_guid = req_guid;

	return br_lck;
}
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};
static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lck == NULL) {
		*state->br_lock = NULL;
		return;
	}
	*br_lck = (struct byte_range_lock) { 0 };
	if (!brl_parse_data(br_lck, data)) {
		*state->br_lock = NULL;
		return;
	}
	*state->br_lock = br_lck;
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status, NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc_zero(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}
	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	struct file_id_buf buf;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DBG_INFO("failed to fetch record for file %s\n",
			 file_id_str_buf(fid, &buf));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct *)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DBG_DEBUG("no byte range locks for file %s\n",
			  file_id_str_buf(fid, &buf));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DBG_INFO("byte range lock "
				 "%s used by server %s, do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 server_id_str_buf(ctx->pid, &tmp));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DBG_INFO("byte range lock %s expected smblctx %"PRIu64" "
				 "but found %"PRIu64", do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 open_persistent_id, ctx->smblctx);
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_INFO("failed to delete record "
			 "for file %s from %s, open %"PRIu64": %s\n",
			 file_id_str_buf(fid, &buf),
			 dbwrap_name(brlock_db),
			 open_persistent_id, nt_errstr(status));
		goto done;
	}

	DBG_DEBUG("file %s cleaned up %u entries from open %"PRIu64"\n",
		  file_id_str_buf(fid, &buf),
		  num, open_persistent_id);

	ret = true;
done:
	talloc_free(frame);
	return ret;
}