/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
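/*
 * Illustrative sketch (not part of the build): the typical lifecycle a
 * caller of this module goes through, assuming a connected files_struct
 * *fsp. The record is fetched and locked, lock operations are applied,
 * and the talloc destructor flushes modifications back to brlock.tdb:
 *
 *	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
 *	if (br_lck != NULL) {
 *		... brl_lock() and brl_unlock() calls against br_lck ...
 *		TALLOC_FREE(br_lck);	// flushes and unlocks the record
 *	}
 */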
#include "includes.h"
#include "system/filesys.h"
#include "lib/util/server_id.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0
/* The open brlock.tdb database. */

static struct db_context *brlock_db;

struct byte_range_lock {
	struct files_struct *fsp;
	TALLOC_CTX *req_mem_ctx;
	const struct GUID *req_guid;
	unsigned int num_locks;
	bool modified;
	struct lock_struct *lock_data;
	struct db_record *record;
};
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	struct server_id_buf tmp;

	DBG_DEBUG("[%u]: smblctx = %"PRIu64", tid = %"PRIu32", pid = %s, "
		  "start = %"PRIu64", size = %"PRIu64", fnum = %"PRIu64", "
		  "%s %s\n",
		  i,
		  pls->context.smblctx,
		  pls->context.tid,
		  server_id_str_buf(pls->context.pid, &tmp),
		  pls->start,
		  pls->size,
		  pls->fnum,
		  lock_type_name(pls->lock_type),
		  lock_flav_name(pls->lock_flav));
}
unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}

struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}
TALLOC_CTX *brl_req_mem_ctx(const struct byte_range_lock *brl)
{
	if (brl->req_mem_ctx == NULL) {
		return talloc_get_type_abort(brl, struct byte_range_lock);
	}

	return brl->req_mem_ctx;
}

const struct GUID *brl_req_guid(const struct byte_range_lock *brl)
{
	if (brl->req_guid == NULL) {
		static const struct GUID brl_zero_req_guid;
		return &brl_zero_req_guid;
	}

	return brl->req_guid;
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (server_id_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}
bool byte_range_valid(uint64_t ofs, uint64_t len)
{
	uint64_t max_len = UINT64_MAX - ofs;
	uint64_t effective_len;

	/*
	 * [MS-FSA] specifies this:
	 *
	 * If (((FileOffset + Length - 1) < FileOffset) && Length != 0) {
	 *	return STATUS_INVALID_LOCK_RANGE
	 * }
	 *
	 * We avoid integer wrapping and calculate
	 * max and effective len instead.
	 */

	if (len == 0) {
		return true;
	}

	effective_len = len - 1;
	if (effective_len <= max_len) {
		return true;
	}

	return false;
}
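/*
 * Worked example (illustration only): for ofs = UINT64_MAX - 1 and
 * len = 2, max_len = UINT64_MAX - ofs = 1 and effective_len = 1, so the
 * range is still valid. With len = 3, effective_len = 2 > max_len = 1
 * and the range is rejected - exactly the case where the naive [MS-FSA]
 * expression FileOffset + Length - 1 would have wrapped past zero.
 */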
bool byte_range_overlap(uint64_t ofs1,
			uint64_t len1,
			uint64_t ofs2,
			uint64_t len2)
{
	uint64_t last1;
	uint64_t last2;
	bool valid;

	/*
	 * This is based on [MS-FSA] 2.1.4.10
	 * Algorithm for Determining If a Range Access
	 * Conflicts with Byte-Range Locks
	 */

	/*
	 * The {0, 0} range doesn't conflict with any byte-range lock
	 */
	if (ofs1 == 0 && len1 == 0) {
		return false;
	}
	if (ofs2 == 0 && len2 == 0) {
		return false;
	}

	/*
	 * The caller should have checked that the ranges are
	 * valid. But currently we gracefully handle
	 * the overflow of a read/write check.
	 */
	valid = byte_range_valid(ofs1, len1);
	if (valid) {
		last1 = ofs1 + len1 - 1;
	} else {
		last1 = UINT64_MAX;
	}
	valid = byte_range_valid(ofs2, len2);
	if (valid) {
		last2 = ofs2 + len2 - 1;
	} else {
		last2 = UINT64_MAX;
	}

	/*
	 * If one range starts after the last
	 * byte of the other range there's
	 * no conflict.
	 */
	if (ofs1 > last2) {
		return false;
	}
	if (ofs2 > last1) {
		return false;
	}

	return true;
}
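/*
 * Illustrative checks (assumption: exercised from a unit-test style
 * harness, not part of this file's callers):
 *
 *	byte_range_overlap(0, 0, 10, 5);	// false: {0,0} never conflicts
 *	byte_range_overlap(10, 5, 14, 1);	// true:  ofs2 == last1 == 14
 *	byte_range_overlap(10, 5, 15, 1);	// false: ofs2 > last1
 */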
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	return byte_range_overlap(lck1->start,
				  lck1->size,
				  lck2->start,
				  lck2->size);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	 * context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return false;
	}

	return brl_overlap(lck1, lck2);
}
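/*
 * Example of the stacking rule above (illustration only): if one handle
 * holds a WRITE_LOCK on bytes 0-9, a READ_LOCK request for the same range
 * from the same context and the same fnum does not conflict - it stacks on
 * top of the write lock - while the identical READ_LOCK request arriving
 * on a different fnum falls through to the brl_overlap() test and fails.
 */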
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know are the same for any POSIX locks on this fsp.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return false;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return false;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return false;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return true;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return false;
	}

	return true;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lock,
			       const struct lock_struct *rw_probe)
{
	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
		return false;
	}

	if (lock->lock_flav == POSIX_LOCK &&
	    rw_probe->lock_flav == POSIX_LOCK) {
		/*
		 * POSIX flavour locks never conflict here - this is only called
		 * in the read/write path.
		 */
		return false;
	}

	if (!brl_overlap(lock, rw_probe)) {
		/*
		 * I/O can only conflict when overlapping a lock, thus let it
		 * pass
		 */
		return false;
	}

	if (!brl_same_context(&lock->context, &rw_probe->context)) {
		/*
		 * Different process, conflict
		 */
		return true;
	}

	if (lock->fnum != rw_probe->fnum) {
		/*
		 * Different file handle, conflict
		 */
		return true;
	}

	if ((lock->lock_type == READ_LOCK) &&
	    (rw_probe->lock_type == WRITE_LOCK)) {
		/*
		 * Incoming WRITE locks conflict with existing READ locks even
		 * if the context is the same. JRA. See LOCKTEST7 in
		 * smbtorture.
		 */
		return true;
	}

	/*
	 * I/O request compatible with existing lock, let it pass without
	 * conflict
	 */

	return false;
}
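/*
 * Example of the probe semantics above (illustration only): an existing
 * READ_LOCK held by this context on the same fnum lets a read probe pass,
 * but a WRITE_LOCK probe over the same range from the same handle still
 * conflicts - matching the LOCKTEST7 behaviour referenced above, where a
 * handle cannot write through its own read lock.
 */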
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;
	char *db_path;

	if (brlock_db) {
		return;
	}

	tdb_flags = SMBD_VOLATILE_TDB_FLAGS | TDB_SEQNUM;

	db_path = lock_path(talloc_tos(), "brlock.tdb");
	if (db_path == NULL) {
		DEBUG(0, ("out of memory!\n"));
		return;
	}

	brlock_db = db_open(NULL, db_path,
			    SMBD_VOLATILE_TDB_HASH_SIZE, tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 db_path));
		TALLOC_FREE(db_path);
		return;
	}
	TALLOC_FREE(db_path);
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;
	bool valid;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	valid = byte_range_valid(plock->start, plock->size);
	if (!valid) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
		    locks[i].size == 0) {
			break;
		}
#endif
	}

	contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(fsp->conn->params)) {
		int errno_ret;

		if (!set_posix_lock_windows_flavour(fsp,
						    plock->start,
						    plock->size,
						    plock->lock_type,
						    &plock->context,
						    locks,
						    br_lck->num_locks,
						    &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR....
                +---------------+
                |       ex      |
                +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
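/*
 * Worked example (illustration only): an existing READ_LOCK covering
 * bytes 0-19 hit by an unlock over bytes 5-9 takes the "complete overlap"
 * branch with differing lock types: lck_arr[0] becomes {start=0, size=5},
 * lck_arr[1] becomes {start=10, size=10}, and the function returns 2 -
 * which is why callers allocate up to two extra array slots.
 */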
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_LOCK_NOT_GRANTED;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
						  plock->start,
						  plock->size,
						  plock->lock_type,
						  &plock->context,
						  &errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_LOCK_NOT_GRANTED;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(
	struct byte_range_lock *br_lck,
	uint64_t smblctx,
	struct server_id pid,
	br_off start,
	br_off size,
	enum brl_type lock_type,
	enum brl_flavour lock_flav,
	struct server_id *blocker_pid,
	uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*blocker_pid = lock.context.pid;
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
1026 Unlock a range of bytes - Windows semantics.
1027 ****************************************************************************/
1029 bool brl_unlock_windows_default(struct byte_range_lock
*br_lck
,
1030 const struct lock_struct
*plock
)
1033 struct lock_struct
*locks
= br_lck
->lock_data
;
1034 enum brl_type deleted_lock_type
= READ_LOCK
; /* shut the compiler up.... */
1036 SMB_ASSERT(plock
->lock_type
== UNLOCK_LOCK
);
1039 /* Delete write locks by preference... The lock list
1040 is sorted in the zero zero case. */
1042 for (i
= 0; i
< br_lck
->num_locks
; i
++) {
1043 struct lock_struct
*lock
= &locks
[i
];
1045 if (lock
->lock_type
== WRITE_LOCK
&&
1046 brl_same_context(&lock
->context
, &plock
->context
) &&
1047 lock
->fnum
== plock
->fnum
&&
1048 lock
->lock_flav
== WINDOWS_LOCK
&&
1049 lock
->start
== plock
->start
&&
1050 lock
->size
== plock
->size
) {
1052 /* found it - delete it */
1053 deleted_lock_type
= lock
->lock_type
;
1058 if (i
!= br_lck
->num_locks
) {
1059 /* We found it - don't search again. */
1060 goto unlock_continue
;
1064 for (i
= 0; i
< br_lck
->num_locks
; i
++) {
1065 struct lock_struct
*lock
= &locks
[i
];
1067 /* Only remove our own locks that match in start, size, and flavour. */
1068 if (brl_same_context(&lock
->context
, &plock
->context
) &&
1069 lock
->fnum
== plock
->fnum
&&
1070 lock
->lock_flav
== WINDOWS_LOCK
&&
1071 lock
->start
== plock
->start
&&
1072 lock
->size
== plock
->size
) {
1073 deleted_lock_type
= lock
->lock_type
;
1078 if (i
== br_lck
->num_locks
) {
1079 /* we didn't find it */
1087 ARRAY_DEL_ELEMENT(locks
, i
, br_lck
->num_locks
);
1088 br_lck
->num_locks
-= 1;
1089 br_lck
->modified
= True
;
1091 /* Unlock the underlying POSIX regions. */
1092 if(lp_posix_locking(br_lck
->fsp
->conn
->params
)) {
1093 release_posix_lock_windows_flavour(br_lck
->fsp
,
1102 contend_level2_oplocks_end(br_lck
->fsp
, LEVEL2_CONTEND_WINDOWS_BRL
);
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		DEBUG(10,("brl_unlock_posix: 0/0 lock.\n"));
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
				       sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						 plock->start,
						 plock->size,
						 &plock->context,
						 tp,
						 count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	br_lck->modified = True;

	return True;
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(
			br_lck->fsp->conn, br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly */
				return false;
			}

			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) &&
	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */

		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		       uint64_t *psmblctx,
		       struct server_id pid,
		       br_off *pstart,
		       br_off *psize,
		       enum brl_type *plock_type,
		       enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)*pstart,
			   (uintmax_t)*psize, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
/****************************************************************************
 Remove any locks associated with an open file.
 We return True if this process owns any other Windows locks on this
 fd and so we should not immediately close the fd.
****************************************************************************/

void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(
			br_lck, locks,
			br_lck->num_locks * sizeof(struct lock_struct));
		if (!locks_copy) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid &&
		    server_id_equal(&lock->context.pid, &pid) &&
		    (lock->fnum == fnum)) {
			brl_unlock(
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}
bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
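/*
 * Illustrative pairing (not part of the build): on durable handle
 * disconnect the entries are parked, and on durable reconnect they are
 * reclaimed for the new smbd process:
 *
 *	if (brl_mark_disconnected(fsp)) {
 *		// locks now carry a disconnected server_id,
 *		// TID_FIELD_INVALID and FNUM_FIELD_INVALID
 *	}
 *	...
 *	if (!brl_reconnect_disconnected(fsp)) {
 *		// stale or foreign entries found - refuse the reconnect
 *	}
 */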
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (locks == NULL) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data = {
			.dsize = br_lck->num_locks * sizeof(struct lock_struct),
			.dptr = (uint8_t *)br_lck->lock_data,
		};
		NTSTATUS status;

		status = dbwrap_record_store(br_lck->record, data, TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

 done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}
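/*
 * Note on the autocleanup path above (illustration only): entries whose
 * owning process was found dead are marked with pid.pid = 0 by the
 * conflict checks and are compacted away here by overwriting the slot
 * with the last array element, so the stored record never retains locks
 * belonging to vanished smbd processes.
 */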
static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
{
	size_t data_len;

	if (data.dsize == 0) {
		return true;
	}
	if (data.dsize % sizeof(struct lock_struct) != 0) {
		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
		return false;
	}

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
	if (br_lck->lock_data == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	return true;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	key.dptr = (uint8_t *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct file_id_buf buf;
		struct lock_struct *locks = br_lck->lock_data;
		DBG_DEBUG("%u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_str_buf(fsp->file_id, &buf));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
struct byte_range_lock *brl_get_locks_for_locking(TALLOC_CTX *mem_ctx,
						  files_struct *fsp,
						  TALLOC_CTX *req_mem_ctx,
						  const struct GUID *req_guid)
{
	struct byte_range_lock *br_lck = NULL;

	br_lck = brl_get_locks(mem_ctx, fsp);
	if (br_lck == NULL) {
		return NULL;
	}
	SMB_ASSERT(req_mem_ctx != NULL);
	br_lck->req_mem_ctx = req_mem_ctx;
	SMB_ASSERT(req_guid != NULL);
	br_lck->req_guid = req_guid;

	return br_lck;
}
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};

static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lck == NULL) {
		*state->br_lock = NULL;
		return;
	}
	*br_lck = (struct byte_range_lock) { 0 };
	if (!brl_parse_data(br_lck, data)) {
		*state->br_lock = NULL;
		return;
	}
	*state->br_lock = br_lck;
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc_zero(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}

	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	/*
	 * Cache the brlock struct, invalidated when the dbwrap_seqnum
	 * changes. See beginning of this routine.
	 */
	TALLOC_FREE(fsp->brlock_rec);
	fsp->brlock_rec = br_lock;
	fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);

	return br_lock;
}
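/*
 * Cache behaviour sketch (illustration only): two consecutive read-only
 * fetches without an intervening brlock.tdb write hit the cache, because
 * the TDB_SEQNUM-backed dbwrap_get_seqnum() value is unchanged:
 *
 *	struct byte_range_lock *a = brl_get_locks_readonly(fsp);
 *	struct byte_range_lock *b = brl_get_locks_readonly(fsp);
 *	// a == b here unless another process bumped the db seqnum
 */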
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	struct file_id_buf buf;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DBG_INFO("failed to fetch record for file %s\n",
			 file_id_str_buf(fid, &buf));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct *)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DBG_DEBUG("no byte range locks for file %s\n",
			  file_id_str_buf(fid, &buf));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			struct server_id_buf tmp;
			DBG_INFO("byte range lock "
				 "%s used by server %s, do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 server_id_str_buf(ctx->pid, &tmp));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DBG_INFO("byte range lock %s expected smblctx %"PRIu64" "
				 "but found %"PRIu64", do not cleanup\n",
				 file_id_str_buf(fid, &buf),
				 open_persistent_id,
				 ctx->smblctx);
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DBG_INFO("failed to delete record "
			 "for file %s from %s, open %"PRIu64": %s\n",
			 file_id_str_buf(fid, &buf),
			 dbwrap_name(brlock_db),
			 open_persistent_id,
			 nt_errstr(status));
		goto done;
	}

	DBG_DEBUG("file %s cleaned up %u entries from open %"PRIu64"\n",
		  file_id_str_buf(fid, &buf),
		  num,
		  open_persistent_id);

	ret = true;
done:
	talloc_free(frame);
	return ret;
}