/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "system/filesys.h"
#include "locking/proto.h"
#include "smbd/globals.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;

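/* A sketch of the record layout this module assumes (illustrative,
   not authoritative): each brlock.tdb record is keyed by the file's
   struct file_id, and the record value is simply a flat array of
   struct lock_struct entries, so the lock count falls straight out
   of the record size:

	key.dptr  = (uint8 *)&br_lck->key;	(a struct file_id)
	key.dsize = sizeof(struct file_id);
	num_locks = data.dsize / sizeof(struct lock_struct);

   See brl_get_locks_internal() and byte_range_lock_flush() below for
   the real fetch and store paths. */
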
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
			i,
			(unsigned long long)pls->context.smblctx,
			(unsigned int)pls->context.tid,
			procid_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}

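/* A worked example of the interval arithmetic above, with
   hypothetical values. A lock at start S with size N covers the
   half-open range [S, S+N):

	lck1: start=100, size=10   -> covers offsets 100..109
	lck2: start=110, size=5    -> covers offsets 110..114

   Here lck2->start (110) >= lck1->start + lck1->size (110), so the
   ranges are disjoint and brl_overlap() returns False; with
   lck2->start = 109 the ranges share offset 109 and it returns True.
   The leading same-start/same-size test exists because start + size
   can wrap for locks placed at the very end of 64 bit file space. */
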
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}

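/* A sketch of the read-over-write stacking rule above, using
   hypothetical locks. Assume both describe the same context
   (smblctx/tid/pid) and the same fnum:

	lck1 (existing): WRITE_LOCK, start=0,  size=100
	lck2 (incoming): READ_LOCK,  start=50, size=10   -> no conflict

   The same incoming READ lock from a different fnum or context falls
   through to brl_overlap() and conflicts. Note the test is one-way:
   lck1 must be the WRITE lock already in place. */
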
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
				const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
					lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}

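/* Worked example with hypothetical values. Unlike brl_overlap(), the
   second test above uses <=, so an unlock that starts exactly where a
   pending lock ends still counts as overlapping:

	unlock:  start=115, size=10
	pending: start=110, size=5	-> covers offsets 110..114

   The first clause fails (115 > 110), but 115 >= 110 and
   115 <= 110 + 5, so the waiter is woken. Waking a waiter that still
   cannot get its lock is harmless - it simply blocks again. */
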
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}

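/* An illustrative call sequence (hypothetical offsets) for the
   behaviour described above, assuming non-blocking locks: the first
   failure on an fnum is remembered and returns
   NT_STATUS_LOCK_NOT_GRANTED; an identical retry then matches
   last_lock_failure and returns NT_STATUS_FILE_LOCK_CONFLICT:

	brl_lock(..., start=100, ...);	-> NT_STATUS_LOCK_NOT_GRANTED
	brl_lock(..., start=100, ...);	-> NT_STATUS_FILE_LOCK_CONFLICT

   Locks at or beyond 0xEF000000 (with the top bit clear) bypass this
   check and always generate the conflict error code. */
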
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(), tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
    struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
			plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						struct lock_struct *ex,		/* existing lock. */
						struct lock_struct *plock)	/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
		(plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        | plock                     | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start <= plock->start + plock->size) &&
		(ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}

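/* A worked example of the "complete overlap" split above, with
   hypothetical values. Existing READ lock ex = [100, 200), proposed
   WRITE lock plock = [140, 160), differing lock types:

	lck_arr[0] = [100, 140) READ	(left remainder of ex)
	lck_arr[1] = [160, 200) READ	(right remainder of ex)
	returns 2			(plock itself is added by the caller)

   This is why brl_lock_posix() below allocates num_locks + 2 entries
   and brl_unlock_posix() num_locks + 1: one existing lock can at
   worst become two remainders, plus the new lock on the lock path. */
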
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				SAFE_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid)));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}

NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows(handle, br_lck, plock,
					     blocking_lock, blr);
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}

	/* Quieten valgrind on test. */
	memset(&lock, '\0', sizeof(lock));

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid)));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				SAFE_FREE(tp);
				return False;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid)));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}

bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
		    br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint64_t *psmblctx,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}

bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock,
				     struct blocking_lock_record *blr)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/
bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav,
		struct blocking_lock_record *blr)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blr);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}

bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
				lock->fnum == plock->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == plock->lock_flav &&
				lock->start == plock->start &&
				lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}

/****************************************************************************
 Remove any locks associated with a open file.
 We return True if this process owns any other Windows locks on this
 fd and so we should not immediately close the fd.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = sconn_server_id(fsp->conn->sconn);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
		if (locks_copy == NULL) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
				(lock->fnum == fnum)) {
			brl_unlock(msg_ctx,
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}

/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/
static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!serverid_exists(&lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smblctx &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}

struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(rec->value.dptr,
					     rec->value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)rec->key.dptr;
	orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			rec->store(rec, data, TDB_REPLACE);
		} else {
			rec->delete_rec(rec);
		}
	}

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
				locks[i].context.pid,
				locks[i].lock_type,
				locks[i].lock_flav,
				locks[i].start,
				locks[i].size,
				cb->private_data);
		}
	}

	SAFE_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}

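/* A minimal usage sketch for brl_forall(), illustrative only and kept
   out of the build - brl_count_cb and brl_count_locks are hypothetical
   helpers, not part of this module's API. */
#if 0
static void brl_count_cb(struct file_id id, struct server_id pid,
			 enum brl_type lock_type,
			 enum brl_flavour lock_flav,
			 br_off start, br_off size,
			 void *private_data)
{
	unsigned int *count = (unsigned int *)private_data;

	(*count)++;
	DEBUG(1,("lock %u: start=%.0f size=%.0f %s %s\n",
		*count, (double)start, (double)size,
		lock_type_name(lock_type), lock_flav_name(lock_flav)));
}

static unsigned int brl_count_locks(void)
{
	unsigned int count = 0;

	brl_forall(brl_count_cb, &count);
	return count;
}
#endif
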
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		status = br_lck->record->store(br_lck->record, data,
					       TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

 done:

	br_lck->read_only = true;
	br_lck->modified = false;

	TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	SAFE_FREE(br_lck->lock_data);
	return 0;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, bool read_only)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
	bool do_read_only = read_only;

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	br_lck->key = fsp->file_id;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		do_read_only = false;
	}

	if (do_read_only) {
		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
			DEBUG(3, ("Could not fetch byte range lock record\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->record = NULL;
	} else {
		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);

		if (br_lck->record == NULL) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		data = br_lck->record->value;
	}

	br_lck->read_only = do_read_only;
	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
						     br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("malloc failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr, data.dsize);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		if (!validate_lock_entries(&br_lck->num_locks,
					   &br_lck->lock_data)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			br_lck->num_locks,
			file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	if (do_read_only != read_only) {
		/*
		 * this stores the record and gets rid of
		 * the write lock that is needed for a cleanup
		 */
		byte_range_lock_flush(br_lck);
	}

	return br_lck;
}

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock;

	if (lp_clustering()) {
		return brl_get_locks_internal(talloc_tos(), fsp, true);
	}

	if ((fsp->brlock_rec != NULL)
	    && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		return fsp->brlock_rec;
	}

	TALLOC_FREE(fsp->brlock_rec);

	br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
	if (br_lock == NULL) {
		return NULL;
	}
	fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);

	fsp->brlock_rec = talloc_move(fsp, &br_lock);

	return fsp->brlock_rec;
}

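/* How the read-only path above avoids database traffic, as a hedged
   sketch: with TDB_SEQNUM set (see brl_init()), every write to
   brlock.tdb bumps a global sequence number, so a cached
   fsp->brlock_rec stays valid while the seqnum it was fetched under
   is still current. Hypothetical flow:

	first call:  seqnum == 7 -> fetch, cache record, remember 7
	second call: seqnum == 7 -> cache hit, no tdb access
	a lock change anywhere bumps the seqnum to 8
	third call:  8 != 7      -> refetch and re-cache

   Clustered (ctdb) setups skip the cache entirely because seqnum
   propagation there is delayed, as noted in brl_init(). */
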
struct brl_revalidate_state {
	ssize_t array_size;
	uint32 num_pids;
	struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}

/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (struct server_id *)p1;
	const struct server_id *i2 = (struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}

/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

static void brl_revalidate(struct messaging_context *msg_ctx,
			   void *private_data,
			   uint32_t msg_type,
			   struct server_id server_id,
			   DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32 i;
	struct server_id last_pid;

	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (procid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}

void brl_register_msgs(struct messaging_context *msg_ctx)
{
	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
			   brl_revalidate);
}