/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "librpc/gen_ndr/messaging.h"
#include "smbd/globals.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;
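
/* Illustrative sketch (not part of the original code): a typical caller in
   smbd obtains the locking context with brl_get_locks(), attempts the byte
   range lock, and frees the context, whose destructor flushes brlock.tdb.
   The variable names (msg_ctx, smblctx, procid, start, size,
   blocker_smblctx) are hypothetical.

	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck != NULL) {
		NTSTATUS status = brl_lock(msg_ctx, br_lck,
					   smblctx, procid, start, size,
					   WRITE_LOCK, WINDOWS_LOCK,
					   false, &blocker_smblctx, NULL);
		TALLOC_FREE(br_lck);
	}
*/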

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
			i,
			(unsigned long long)pls->context.smblctx,
			(unsigned int)pls->context.tid,
			procid_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

bool brl_same_context(const struct lock_context *ctx1,
		      const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
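
/* Worked example (illustrative, not in the original): ranges are treated as
   half-open [start, start+size). With lck1 = [100, 110) and lck2 = [110, 120)
   each start lies at or past the end of the other range, so brl_overlap()
   returns False; with lck2 = [105, 120) it returns True. The extra equality
   check above exists because start+size can wrap past the end of 64-bit file
   space, in which case the arithmetic comparisons alone would give the wrong
   answer for two identical locks. */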

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
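
/* Worked example (illustrative, not in the original): two READ locks never
   conflict; a READ lock stacked on an existing WRITE lock from the same
   context and fnum is allowed; any other overlapping combination falls
   through to brl_overlap(). For instance, an existing WRITE lock at [0, 10)
   held by one smblctx conflicts with a new READ lock at [5, 15) requested
   from a different smblctx. */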

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
		    lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
	    lock->context.tid == fsp->last_lock_failure.context.tid &&
	    lock->fnum == fsp->last_lock_failure.fnum &&
	    lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
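
/* Worked example (illustrative, not in the original): if a non-blocking lock
   at offset 0x100, length 0x10 fails, the failure is remembered in
   fsp->last_lock_failure and NT_STATUS_LOCK_NOT_GRANTED is returned. If the
   same client immediately retries the identical lock on the same fnum and it
   fails again, the second attempt returns NT_STATUS_FILE_LOCK_CONFLICT
   instead, matching the observed w2k3 behaviour described above. */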

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(), tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
    struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
			plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;

		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						struct lock_struct *ex,		/* existing lock. */
						struct lock_struct *plock)	/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
 ex and plock do not overlap at all
 (ex lies entirely before or entirely after plock).
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
		(plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
ex is adjacent after plock.

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
ex is adjacent before plock.

BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        |       plock           | - same lock type.
        +-----------------------+
**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
ex overlaps the end of plock.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start <= plock->start + plock->size) &&
		(ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
ex overlaps the start of plock.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        |       plock           | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
ex completely contains plock.
        +---------------------------+
        |        ex                 |
        +---------------------------+

BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");

	/* Keep some compilers happy. */
	return 0;
}
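
/* Worked example (illustrative, not in the original): with an existing POSIX
   READ lock ex = [0, 100) and a proposed WRITE lock plock = [20, 30) from the
   same context, the "complete overlap" case applies: ex is split into
   lck_arr[0] = [0, 20) and lck_arr[1] = [30, 100), the function returns 2,
   and the caller then inserts plock between them. Had the lock types been
   the same, ex would simply have been merged into plock and 0 returned. */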

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				SAFE_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid) ));

				messaging_send(msg_ctx, pend_lock->context.pid,
						MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}

NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows(handle, br_lck, plock,
					     blocking_lock, blr);
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

#ifdef DEVELOPER
	/* Quieten valgrind on test. */
	memset(&lock, '\0', sizeof(lock));
#endif

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid) ));

			messaging_send(msg_ctx, pend_lock->context.pid,
					MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				SAFE_FREE(tp);
				return False;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid) ));

			messaging_send(msg_ctx, pend_lock->context.pid,
					MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}

bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
		    br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint64_t *psmblctx,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}

bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock,
				     struct blocking_lock_record *blr)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/
bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav,
		struct blocking_lock_record *blr)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blr);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}

bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
				lock->fnum == plock->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == plock->lock_flav &&
				lock->start == plock->start &&
				lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
 We return True if this process owns any other Windows locks on this
 fd and so we should not immediately close the fd.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = sconn_server_id(fsp->conn->sconn);
	bool unlock_individually = False;
	bool posix_level2_contention_ended = false;

	if(lp_posix_locking(fsp->conn->params)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			if (br_lck->num_locks) {
				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
				if (!locks_copy) {
					smb_panic("brl_close_fnum: talloc failed");
				}
			} else {
				locks_copy = NULL;
			}

			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(msg_ctx,
						br_lck,
						lock->context.smblctx,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		bool del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
				contend_level2_oplocks_end(br_lck->fsp,
				    LEVEL2_CONTEND_WINDOWS_BRL);
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;

				/* Only end level2 contention once for posix */
				if (!posix_level2_contention_ended) {
					posix_level2_contention_ended = true;
					contend_level2_oplocks_end(br_lck->fsp,
					    LEVEL2_CONTEND_POSIX_BRL);
				}
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					messaging_send(msg_ctx, pend_lock->context.pid,
							MSG_SMB_UNLOCK, &data_blob_null);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}

/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/
static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!serverid_exists(&lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smblctx &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}

struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(rec->value.dptr,
					     rec->value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)rec->key.dptr;
	orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			rec->store(rec, data, TDB_REPLACE);
		} else {
			rec->delete_rec(rec);
		}
	}

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
				locks[i].context.pid,
				locks[i].lock_type,
				locks[i].lock_flav,
				locks[i].start,
				locks[i].size,
				cb->private_data);
		}
	}

	SAFE_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		status = br_lck->record->store(br_lck->record, data,
					       TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

 done:

	br_lck->read_only = true;
	br_lck->modified = false;

	TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	SAFE_FREE(br_lck->lock_data);
	return 0;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, bool read_only)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
	bool do_read_only = read_only;

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	br_lck->key = fsp->file_id;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		do_read_only = false;
	}

	if (do_read_only) {
		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
			DEBUG(3, ("Could not fetch byte range lock record\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->record = NULL;
	} else {
		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);

		if (br_lck->record == NULL) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		data = br_lck->record->value;
	}

	br_lck->read_only = do_read_only;
	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
						     br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("malloc failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr, data.dsize);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		if (!validate_lock_entries(&br_lck->num_locks,
					   &br_lck->lock_data)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	if (do_read_only != read_only) {
		/*
		 * this stores the record and gets rid of
		 * the write lock that is needed for a cleanup
		 */
		byte_range_lock_flush(br_lck);
	}

	return br_lck;
}

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock;

	if (lp_clustering()) {
		return brl_get_locks_internal(talloc_tos(), fsp, true);
	}

	if ((fsp->brlock_rec != NULL)
	    && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		return fsp->brlock_rec;
	}

	TALLOC_FREE(fsp->brlock_rec);

	br_lock = brl_get_locks_internal(talloc_tos(), fsp, true);
	if (br_lock == NULL) {
		return NULL;
	}
	fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);

	fsp->brlock_rec = talloc_move(fsp, &br_lock);

	return fsp->brlock_rec;
}
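
/* Illustrative note (not in the original): outside clustering, the read-only
   path caches the parsed lock array on the fsp and keys it to the database
   sequence number. A repeated call to brl_get_locks_readonly(fsp) returns
   fsp->brlock_rec without touching brlock.tdb as long as
   brlock_db->get_seqnum() still matches fsp->brlock_seqnum; any write to the
   database bumps the seqnum and forces a re-fetch. */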

struct brl_revalidate_state {
	ssize_t array_size;
	uint32 num_pids;
	struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}

/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (struct server_id *)p1;
	const struct server_id *i2 = (struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}

/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

static void brl_revalidate(struct messaging_context *msg_ctx,
			   void *private_data,
			   uint32_t msg_type,
			   struct server_id server_id,
			   DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32 i;
	struct server_id last_pid;

	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (procid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}

void brl_register_msgs(struct messaging_context *msg_ctx)
{
	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
			   brl_revalidate);
}