/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %s, ",
			i,
			(unsigned int)pls->context.smbpid,
			(unsigned int)pls->context.tid,
			procid_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smbpid == ctx2->smbpid) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
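/*
 * Worked example (illustrative only, not part of the locking logic itself):
 * with lck1 = { start=100, size=50 } and lck2 = { start=149, size=10 },
 * neither 100 >= 149+10 nor 149 >= 100+50 holds, so the ranges overlap.
 * With lck2 = { start=150, size=10 } the second test (150 >= 150) fires and
 * the half-open ranges [100,150) and [150,160) do not overlap. The extra
 * start/size equality check above only matters for ranges whose start+size
 * wraps past the end of the 64 bit file space.
 */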
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
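/*
 * Conflict summary for brl_conflict() (illustrative): for overlapping
 * ranges, READ vs READ never conflicts, WRITE vs WRITE always conflicts,
 * and an existing WRITE lock only lets a new READ lock stack on top of it
 * when both come from the same context (pid/smbpid/tid) *and* the same
 * fnum - the Windows "read lock on top of my own write lock" behaviour.
 */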
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know here.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
				const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context can't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
					lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
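/*
 * Illustrative check: an unlock of [100,200) (start=100, size=100) overlaps
 * a pending lock starting at 150 via the first test (100 <= 150 && 200 > 150),
 * and a pending lock covering [50,120) via the second (100 >= 50 && 100 <= 120);
 * a pending lock at [300,310) matches neither, so no wakeup message is sent.
 */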
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
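/*
 * Example of the error-code dance above (illustrative): a first failed lock
 * at start=1000 returns NT_STATUS_LOCK_NOT_GRANTED and is remembered in
 * fsp->last_lock_failure; an immediate retry of the same start by the same
 * context then returns NT_STATUS_FILE_LOCK_CONFLICT. Offsets at or beyond
 * 0xEF000000 (with the top bit clear) always return the conflict code,
 * matching the observed w2k3 behaviour described above.
 */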
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(), tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
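/*
 * Illustrative ordering: with this comparator, qsort() places
 * { start=10, size=5 } before { start=20, size=5 }, and for equal starts
 * the smaller size sorts first. The qsort() call in brl_lock() below uses
 * it to keep the lock list sorted by start offset.
 */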
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
    struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smbpid = locks[i].context.smbpid;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;

		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						struct lock_struct *ex,		/* existing lock. */
						struct lock_struct *plock)	/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                +---------+
                                |   ex    |
                                +---------+
        +-------+
        | plock |
        +-------+
OR....
        +---------+
        |   ex    |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
		(plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+
BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types.
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
               +-----------------------+
               |          ex           |
               +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start <= plock->start + plock->size) &&
		(ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        | ex    |   plock       | - different lock types.
        +-------+---------------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");

	/* Keep some compilers happy. */
	return 0;
}
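/*
 * Worked example (illustrative): an existing WRITE lock ex = { start=0,
 * size=100 } and a proposed READ lock plock = { start=40, size=20 } from the
 * same context fall into the "complete overlap, different types" case: the
 * existing lock is split into lck_arr[0] = { start=0, size=40 } and
 * lck_arr[1] = { start=60, size=40 }, the function returns 2, and the caller
 * inserts plock between them. Had the lock types matched, plock would simply
 * have been grown to cover { start=0, size=100 } and 0 returned.
 */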
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * this file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				SAFE_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid) ));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows(handle, br_lck, plock,
					     blocking_lock, blr);
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint32 *psmbpid,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}

	/* Quieten valgrind on test. */
	memset(&lock, '\0', sizeof(lock));

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmbpid) {
		*psmbpid = lock.context.smbpid;
	}
	return ret;
}
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid) ));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
				!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				SAFE_FREE(tp);
				return False;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid) ));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
		    br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint32 *psmbpid,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = *psmbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmbpid = exlock->context.smbpid;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smbpid to - use -1. */
			*psmbpid = 0xFFFF;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock,
				     struct blocking_lock_record *blr)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
}
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav,
		struct blocking_lock_record *blr)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blr);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
				lock->fnum == plock->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == plock->lock_flav &&
				lock->start == plock->start &&
				lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
 We return True if this process owns any other Windows locks on this
 fd and so we should not immediately close the fd.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = procid_self();
	bool unlock_individually = False;
	bool posix_level2_contention_ended = false;

	if(lp_posix_locking(fsp->conn->params)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			if (br_lck->num_locks) {
				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
				if (!locks_copy) {
					smb_panic("brl_close_fnum: talloc failed");
				}
			} else {
				locks_copy = NULL;
			}

			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(msg_ctx,
						br_lck,
						lock->context.smbpid,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		bool del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
				contend_level2_oplocks_end(br_lck->fsp,
				    LEVEL2_CONTEND_WINDOWS_BRL);
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;

				/* Only end level2 contention once for posix */
				if (!posix_level2_contention_ended) {
					posix_level2_contention_ended = true;
					contend_level2_oplocks_end(br_lck->fsp,
					    LEVEL2_CONTEND_POSIX_BRL);
				}
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					messaging_send(msg_ctx, pend_lock->context.pid,
						       MSG_SMB_UNLOCK, &data_blob_null);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}
/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/

static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!process_exists(lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smbpid &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(rec->value.dptr,
					     rec->value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)rec->key.dptr;
	orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			rec->store(rec, data, TDB_REPLACE);
		} else {
			rec->delete_rec(rec);
		}
	}

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
				locks[i].context.pid,
				locks[i].lock_type,
				locks[i].lock_flav,
				locks[i].start,
				locks[i].size,
				cb->private_data);
		}
	}

	SAFE_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}
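/*
 * Usage sketch (illustrative only; count_pending_locks and n are hypothetical
 * names, not part of this file): a brl_forall() callback is handed every lock
 * in brlock.tdb, so counting pending locks would look roughly like this:
 *
 *	static void count_pending_locks(struct file_id id, struct server_id pid,
 *					enum brl_type lock_type,
 *					enum brl_flavour lock_flav,
 *					br_off start, br_off size,
 *					void *private_data)
 *	{
 *		unsigned int *n = (unsigned int *)private_data;
 *		if (IS_PENDING_LOCK(lock_type)) {
 *			(*n)++;
 *		}
 *	}
 *
 *	unsigned int n = 0;
 *	brl_forall(count_pending_locks, &n);
 *
 * brl_revalidate_collect() below is the real in-tree example of this pattern.
 */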
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		status = br_lck->record->store(br_lck->record, data,
					       TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

 done:

	SAFE_FREE(br_lck->lock_data);
	TALLOC_FREE(br_lck->record);
	return 0;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, bool read_only)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	br_lck->key = fsp->file_id;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		read_only = False;
	}

	if (read_only) {
		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
			DEBUG(3, ("Could not fetch byte range lock record\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->record = NULL;
	} else {
		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);

		if (br_lck->record == NULL) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		data = br_lck->record->value;
	}

	br_lck->read_only = read_only;
	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
						     br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("malloc failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr, data.dsize);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		if (!validate_lock_entries(&br_lck->num_locks,
					   &br_lck->lock_data)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			br_lck->num_locks,
			file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}
	return br_lck;
}
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock;

	if (lp_clustering()) {
		return brl_get_locks_internal(talloc_tos(), fsp, true);
	}

	if ((fsp->brlock_rec != NULL)
	    && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		return fsp->brlock_rec;
	}

	TALLOC_FREE(fsp->brlock_rec);

	br_lock = brl_get_locks_internal(talloc_tos(), fsp, false);
	if (br_lock == NULL) {
		return NULL;
	}
	fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);

	fsp->brlock_rec = talloc_zero(fsp, struct byte_range_lock);
	if (fsp->brlock_rec == NULL) {
		goto fail;
	}
	fsp->brlock_rec->fsp = fsp;
	fsp->brlock_rec->num_locks = br_lock->num_locks;
	fsp->brlock_rec->read_only = true;
	fsp->brlock_rec->key = br_lock->key;

	fsp->brlock_rec->lock_data = (struct lock_struct *)
		talloc_memdup(fsp->brlock_rec, br_lock->lock_data,
			      sizeof(struct lock_struct) * br_lock->num_locks);
	if (fsp->brlock_rec->lock_data == NULL) {
		goto fail;
	}

	TALLOC_FREE(br_lock);
	return fsp->brlock_rec;
fail:
	TALLOC_FREE(br_lock);
	TALLOC_FREE(fsp->brlock_rec);
	return NULL;
}
struct brl_revalidate_state {
	ssize_t array_size;
	uint32 num_pids;
	struct server_id *pids;
};
/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}
/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (struct server_id *)p1;
	const struct server_id *i2 = (struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

static void brl_revalidate(struct messaging_context *msg_ctx,
			   void *private_data,
			   uint32_t msg_type,
			   struct server_id server_id,
			   DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32 i;
	struct server_id last_pid;

	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
	      compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (procid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}
void brl_register_msgs(struct messaging_context *msg_ctx)
{
	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
			   brl_revalidate);
}