/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"
#include "system/filesys.h"
#include "locking/proto.h"
#include "smbd/globals.h"
#include "dbwrap/dbwrap.h"
#include "dbwrap/dbwrap_open.h"
#include "serverid.h"
#include "messages.h"
#include "util_tdb.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
/* The open brlock.tdb database. */

static struct db_context *brlock_db;

struct byte_range_lock {
	struct files_struct *fsp;
	unsigned int num_locks;
	bool modified;
	uint32_t num_read_oplocks;
	struct lock_struct *lock_data;
	struct db_record *record;
};
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, const struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
			i,
			(unsigned long long)pls->context.smblctx,
			(unsigned int)pls->context.tid,
			server_id_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10, ("start = %ju, size = %ju, fnum = %ju, %s %s\n",
		   (uintmax_t)pls->start,
		   (uintmax_t)pls->size,
		   (uintmax_t)pls->fnum,
		   lock_type_name(pls->lock_type),
		   lock_flav_name(pls->lock_flav)));
}
unsigned int brl_num_locks(const struct byte_range_lock *brl)
{
	return brl->num_locks;
}

struct files_struct *brl_fsp(struct byte_range_lock *brl)
{
	return brl->fsp;
}

uint32_t brl_num_read_oplocks(const struct byte_range_lock *brl)
{
	return brl->num_read_oplocks;
}

void brl_set_num_read_oplocks(struct byte_range_lock *brl,
			      uint32_t num_read_oplocks)
{
	DEBUG(10, ("Setting num_read_oplocks to %"PRIu32"\n",
		   num_read_oplocks));
	SMB_ASSERT(brl->record != NULL); /* otherwise we're readonly */
	brl->num_read_oplocks = num_read_oplocks;
	brl->modified = true;
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

static bool brl_same_context(const struct lock_context *ctx1,
			     const struct lock_context *ctx2)
{
	return (serverid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
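
/*
 * Editor's illustration (not part of the original source): why the
 * equality check above matters for ranges that wrap 64-bit file space.
 * With start = 0xFFFFFFFFFFFFFFFF and size = 1, start + size wraps to
 * 0, so the generic interval test alone would wrongly report two
 * identical wrapped ranges as non-overlapping. The helper name is
 * hypothetical and the block is compiled out.
 */
#if 0
static bool example_overlap_wrap(void)
{
	struct lock_struct a = { .start = 0xFFFFFFFFFFFFFFFFLL, .size = 1 };
	struct lock_struct b = { .start = 0xFFFFFFFFFFFFFFFFLL, .size = 1 };

	/* Caught by the equal start/size check, not the interval test. */
	return brl_overlap(&a, &b);	/* True */
}
#endif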
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	   context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
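
/*
 * Editor's illustration (not part of the original source): the stacking
 * rule above in action. A READ lock over a WRITE lock from the same
 * context and fnum does not conflict; the same READ lock from another
 * handle would. The helper name is hypothetical and the block is
 * compiled out.
 */
#if 0
static bool example_read_stacks_on_own_write(const struct lock_context *ctx)
{
	struct lock_struct wr = {
		.context = *ctx, .fnum = 1,
		.start = 0, .size = 100, .lock_type = WRITE_LOCK,
	};
	struct lock_struct rd = wr;

	rd.lock_type = READ_LOCK;
	/* Same context and fnum: the READ stacks on the WRITE. */
	return brl_conflict(&wr, &rd);	/* False */
}
#endif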
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lock,
			       const struct lock_struct *rw_probe)
{
	if (IS_PENDING_LOCK(lock->lock_type) ||
	    IS_PENDING_LOCK(rw_probe->lock_type)) {
		return False;
	}

	if (lock->lock_type == READ_LOCK && rw_probe->lock_type == READ_LOCK) {
		/* Read locks never conflict. */
		return False;
	}

	if (lock->lock_flav == POSIX_LOCK &&
	    rw_probe->lock_flav == POSIX_LOCK) {
		/*
		 * POSIX flavour locks never conflict here - this is only called
		 * in the read/write path.
		 */
		return False;
	}

	if (!brl_overlap(lock, rw_probe)) {
		/*
		 * I/O can only conflict when overlapping a lock, thus let it
		 * pass
		 */
		return False;
	}

	if (!brl_same_context(&lock->context, &rw_probe->context)) {
		/*
		 * Different process, conflict
		 */
		return True;
	}

	if (lock->fnum != rw_probe->fnum) {
		/*
		 * Different file handle, conflict
		 */
		return True;
	}

	if ((lock->lock_type == READ_LOCK) &&
	    (rw_probe->lock_type == WRITE_LOCK)) {
		/*
		 * Incoming WRITE locks conflict with existing READ locks even
		 * if the context is the same. JRA. See LOCKTEST7 in
		 * smbtorture.
		 */
		return True;
	}

	/*
	 * I/O request compatible with existing lock, let it pass without
	 * conflict
	 */

	return False;
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start < pend_lock->start + pend_lock->size))
		return True;
	return False;
}
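
/*
 * Editor's illustration (not part of the original source): the two
 * half-open interval tests above. An unlock of [0, 10) against a
 * pending lock [5, 15): 0 <= 5 and 0 + 10 > 5, so they overlap. The
 * helper name is hypothetical and the block is compiled out.
 */
#if 0
static bool example_pending_overlap(void)
{
	struct lock_struct unlock_range = { .start = 0, .size = 10 };
	struct lock_struct pending = { .start = 5, .size = 10 };

	return brl_pending_overlap(&unlock_range, &pending);	/* True */
}
#endif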
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp,
				const struct lock_struct *lock,
				bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (serverid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
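
/*
 * Editor's illustration (not part of the original source): the magic
 * offset rule above. Any lock at or beyond 0xEF000000 with the top bit
 * clear fails with NT_STATUS_FILE_LOCK_CONFLICT regardless of the
 * previous failure. The helper name is hypothetical and the block is
 * compiled out.
 */
#if 0
static void example_magic_offset(files_struct *fsp)
{
	struct lock_struct l = {
		.start = 0xEF000000ULL,	/* >= magic offset, top bit clear */
		.size = 1,
	};

	/* Returns NT_STATUS_FILE_LOCK_CONFLICT for this range. */
	(void)brl_lock_failed(fsp, &l, false);
}
#endif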
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;
	char *db_path;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	db_path = lock_path("brlock.tdb");
	if (db_path == NULL) {
		DEBUG(0, ("out of memory!\n"));
		return;
	}

	brlock_db = db_open(NULL, db_path,
			    SMB_OPEN_DATABASE_TDB_HASH_SIZE, tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644,
			    DBWRAP_LOCK_ORDER_2, DBWRAP_FLAG_NONE);
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			 db_path));
		TALLOC_FREE(db_path);
		return;
	}
	TALLOC_FREE(db_path);
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
			plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = talloc_realloc(br_lck, locks, struct lock_struct,
			       (br_lck->num_locks + 1));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     struct lock_struct *ex,		/* existing lock. */
					     struct lock_struct *plock)		/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
		(plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------+
        | plock   |
        +---------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------+
                | plock   |
                +---------+

BECOMES....
        +-------+---------------+
        |  ex   | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        | plock         | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start <= plock->start + plock->size) &&
		(ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |          ex           |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        | ex    | plock         | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        | plock                 | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
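
/*
 * Editor's illustration (not part of the original source): the
 * complete-overlap split case. Removing the middle of an existing
 * POSIX lock leaves two remainder entries and returns 2. Values and
 * the helper name are hypothetical; the block is compiled out.
 */
#if 0
static void example_split(struct lock_struct *ex)
{
	/* Assume ex covers [0, 100) with the same context as 'un'. */
	struct lock_struct out[2];
	struct lock_struct un = *ex;

	un.start = 30;
	un.size = 10;
	un.lock_type = UNLOCK_LOCK;	/* differs from ex->lock_type */

	/* out[0] becomes [0, 30), out[1] becomes [40, 100); returns 2. */
	(void)brlock_posix_split_merge(out, ex, &un);
}
#endif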
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 2);
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				if (!serverid_exists(&curr_lock->context.pid)) {
					curr_lock->context.pid.pid = 0;
					br_lck->modified = true;
					continue;
				}
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				TALLOC_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		} else {
			break;
		}
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				TALLOC_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				TALLOC_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows_fn(handle, br_lck, plock,
						blocking_lock);
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

	lock = (struct lock_struct) {
		.context.smblctx = smblctx,
		.context.pid = pid,
		.context.tid = br_lck->fsp->conn->cnum,
		.start = start,
		.size = size,
		.fnum = br_lck->fsp->fnum,
		.lock_type = lock_type,
		.lock_flav = lock_flav
	};

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
					       &lock, blocking_lock);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
static void brl_delete_lock_struct(struct lock_struct *locks,
				   unsigned num_locks,
				   unsigned del_idx)
{
	if (del_idx >= num_locks) {
		return;
	}
	memmove(&locks[del_idx], &locks[del_idx+1],
		sizeof(*locks) * (num_locks - del_idx - 1));
}
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
				struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	brl_delete_lock_struct(locks, br_lck->num_locks, i);
	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = talloc_array(br_lck, struct lock_struct, br_lck->num_locks + 1);
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				TALLOC_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		TALLOC_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = talloc_realloc(br_lck, tp, struct lock_struct, count);
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		TALLOC_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	TALLOC_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows_fn(handle, msg_ctx, br_lck,
						  plock);
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
		    br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		  const struct lock_struct *rw_probe)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], rw_probe)) {
			if (br_lck->record == NULL) {
				/* readonly */
				return false;
			}

			if (!serverid_exists(&locks[i].context.pid)) {
				locks[i].context.pid.pid = 0;
				br_lck->modified = true;
				continue;
			}

			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) &&
	   (rw_probe->lock_flav == WINDOWS_LOCK)) {
		/*
		 * Make copies -- is_posix_locked might modify the values
		 */

		br_off start = rw_probe->start;
		br_off size = rw_probe->size;
		enum brl_type lock_type = rw_probe->lock_type;

		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10, ("brl_locktest: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)start, (uintmax_t)size,
			   ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint64_t *psmblctx,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10, ("brl_lockquery: posix start=%ju len=%ju %s for %s "
			   "file %s\n", (uintmax_t)*pstart,
			   (uintmax_t)*psize, ret ? "locked" : "unlocked",
			   fsp_fnum_dbg(fsp), fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows_fn(handle, br_lck, plock);
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/
bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
						 &lock);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
				lock->fnum == plock->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == plock->lock_flav &&
				lock->start == plock->start &&
				lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	brl_delete_lock_struct(locks, br_lck->num_locks, i);
	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
 We return True if this process owns any other Windows locks on this
 fd and so we should not immediately close the fd.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint32_t tid = fsp->conn->cnum;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct lock_struct *locks_copy;
	unsigned int num_locks_copy;

	/* Copy the current lock array. */
	if (br_lck->num_locks) {
		locks_copy = (struct lock_struct *)talloc_memdup(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
		if (!locks_copy) {
			smb_panic("brl_close_fnum: talloc failed");
		}
	} else {
		locks_copy = NULL;
	}

	num_locks_copy = br_lck->num_locks;

	for (i=0; i < num_locks_copy; i++) {
		struct lock_struct *lock = &locks_copy[i];

		if (lock->context.tid == tid && serverid_equal(&lock->context.pid, &pid) &&
				(lock->fnum == fnum)) {
			brl_unlock(msg_ctx,
				br_lck,
				lock->context.smblctx,
				pid,
				lock->start,
				lock->size,
				lock->lock_flav);
		}
	}
}
bool brl_mark_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	if (fsp->current_lock_count == 0) {
		return true;
	}

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle, we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != tid) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!serverid_equal(&lock->context.pid, &self)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != fnum) {
			TALLOC_FREE(br_lck);
			return false;
		}

		server_id_set_disconnected(&lock->context.pid);
		lock->context.tid = TID_FIELD_INVALID;
		lock->fnum = FNUM_FIELD_INVALID;
	}

	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
bool brl_reconnect_disconnected(struct files_struct *fsp)
{
	uint32_t tid = fsp->conn->cnum;
	uint64_t smblctx;
	uint64_t fnum = fsp->fnum;
	unsigned int i;
	struct server_id self = messaging_server_id(fsp->conn->sconn->msg_ctx);
	struct byte_range_lock *br_lck = NULL;

	if (fsp->op == NULL) {
		return false;
	}

	smblctx = fsp->op->global->open_persistent_id;

	if (!fsp->op->global->durable) {
		return false;
	}

	/*
	 * When reconnecting, we do not want to validate the brlock entries
	 * and thereby remove our own (disconnected) entries but reactivate
	 * them instead.
	 */

	br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck == NULL) {
		return false;
	}

	if (br_lck->num_locks == 0) {
		TALLOC_FREE(br_lck);
		return true;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &br_lck->lock_data[i];

		/*
		 * as this is a durable handle we only expect locks
		 * of the current file handle!
		 */

		if (lock->context.smblctx != smblctx) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->context.tid != TID_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (!server_id_is_disconnected(&lock->context.pid)) {
			TALLOC_FREE(br_lck);
			return false;
		}

		if (lock->fnum != FNUM_FIELD_INVALID) {
			TALLOC_FREE(br_lck);
			return false;
		}

		lock->context.pid = self;
		lock->context.tid = tid;
		lock->fnum = fnum;
	}

	fsp->current_lock_count = br_lck->num_locks;
	br_lck->modified = true;
	TALLOC_FREE(br_lck);
	return true;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int brl_traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	TDB_DATA dbkey;
	TDB_DATA value;

	dbkey = dbwrap_record_get_key(rec);
	value = dbwrap_record_get_value(rec);

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)talloc_memdup(
		talloc_tos(), value.dptr, value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)dbkey.dptr;
	num_locks = value.dsize/sizeof(*locks);

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
				locks[i].context.pid,
				locks[i].lock_type,
				locks[i].lock_flav,
				locks[i].start,
				locks[i].size,
				cb->private_data);
		}
	}

	TALLOC_FREE(locks);
	return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;
	NTSTATUS status;
	int count = 0;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	status = dbwrap_traverse(brlock_db, brl_traverse_fn, &cb, &count);

	if (!NT_STATUS_IS_OK(status)) {
		return -1;
	} else {
		return count;
	}
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static void byte_range_lock_flush(struct byte_range_lock *br_lck)
{
	unsigned i;
	struct lock_struct *locks = br_lck->lock_data;

	if (!br_lck->modified) {
		DEBUG(10, ("br_lck not modified\n"));
		goto done;
	}

	i = 0;

	while (i < br_lck->num_locks) {
		if (locks[i].context.pid.pid == 0) {
			/*
			 * Autocleanup, the process conflicted and does not
			 * exist anymore.
			 */
			locks[i] = locks[br_lck->num_locks-1];
			br_lck->num_locks -= 1;
		} else {
			i += 1;
		}
	}

	if ((br_lck->num_locks == 0) && (br_lck->num_read_oplocks == 0)) {
		/* No locks - delete this entry. */
		NTSTATUS status = dbwrap_record_delete(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		size_t lock_len, data_len;
		TDB_DATA data;
		NTSTATUS status;

		lock_len = br_lck->num_locks * sizeof(struct lock_struct);
		data_len = lock_len + sizeof(br_lck->num_read_oplocks);

		data.dsize = data_len;
		data.dptr = talloc_array(talloc_tos(), uint8_t, data_len);
		SMB_ASSERT(data.dptr != NULL);

		memcpy(data.dptr, br_lck->lock_data, lock_len);
		memcpy(data.dptr + lock_len, &br_lck->num_read_oplocks,
		       sizeof(br_lck->num_read_oplocks));

		status = dbwrap_record_store(br_lck->record, data,
					     TDB_REPLACE);
		TALLOC_FREE(data.dptr);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

	DEBUG(10, ("seqnum=%d\n", dbwrap_get_seqnum(brlock_db)));

done:
	br_lck->modified = false;
	TALLOC_FREE(br_lck->record);
}

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	byte_range_lock_flush(br_lck);
	return 0;
}
static bool brl_parse_data(struct byte_range_lock *br_lck, TDB_DATA data)
{
	size_t data_len;

	if (data.dsize == 0) {
		return true;
	}
	if (data.dsize % sizeof(struct lock_struct) !=
	    sizeof(br_lck->num_read_oplocks)) {
		DEBUG(1, ("Invalid data size: %u\n", (unsigned)data.dsize));
		return false;
	}

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
	data_len = br_lck->num_locks * sizeof(struct lock_struct);

	br_lck->lock_data = talloc_memdup(br_lck, data.dptr, data_len);
	if (br_lck->lock_data == NULL) {
		DEBUG(1, ("talloc_memdup failed\n"));
		return false;
	}
	memcpy(&br_lck->num_read_oplocks, data.dptr + data_len,
	       sizeof(br_lck->num_read_oplocks));
	return true;
}
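
/*
 * Editor's note (not part of the original source): the record layout
 * implied by the parsing above and by byte_range_lock_flush() is N
 * lock_struct entries followed by one trailing num_read_oplocks value,
 * which is why dsize % sizeof(struct lock_struct) must equal
 * sizeof(br_lck->num_read_oplocks):
 *
 *   +----------------+-----+--------------------+------------------+
 *   | lock_struct[0] | ... | lock_struct[N - 1] | num_read_oplocks |
 *   +----------------+-----+--------------------+------------------+
 */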
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx, files_struct *fsp)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_zero(mem_ctx, struct byte_range_lock);
	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;

	key.dptr = (uint8 *)&fsp->file_id;
	key.dsize = sizeof(struct file_id);

	br_lck->record = dbwrap_fetch_locked(brlock_db, br_lck, key);

	if (br_lck->record == NULL) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	data = dbwrap_record_get_value(br_lck->record);

	if (!brl_parse_data(br_lck, data)) {
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			br_lck->num_locks,
			file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}

	return br_lck;
}
struct brl_get_locks_readonly_state {
	TALLOC_CTX *mem_ctx;
	struct byte_range_lock **br_lock;
};

static void brl_get_locks_readonly_parser(TDB_DATA key, TDB_DATA data,
					  void *private_data)
{
	struct brl_get_locks_readonly_state *state =
		(struct brl_get_locks_readonly_state *)private_data;
	struct byte_range_lock *br_lck;

	br_lck = talloc_pooled_object(
		state->mem_ctx, struct byte_range_lock, 1, data.dsize);
	if (br_lck == NULL) {
		*state->br_lock = NULL;
		return;
	}
	*br_lck = (struct byte_range_lock) {};
	if (!brl_parse_data(br_lck, data)) {
		*state->br_lock = NULL;
		return;
	}
	*state->br_lock = br_lck;
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock = NULL;
	struct brl_get_locks_readonly_state state;
	NTSTATUS status;

	DEBUG(10, ("seqnum=%d, fsp->brlock_seqnum=%d\n",
		   dbwrap_get_seqnum(brlock_db), fsp->brlock_seqnum));

	if ((fsp->brlock_rec != NULL)
	    && (dbwrap_get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		/*
		 * We have cached the brlock_rec and the database did not
		 * change.
		 */
		return fsp->brlock_rec;
	}

	/*
	 * Parse the record fresh from the database
	 */

	state.mem_ctx = fsp;
	state.br_lock = &br_lock;

	status = dbwrap_parse_record(
		brlock_db,
		make_tdb_data((uint8_t *)&fsp->file_id,
			      sizeof(fsp->file_id)),
		brl_get_locks_readonly_parser, &state);

	if (NT_STATUS_EQUAL(status,NT_STATUS_NOT_FOUND)) {
		/*
		 * No locks on this file. Return an empty br_lock.
		 */
		br_lock = talloc(fsp, struct byte_range_lock);
		if (br_lock == NULL) {
			return NULL;
		}

		br_lock->num_read_oplocks = 0;
		br_lock->num_locks = 0;
		br_lock->lock_data = NULL;

	} else if (!NT_STATUS_IS_OK(status)) {
		DEBUG(3, ("Could not parse byte range lock record: "
			  "%s\n", nt_errstr(status)));
		return NULL;
	}
	if (br_lock == NULL) {
		return NULL;
	}

	br_lock->fsp = fsp;
	br_lock->modified = false;
	br_lock->record = NULL;

	if (lp_clustering()) {
		/*
		 * In the cluster case we can't cache the brlock struct
		 * because dbwrap_get_seqnum does not work reliably over
		 * ctdb. Thus we have to throw away the brlock struct soon.
		 */
		talloc_steal(talloc_tos(), br_lock);
	} else {
		/*
		 * Cache the brlock struct, invalidated when the dbwrap_seqnum
		 * changes. See beginning of this routine.
		 */
		TALLOC_FREE(fsp->brlock_rec);
		fsp->brlock_rec = br_lock;
		fsp->brlock_seqnum = dbwrap_get_seqnum(brlock_db);
	}

	return br_lock;
}
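
/*
 * Editor's note (not part of the original source): the non-clustered
 * fast path above relies on TDB_SEQNUM being set in brl_init(). Every
 * write bumps the tdb sequence number, so a cached fsp->brlock_rec
 * stays valid exactly while dbwrap_get_seqnum(brlock_db) still equals
 * fsp->brlock_seqnum.
 */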
struct brl_revalidate_state {
	ssize_t array_size;
	uint32 num_pids;
	struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}

/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (const struct server_id *)p1;
	const struct server_id *i2 = (const struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

void brl_revalidate(struct messaging_context *msg_ctx,
		    void *private_data,
		    uint32_t msg_type,
		    struct server_id server_id,
		    DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32 i;
	struct server_id last_pid;

	if (!(state = talloc_zero(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (serverid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}
bool brl_cleanup_disconnected(struct file_id fid, uint64_t open_persistent_id)
{
	bool ret = false;
	TALLOC_CTX *frame = talloc_stackframe();
	TDB_DATA key, val;
	struct db_record *rec;
	struct lock_struct *lock;
	unsigned n, num;
	NTSTATUS status;

	key = make_tdb_data((void*)&fid, sizeof(fid));

	rec = dbwrap_fetch_locked(brlock_db, frame, key);
	if (rec == NULL) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to fetch record "
			  "for file %s\n", file_id_string(frame, &fid)));
		goto done;
	}

	val = dbwrap_record_get_value(rec);
	lock = (struct lock_struct *)val.dptr;
	num = val.dsize / sizeof(struct lock_struct);
	if (lock == NULL) {
		DEBUG(10, ("brl_cleanup_disconnected: no byte range locks for "
			   "file %s\n", file_id_string(frame, &fid)));
		ret = true;
		goto done;
	}

	for (n=0; n<num; n++) {
		struct lock_context *ctx = &lock[n].context;

		if (!server_id_is_disconnected(&ctx->pid)) {
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s used by server %s, do not cleanup\n",
				  file_id_string(frame, &fid),
				  server_id_str(frame, &ctx->pid)));
			goto done;
		}

		if (ctx->smblctx != open_persistent_id) {
			DEBUG(5, ("brl_cleanup_disconnected: byte range lock "
				  "%s expected smblctx %llu but found %llu"
				  ", do not cleanup\n",
				  file_id_string(frame, &fid),
				  (unsigned long long)open_persistent_id,
				  (unsigned long long)ctx->smblctx));
			goto done;
		}
	}

	status = dbwrap_record_delete(rec);
	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(5, ("brl_cleanup_disconnected: failed to delete record "
			  "for file %s from %s, open %llu: %s\n",
			  file_id_string(frame, &fid), dbwrap_name(brlock_db),
			  (unsigned long long)open_persistent_id,
			  nt_errstr(status)));
		goto done;
	}

	DEBUG(10, ("brl_cleanup_disconnected: "
		   "file %s cleaned up %u entries from open %llu\n",
		   file_id_string(frame, &fid), num,
		   (unsigned long long)open_persistent_id));

	ret = true;
done:
	talloc_free(frame);
	return ret;
}