/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

/* The open brlock.tdb database. */

static TDB_CONTEXT *tdb;
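/*
 * Note on the record layout (inferred from the code below, not spelled out
 * in the original): each brlock.tdb record is keyed by a struct lock_key
 * carrying the dev/inode pair of the file, and its data is a packed array
 * of struct lock_struct entries, so dbuf.dsize is always a multiple of
 * sizeof(struct lock_struct). traverse_fn() and
 * byte_range_lock_destructor() below both rely on exactly this layout.
 */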
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/
static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
			i,
			(unsigned int)pls->context.smbpid,
			(unsigned int)pls->context.tid,
			(unsigned int)procid_to_pid(&pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/
BOOL brl_same_context(const struct lock_context *ctx1,
			const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smbpid == ctx2->smbpid) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/
static BOOL brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
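/*
 * Worked example (illustrative, checkable against the code above): for two
 * identical ranges with start = 0xFFFFFFFFFFFFFF00 and size = 0x200,
 * start+size wraps to a small value, so the second test alone would wrongly
 * report "no overlap". The explicit equality check catches exactly this
 * wrap-past-the-end case. Note also that a zero-size lock can never overlap
 * anything here, since start >= start+0 always holds.
 */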
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/
static BOOL brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK)
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/
static BOOL brl_conflict_posix(const struct lock_struct *lck1,
				const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK)
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
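/*
 * Example (illustrative): under brl_conflict() a new READ lock from the
 * same context escapes the conflict test only when it is on the same fnum;
 * brl_conflict_posix() above drops the fnum test entirely, because a POSIX
 * lock owner is the process/context, not the open file.
 */

/****************************************************************************
 See if lock2 can be added when lock1 is in place - zero/zero lock variant.
 (Header added for clarity: this variant backs zero/zero lock handling,
 where a start=0,size=0 lock conflicts with any existing lock of non-zero
 size on the file.)
****************************************************************************/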
static BOOL brl_conflict1(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK)
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/
static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (lck1->lock_type == PENDING_LOCK || lck2->lock_type == PENDING_LOCK)
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
					lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/
static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
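/*
 * Example (illustrative): a client repeating the same failing lock on one
 * fnum gets NT_STATUS_LOCK_NOT_GRANTED on the first attempt (which is
 * recorded in fsp->last_lock_failure) and NT_STATUS_FILE_LOCK_CONFLICT on
 * the identical retry - the w2k3 behaviour described above. Blocking
 * attempts never update last_lock_failure, so a pending blocking lock
 * doesn't poison the recorded state.
 */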
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/
void brl_init(int read_only)
{
	if (tdb) {
		return;
	}
	tdb = tdb_open_log(lock_path("brlock.tdb"),
			lp_open_files_db_hash_size(),
			TDB_DEFAULT|(read_only?0x0:TDB_CLEAR_IF_FIRST),
			read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!tdb) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
	if (!tdb) {
		return;
	}
	tdb_close(tdb);
}
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/
static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
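/*
 * Note (observation, not in the original): this comparator is only used
 * from the qsort() call in brl_lock() below. Both subtractions are
 * truncated to int on return, so ordering is only exact while offsets and
 * sizes fit in 32 bits.
 */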
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/
static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
			const struct lock_struct *plock, BOOL blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(fsp->conn))) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {
			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				return map_nt_error_from_unix(errno);
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		return NT_STATUS_NO_MEMORY;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = (void *)locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/
static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						const struct lock_struct *ex,		/* existing lock. */
						const struct lock_struct *plock,	/* proposed lock. */
						BOOL *lock_was_added)
{
	BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                      +---------+
                                      |   ex    |
                                      +---------+
                       +-------+
                       | plock |
                       +-------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
			(plock->start > (ex->start + ex->size))) {
		/* No overlap with this lock - copy existing. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {
		memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
		*lock_was_added = True;
		return 1;
	}

/*********************************************
                +-----------------------+
                |          ex           |
                +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR....
                +-------+
                |  ex   |
                +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   ex                  | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
			(ex->start <= plock->start + plock->size) &&
			(ex->start + ex->size > plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock before the old. */

		if (lock_types_differ) {
			/* Add new. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			/* Set new start and size. */
			lck_arr[0].start = plock->start;
			lck_arr[0].size = (ex->start + ex->size) - plock->start;
			return 1;
		}
	}

/*********************************************
   +-----------------------+
   |        ex             |
   +-----------------------+
                +---------------+
                |   plock       |
                +---------------+
OR....
   +-------+
   |  ex   |
   +-------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
   +-------+---------------+
   |  ex   |   plock       | - different lock types
   +-------+---------------+
OR.... (merge)
   +-----------------------+
   |   ex                  | - same lock type.
   +-----------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock after the old. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = (plock->start + plock->size) - ex->start;
			return 1;
		}
	}

/*********************************************
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+
OR....
        +---------------------------+
        |        ex                 | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
		*lock_was_added = True;

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[2].start = plock->start + plock->size;
			lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 3;
		} else {
			/* Just eat plock. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge\n");

	/* Keep some compilers happy. */
	return 0;
}
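/*
 * Worked example (illustrative, checkable against the cases above): an
 * existing WRITE lock ex = {start=100, size=100} and a proposed READ lock
 * plock = {start=140, size=20} from the same context hit the final split
 * case and produce 3 entries:
 *   lck_arr[0] = WRITE {start=100, size=40}   (head of ex)
 *   lck_arr[1] = READ  {start=140, size=20}   (plock)
 *   lck_arr[2] = WRITE {start=160, size=40}   (tail of ex)
 * With equal lock types the same input returns 1 entry - the original ex,
 * which already covers plock.
 */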
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/
static NTSTATUS brl_lock_posix(struct byte_range_lock *br_lck,
			const struct lock_struct *plock)
{
	unsigned int i, count;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct lock_struct *tp;
	BOOL lock_was_added = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = 0;
	for (i=0; i < br_lck->num_locks; i++) {
		if (locks[i].lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(&locks[i], plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], &locks[i], sizeof(struct lock_struct));
			count++;
		} else {
			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(&locks[i], plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			count += brlock_posix_split_merge(&tp[count], &locks[i], plock, &lock_was_added);
		}
	}

	if (!lock_was_added) {
		memcpy(&tp[count], plock, sizeof(struct lock_struct));
		count++;
	}

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if ((plock->lock_type != PENDING_LOCK) && lp_posix_locking(SNUM(br_lck->fsp->conn))) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {
			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				return NT_STATUS_FILE_LOCK_CONFLICT;
			} else {
				SAFE_FREE(tp);
				return map_nt_error_from_unix(errno);
			}
		}
	}

	/* Realloc so we don't leak entries per lock call. */
	tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = (void *)tp;
	br_lck->modified = True;

	return NT_STATUS_OK;
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/
NTSTATUS brl_lock(struct byte_range_lock *br_lck,
		uint16 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		BOOL blocking_lock)
{
	NTSTATUS ret;
	struct lock_struct lock;

	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = brl_lock_windows(br_lck, &lock, blocking_lock);
	} else {
		ret = brl_lock_posix(br_lck, &lock);
	}

	/* sort the lock list */
	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);

	return ret;
}
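/*
 * Usage sketch (assumption: the real callers live elsewhere in the locking
 * code; "offset", "count" and "smbpid" here are illustrative names). A
 * Windows-flavour lock attempt brackets brl_lock() with brl_get_locks(),
 * and relies on TALLOC_FREE() running the destructor that writes the
 * record back and drops the tdb chainlock:
 */
#if 0
	struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);
	if (br_lck != NULL) {
		NTSTATUS status = brl_lock(br_lck, smbpid, procid_self(),
					offset, count, WRITE_LOCK,
					WINDOWS_LOCK, False);
		TALLOC_FREE(br_lck);	/* store + chainunlock via destructor */
	}
#endif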
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/
static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/
static BOOL brl_unlock_windows(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

  unlock_continue:

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (pend_lock->lock_type != PENDING_LOCK) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
		}
	}

	return True;
}
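/*
 * Note (illustrative): if the same context holds both a READ and a WRITE
 * lock with identical start/size on this fnum, the first loop above
 * guarantees that a single unlock removes the WRITE lock - write locks
 * are deleted by preference, as the comment there says.
 */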
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/
static BOOL brl_unlock_posix(struct byte_range_lock *br_lck, const struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	BOOL overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		struct lock_struct tmp_lock[3];
		BOOL lock_was_added = False;
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (lock->lock_type == PENDING_LOCK ||
				!brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

		if (tmp_count == 1) {
			/* Either the locks didn't overlap, or the unlock completely
			   overlapped this lock. If it didn't overlap, then there's
			   no change in the locks. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				/* No change in this lock. */
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				count++;
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				overlap_found = True;
			}
			continue;
		} else if (tmp_count == 2) {
			/* The unlock overlapped an existing lock. Copy the truncated
			   lock into the lock array. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				if (tmp_lock[0].size != locks[i].size) {
					overlap_found = True;
				}
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
				memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
				if (tmp_lock[1].start != locks[i].start) {
					overlap_found = True;
				}
			}
			count++;
			continue;
		} else {
			/* tmp_count == 3 - (we split a lock range in two). */
			SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
			SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
			SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

			memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
			count++;
			memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
			count++;

			overlap_found = True;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */
			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->cnum)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				&plock->context,
				tp,
				count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = br_lck->lock_data = (void *)tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (pend_lock->lock_type != PENDING_LOCK) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			message_send_pid(pend_lock->context.pid,
					MSG_SMB_UNLOCK,
					NULL, 0, True);
		}
	}

	return True;
}
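/*
 * Note (illustrative): plock arrives here with lock_type == UNLOCK_LOCK
 * (set up in brl_unlock() below), so inside brlock_posix_split_merge() the
 * lock types always differ, and the entries that come back tagged
 * UNLOCK_LOCK mark exactly the regions to remove - they are asserted on
 * and dropped above instead of being copied into the new array.
 */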
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/
BOOL brl_unlock(struct byte_range_lock *br_lck,
		uint16 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return brl_unlock_windows(br_lck, &lock);
	} else {
		return brl_unlock_posix(br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/
BOOL brl_locktest(struct byte_range_lock *br_lck,
		uint16 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	BOOL ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->cnum) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/
NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint16 *psmbpid,
		struct process_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = *psmbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		BOOL conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmbpid = exlock->context.smbpid;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->cnum)) {
		BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp->fsp_name ));

		if (ret) {
			/* Hmmm. No clue what to set smbpid to - use -1. */
			*psmbpid = 0xFFFF;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
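/*
 * Note (illustrative): the pointer arguments above are in/out - the caller
 * passes in the range and type to probe, and when
 * NT_STATUS_LOCK_NOT_GRANTED comes back they hold the smbpid, start, size
 * and type of the conflicting lock instead.
 */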
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/
BOOL brl_lock_cancel(struct byte_range_lock *br_lck,
		uint16 smbpid,
		struct process_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct lock_context context;

	context.smbpid = smbpid;
	context.pid = pid;
	context.tid = br_lck->fsp->conn->cnum;

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &context) &&
				lock->fnum == br_lck->fsp->fnum &&
				lock->lock_type == PENDING_LOCK &&
				lock->lock_flav == lock_flav &&
				lock->start == start &&
				lock->size == size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
 If this process owns locks on the same dev/ino pair through a different
 fnum, each lock is released individually so the underlying system POSIX
 locks stay correct; otherwise the locks are bulk deleted.
****************************************************************************/
void brl_close_fnum(struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
	struct process_id pid = procid_self();
	BOOL unlock_individually = False;

	if(lp_posix_locking(fsp->conn->cnum)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			locks_copy = TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
			if (!locks_copy) {
				smb_panic("brl_close_fnum: talloc fail.\n");
			}
			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(br_lck,
						lock->context.smbpid,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		BOOL del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (pend_lock->lock_type != PENDING_LOCK) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					message_send_pid(pend_lock->context.pid,
							MSG_SMB_UNLOCK,
							NULL, 0, True);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if (num_deleted_windows_locks) {
		/* Reduce the Windows lock reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}
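/*
 * Note (illustrative): the "unlock individually" path exists because
 * system POSIX locks are owned per process, not per fd. If this process
 * still holds Windows locks on the same dev/ino through another fnum, a
 * plain close() would drop the system locks for all of them, so each lock
 * on the closing fnum is released through brl_unlock() instead.
 */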
/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/
static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!process_exists(lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smbpid &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/
static int traverse_fn(TDB_CONTEXT *ttdb, TDB_DATA kbuf, TDB_DATA dbuf, void *state)
{
	struct lock_struct *locks;
	struct lock_key *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	BRLOCK_FN(traverse_callback) = (BRLOCK_FN_CAST())state;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(dbuf.dptr, dbuf.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct lock_key *)kbuf.dptr;
	orig_num_locks = num_locks = dbuf.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		dbuf.dptr = (char *)locks;
		dbuf.dsize = num_locks * sizeof(*locks);

		if (dbuf.dsize) {
			tdb_store(ttdb, kbuf, dbuf, TDB_REPLACE);
		} else {
			tdb_delete(ttdb, kbuf);
		}
	}

	for ( i=0; i<num_locks; i++) {
		traverse_callback(key->device,
				key->inode,
				locks[i].context.pid,
				locks[i].lock_type,
				locks[i].lock_flav,
				locks[i].start,
				locks[i].size);
	}

	SAFE_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/
int brl_forall(BRLOCK_FN(fn))
{
	if (!tdb) {
		return 0;
	}
	return tdb_traverse(tdb, traverse_fn, (void *)fn);
}
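/*
 * Usage sketch (assumption: the BRLOCK_FN() callback signature shown here
 * is inferred from the traverse_callback() invocation above; "print_brl"
 * is an illustrative name, modelled on a status-tool callback):
 */
#if 0
static void print_brl(SMB_DEV_T dev, SMB_INO_T ino, struct process_id pid,
		enum brl_type lock_type, enum brl_flavour lock_flav,
		br_off start, br_off size)
{
	d_printf("%s  dev=%.0f  ino=%.0f  %s  %.0f:%.0f\n",
		procid_str_static(&pid), (double)dev, (double)ino,
		lock_type == READ_LOCK ? "R" : "W",
		(double)start, (double)size);
}

	/* e.g. from a status tool: */
	brl_forall(print_brl);
#endif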
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/
static int byte_range_lock_destructor(void *p)
{
	struct byte_range_lock *br_lck =
		talloc_get_type_abort(p, struct byte_range_lock);
	TDB_DATA key;

	key.dptr = (char *)&br_lck->key;
	key.dsize = sizeof(struct lock_key);

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		if (tdb_delete(tdb, key) == -1) {
			smb_panic("Could not delete byte range lock entry\n");
		}
	} else {
		TDB_DATA data;
		data.dptr = (char *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		if (tdb_store(tdb, key, data, TDB_REPLACE) == -1) {
			smb_panic("Could not store byte range mode entry\n");
		}
	}

 done:

	tdb_chainunlock(tdb, key);
	SAFE_FREE(br_lck->lock_data);
	return 0;
}
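/*
 * Note (illustrative): this destructor both writes the record back (only
 * when br_lck->modified is set) and drops the tdb chainlock taken in
 * brl_get_locks() below, so callers must always dispose of a
 * byte_range_lock with TALLOC_FREE() - leaking one would leave the tdb
 * chain locked against every other smbd.
 */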
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	TDB_DATA key;
	TDB_DATA data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	memset(&br_lck->key, '\0', sizeof(struct lock_key));
	br_lck->key.device = fsp->dev;
	br_lck->key.inode = fsp->inode;

	key.dptr = (char *)&br_lck->key;
	key.dsize = sizeof(struct lock_key);

	if (tdb_chainlock(tdb, key) != 0) {
		DEBUG(3, ("Could not lock byte range lock entry\n"));
		TALLOC_FREE(br_lck);
		return NULL;
	}

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	data = tdb_fetch(tdb, key);
	br_lck->lock_data = (void *)data.dptr;
	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (!fsp->lockdb_clean) {

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		struct lock_struct *locks =
			(struct lock_struct *)br_lck->lock_data;

		if (!validate_lock_entries(&br_lck->num_locks, &locks)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/*
		 * validate_lock_entries might have changed locks. We can't
		 * use a direct pointer here because otherwise gcc warns
		 * about strict aliasing rules being violated.
		 */
		br_lck->lock_data = locks;

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = (struct lock_struct *)br_lck->lock_data;
		DEBUG(10,("brl_get_locks: %u current locks on dev=%.0f, inode=%.0f\n",
			br_lck->num_locks,
			(double)fsp->dev, (double)fsp->inode ));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}
	return br_lck;
}
]);