/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;
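
/*
 * Note: each brlock.tdb record is keyed by a struct file_id and holds a
 * packed array of struct lock_struct as its value, so a record's lock
 * count is always value.dsize / sizeof(struct lock_struct). The fetch,
 * store and traverse paths below all rely on this layout.
 */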
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %s, ",
			i,
			(unsigned int)pls->context.smbpid,
			(unsigned int)pls->context.tid,
			procid_str(debug_ctx(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

bool brl_same_context(const struct lock_context *ctx1,
		      const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smbpid == ctx2->smbpid) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
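
/*
 * Example: a lock with start = 0xFFFFFFFFFFFFFFF0 and size = 0x20 runs
 * past the end of 64 bit file space, so start+size wraps to a small
 * value and the interval test alone would miss a second, identical
 * lock. The exact start/size equality check above still catches it.
 */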
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	 * context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
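
/*
 * Example: with a WRITE lock held on [0, 10) by a given (pid, tid, fnum)
 * context, a READ lock over the same range from that same context and
 * fnum stacks without conflict, while the identical READ request issued
 * on a different fnum conflicts as usual.
 */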
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
		    lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
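
/*
 * Example: an unlock of [95, 105) overlaps a pending lock at [100, 110)
 * via the first test (95 <= 100 < 105), so the waiter gets woken to
 * retry, whereas an unlock of [50, 60) matches neither test and leaves
 * the waiter blocked.
 */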
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
			lock->context.tid == fsp->last_lock_failure.context.tid &&
			lock->fnum == fsp->last_lock_failure.fnum &&
			lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	if (brlock_db) {
		return;
	}
	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(),
			    TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
				  struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smbpid = locks[i].context.smbpid;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}
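
/*
 * Note on the fail path above: level2 oplock contention was begun before
 * the POSIX mapping attempt, so it is explicitly ended again on failure.
 * Only a granted Windows lock leaves contention active; that is later
 * ended by brl_unlock_windows_default() or brl_close_fnum().
 */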
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
					     const struct lock_struct *ex,	/* existing lock. */
					     const struct lock_struct *plock,	/* proposed lock. */
					     bool *lock_was_added)
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        |   ex    |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |   ex    |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
	     (plock->start > (ex->start + ex->size))) {
		/* No overlap with this lock - copy existing. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |            ex             |
        +---------------------------+
        +---------------------------+
        |           plock           | -> replace with plock.
        +---------------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {
		memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
		*lock_was_added = True;
		return 1;
	}

/*********************************************
        +-----------------------+
	|          ex           |
        +-----------------------+
        +---------------+
        |     plock     |
        +---------------+
OR....
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |     plock     |
        +---------------+

BECOMES....
        +---------------+-------+
        |     plock     |  ex   | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |          ex           | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
	     (ex->start <= plock->start + plock->size) &&
	     (ex->start + ex->size > plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock before the old. */

		if (lock_types_differ) {
			/* Add new. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
			/* Set new start and size. */
			lck_arr[0].start = plock->start;
			lck_arr[0].size = (ex->start + ex->size) - plock->start;
			return 1;
		}
	}

/*********************************************
   +-----------------------+
   |          ex           |
   +-----------------------+
           +---------------+
           |     plock     |
           +---------------+
OR....
   +-------+
   |  ex   |
   +-------+
           +---------------+
           |     plock     |
           +---------------+
BECOMES....
   +-------+---------------+
   |  ex   |     plock     | - different lock types
   +-------+---------------+
OR.... (merge)
   +-----------------------+
   |          ex           | - same lock type.
   +-----------------------+
**********************************************/

	if ( (ex->start < plock->start) &&
	     (ex->start + ex->size >= plock->start) &&
	     (ex->start + ex->size <= plock->start + plock->size) ) {

		*lock_was_added = True;

		/* If the lock types are the same, we merge, if different, we
		   add the new lock after the old. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 2;
		} else {
			/* Merge. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = (plock->start + plock->size) - ex->start;
			return 1;
		}
	}

/*********************************************
        +---------------------------+
        |            ex             |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        |  ex   |  plock  |   ex    | - different lock types.
        +-------+---------+---------+
OR
        +---------------------------+
        |            ex             | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
		*lock_was_added = True;

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
			memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[2].start = plock->start + plock->size;
			lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 3;
		} else {
			/* Just eat plock. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
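
/*
 * Worked example: given an existing WRITE lock ex = [100, 200) and a
 * proposed READ lock plock = [150, 250) from the same context, the
 * "ex starts first and ends inside plock" case applies: the output array
 * becomes ex truncated to [100, 150) followed by plock = [150, 250), and
 * the function returns 2. Were the lock types identical, the two ranges
 * would instead merge into a single [100, 250) entry, returning 1.
 */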
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool lock_was_added = False;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;
	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
		    brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smbpid = curr_lock->context.smbpid;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	if (!lock_was_added) {
		memcpy(&tp[count], plock, sizeof(struct lock_struct));
		count++;
	}

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smbpid = 0xFFFFFFFF;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				SAFE_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* Realloc so we don't leak entries per lock call. */
	tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
	if (!tp) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
			    brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows(handle, br_lck, plock,
					     blocking_lock, blr);
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint32 *psmbpid,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

#ifdef DEVELOPER
	/* Quieten valgrind on test. */
	memset(&lock, '\0', sizeof(lock));
#endif

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
					       &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmbpid) {
		*psmbpid = lock.context.smbpid;
	}
	return ret;
}
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
				struct byte_range_lock *br_lck,
				const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
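
/*
 * Note: waiters never poll the lock database. Whoever releases an
 * overlapping range sends MSG_SMB_UNLOCK to each process owning a
 * pending lock on it, and the woken process then retries its blocked
 * lock request.
 */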
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     const struct lock_struct *plock)
{
	unsigned int i, j, count, posix_count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
	    plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = posix_count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		struct lock_struct tmp_lock[3];
		bool lock_was_added = False;
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

		if (tmp_count == 1) {
			/* Either the locks didn't overlap, or the unlock completely
			   overlapped this lock. If it didn't overlap, then there's
			   no change in the locks. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				/* No change in this lock. */
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				count++;
				posix_count++;
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				overlap_found = True;
			}
			continue;
		} else if (tmp_count == 2) {
			/* The unlock overlapped an existing lock. Copy the truncated
			   lock into the lock array. */
			if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
				SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
				SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
				memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
				if (tmp_lock[0].size != locks[i].size) {
					overlap_found = True;
				}
			} else {
				SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
				SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
				memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
				if (tmp_lock[1].start != locks[i].start) {
					overlap_found = True;
				}
			}
			count++;
			posix_count++;
			continue;
		} else {
			/* tmp_count == 3 - (we split a lock range in two). */
			SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
			SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
			SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

			memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
			count++;
			posix_count++;
			memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
			count++;
			posix_count++;
			overlap_found = True;
			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */
			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	if (posix_count == 0) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
						  br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint32 *psmbpid,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smbpid = *psmbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmbpid = exlock->context.smbpid;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smbpid to - use -1. */
			*psmbpid = 0xFFFF;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock,
				     struct blocking_lock_record *blr)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
}
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint32 smbpid,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav,
		struct blocking_lock_record *blr)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smbpid = smbpid;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
						 &lock, blr);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    IS_PENDING_LOCK(lock->lock_type) &&
		    lock->lock_flav == plock->lock_flav &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
 If this process owns Windows locks on other fnums for the same dev/ino
 pair, those locks are released individually so the underlying system
 POSIX locks stay correct.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = procid_self();
	bool unlock_individually = False;
	bool posix_level2_contention_ended = false;

	if(lp_posix_locking(fsp->conn->params)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			if (br_lck->num_locks) {
				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
				if (!locks_copy) {
					smb_panic("brl_close_fnum: talloc failed");
				}
			} else {
				locks_copy = NULL;
			}

			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(msg_ctx,
						br_lck,
						lock->context.smbpid,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		bool del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
				contend_level2_oplocks_end(br_lck->fsp,
							   LEVEL2_CONTEND_WINDOWS_BRL);
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;

				/* Only end level2 contention once for posix */
				if (!posix_level2_contention_ended) {
					posix_level2_contention_ended = true;
					contend_level2_oplocks_end(br_lck->fsp,
								   LEVEL2_CONTEND_POSIX_BRL);
				}
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					messaging_send(msg_ctx, pend_lock->context.pid,
						       MSG_SMB_UNLOCK, &data_blob_null);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}
/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/

static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!process_exists(lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smbpid &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
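
/*
 * Note: the first pass above zeroes dead entries in place, and the
 * compacting pass recognises the survivors by their nonzero smbpid/tid
 * pair. A valid entry can therefore never carry both a zero smbpid and
 * a zero tid.
 */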
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(rec->value.dptr,
					     rec->value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)rec->key.dptr;
	orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			rec->store(rec, data, TDB_REPLACE);
		} else {
			rec->delete_rec(rec);
		}
	}

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	SAFE_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		status = br_lck->record->store(br_lck->record, data,
					       TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

 done:

	SAFE_FREE(br_lck->lock_data);
	TALLOC_FREE(br_lck->record);
	return 0;
}
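
/*
 * Note: because this destructor persists any modified lock array (or
 * deletes an emptied record) and then releases the record lock,
 * TALLOC_FREE() on a byte_range_lock is the single commit-and-unlock
 * point for every caller of brl_get_locks().
 */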
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, bool read_only)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	memset(&br_lck->key, '\0', sizeof(struct file_id));
	br_lck->key = fsp->file_id;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		read_only = False;
	}

	if (read_only) {
		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
			DEBUG(3, ("Could not fetch byte range lock record\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->record = NULL;
	}
	else {
		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);

		if (br_lck->record == NULL) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		data = br_lck->record->value;
	}

	br_lck->read_only = read_only;
	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
						     br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("malloc failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr, data.dsize);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		if (!validate_lock_entries(&br_lck->num_locks,
					   &br_lck->lock_data)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			br_lck->num_locks,
			file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}
	return br_lck;
}
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, True);
}
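
/*
 * Typical usage sketch (illustrative only, assuming a files_struct *fsp
 * and the usual talloc_tos() temporary context):
 *
 *	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
 *	if (br_lck != NULL) {
 *		... call brl_lock() / brl_unlock() against br_lck ...
 *		TALLOC_FREE(br_lck);	- stores changes and unlocks the record
 *	}
 */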
struct brl_revalidate_state {
	ssize_t array_size;
	uint32 num_pids;
	struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}
/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (struct server_id *)p1;
	const struct server_id *i2 = (struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

static void brl_revalidate(struct messaging_context *msg_ctx,
			   void *private_data,
			   uint32_t msg_type,
			   struct server_id server_id,
			   DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32 i;
	struct server_id last_pid;

	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
	      compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (procid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}
void brl_register_msgs(struct messaging_context *msg_ctx)
{
	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
			   brl_revalidate);
}