source3/locking/brlock.c
1 /*
2 Unix SMB/CIFS implementation.
3 byte range locking code
4 Updated to handle range splits/merges.
6 Copyright (C) Andrew Tridgell 1992-2000
7 Copyright (C) Jeremy Allison 1992-2000
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 /* This module implements a tdb based byte range locking service,
24 replacing the fcntl() based byte range locking previously
25 used. This allows us to provide the same semantics as NT */
27 #include "includes.h"
29 #undef DBGC_CLASS
30 #define DBGC_CLASS DBGC_LOCKING
32 #define ZERO_ZERO 0
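/* ZERO_ZERO is a compile-time switch, off by default. When enabled,
   the #if ZERO_ZERO blocks below keep the lock list sorted and
   special-case locks with start == 0 and size == 0. */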
34 /* The open brlock.tdb database. */
36 static struct db_context *brlock_db;
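/* Each brlock.tdb record is keyed by a struct file_id and its value is
   a packed array of struct lock_struct - see brl_get_locks_internal()
   and traverse_fn() below. */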
38 /****************************************************************************
39 Debug info at level 10 for lock struct.
40 ****************************************************************************/
42 static void print_lock_struct(unsigned int i, struct lock_struct *pls)
44 DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %s, ",
46 (unsigned int)pls->context.smbpid,
47 (unsigned int)pls->context.tid,
48 procid_str(debug_ctx(), &pls->context.pid) ));
50 DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
51 (double)pls->start,
52 (double)pls->size,
53 pls->fnum,
54 lock_type_name(pls->lock_type),
55 lock_flav_name(pls->lock_flav) ));
58 /****************************************************************************
59 See if two locking contexts are equal.
60 ****************************************************************************/
62 bool brl_same_context(const struct lock_context *ctx1,
63 const struct lock_context *ctx2)
65 return (procid_equal(&ctx1->pid, &ctx2->pid) &&
66 (ctx1->smbpid == ctx2->smbpid) &&
67 (ctx1->tid == ctx2->tid));
70 /****************************************************************************
71 See if lck1 and lck2 overlap.
72 ****************************************************************************/
74 static bool brl_overlap(const struct lock_struct *lck1,
75 const struct lock_struct *lck2)
77 /* XXX Remove for Win7 compatibility. */
78 /* this extra check is not redundant - it copes with locks
79 that go beyond the end of 64 bit file space */
80 if (lck1->size != 0 &&
81 lck1->start == lck2->start &&
82 lck1->size == lck2->size) {
83 return True;
86 if (lck1->start >= (lck2->start+lck2->size) ||
87 lck2->start >= (lck1->start+lck1->size)) {
88 return False;
90 return True;
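/* Worked example: [start=0, size=10] and [start=10, size=5] do not
   overlap, because 10 >= 0 + 10. The identical-range test above matters
   when start + size wraps past the top of 64 bit file space: the
   arithmetic comparisons would then wrongly report no overlap for two
   identical locks. */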
93 /****************************************************************************
94 See if lock2 can be added when lock1 is in place.
95 ****************************************************************************/
97 static bool brl_conflict(const struct lock_struct *lck1,
98 const struct lock_struct *lck2)
100 /* Ignore PENDING locks. */
101 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
102 return False;
104 /* Read locks never conflict. */
105 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
106 return False;
109 /* A READ lock can stack on top of a WRITE lock if they have the same
110 * context & fnum. */
111 if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
112 brl_same_context(&lck1->context, &lck2->context) &&
113 lck1->fnum == lck2->fnum) {
114 return False;
117 return brl_overlap(lck1, lck2);
120 /****************************************************************************
121 See if lock2 can be added when lock1 is in place - when both locks are POSIX
122 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
123 know already match.
124 ****************************************************************************/
126 static bool brl_conflict_posix(const struct lock_struct *lck1,
127 const struct lock_struct *lck2)
129 #if defined(DEVELOPER)
130 SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
131 SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
132 #endif
134 /* Ignore PENDING locks. */
135 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
136 return False;
138 /* Read locks never conflict. */
139 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
140 return False;
143 /* Locks on the same context don't conflict. Ignore fnum. */
144 if (brl_same_context(&lck1->context, &lck2->context)) {
145 return False;
148 /* One is read, the other write, or the context is different,
149 do they overlap ? */
150 return brl_overlap(lck1, lck2);
153 #if ZERO_ZERO
154 static bool brl_conflict1(const struct lock_struct *lck1,
155 const struct lock_struct *lck2)
157 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
158 return False;
160 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
161 return False;
164 if (brl_same_context(&lck1->context, &lck2->context) &&
165 lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
166 return False;
169 if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
170 return True;
173 if (lck1->start >= (lck2->start + lck2->size) ||
174 lck2->start >= (lck1->start + lck1->size)) {
175 return False;
178 return True;
180 #endif
182 /****************************************************************************
183 Check to see if this lock conflicts, but ignore our own locks on the
184 same fnum only. This is the read/write lock check code path.
185 This is never used in the POSIX lock case.
186 ****************************************************************************/
188 static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
190 if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
191 return False;
193 if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
194 return False;
196 /* POSIX flavour locks never conflict here - this is only called
197 in the read/write path. */
199 if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
200 return False;
203 * Incoming WRITE locks conflict with existing READ locks even
204 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
207 if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
208 if (brl_same_context(&lck1->context, &lck2->context) &&
209 lck1->fnum == lck2->fnum)
210 return False;
213 return brl_overlap(lck1, lck2);
216 /****************************************************************************
217 Check if an unlock overlaps a pending lock.
218 ****************************************************************************/
220 static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
222 if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
223 return True;
224 if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
225 return True;
226 return False;
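/* Example: an unlock starting exactly at the end of a pending lock
   (lock->start == pend_lock->start + pend_lock->size) still counts as
   an overlap via the second (<=) test, so adjacent waiters get woken. */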
229 /****************************************************************************
230 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
231 is the same as this one and changes its error code. I wonder if any
232 app depends on this ?
233 ****************************************************************************/
235 NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
237 if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
238 /* amazing the little things you learn with a test
239 suite. Locks beyond this offset (as a 64 bit
240 number!) always generate the conflict error code,
241 unless the top bit is set */
242 if (!blocking_lock) {
243 fsp->last_lock_failure = *lock;
245 return NT_STATUS_FILE_LOCK_CONFLICT;
248 if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
249 lock->context.tid == fsp->last_lock_failure.context.tid &&
250 lock->fnum == fsp->last_lock_failure.fnum &&
251 lock->start == fsp->last_lock_failure.start) {
252 return NT_STATUS_FILE_LOCK_CONFLICT;
255 if (!blocking_lock) {
256 fsp->last_lock_failure = *lock;
258 return NT_STATUS_LOCK_NOT_GRANTED;
261 /****************************************************************************
262 Open up the brlock.tdb database.
263 ****************************************************************************/
265 void brl_init(bool read_only)
267 if (brlock_db) {
268 return;
270 brlock_db = db_open(NULL, lock_path("brlock.tdb"),
271 lp_open_files_db_hash_size(),
272 TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST,
273 read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
274 if (!brlock_db) {
275 DEBUG(0,("Failed to open byte range locking database %s\n",
276 lock_path("brlock.tdb")));
277 return;
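/* Note on the open flags: brlock.tdb holds only runtime state, so
   TDB_CLEAR_IF_FIRST wipes it when the first process attaches, and
   TDB_VOLATILE tunes tdb for records that change frequently. */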
281 /****************************************************************************
282 Close down the brlock.tdb database.
283 ****************************************************************************/
285 void brl_shutdown(void)
287 TALLOC_FREE(brlock_db);
290 #if ZERO_ZERO
291 /****************************************************************************
292 Compare two locks for sorting.
293 ****************************************************************************/
295 static int lock_compare(const struct lock_struct *lck1,
296 const struct lock_struct *lck2)
298 if (lck1->start != lck2->start) {
299 return (lck1->start - lck2->start);
301 if (lck2->size != lck1->size) {
302 return ((int)lck1->size - (int)lck2->size);
304 return 0;
306 #endif
308 /****************************************************************************
309 Lock a range of bytes - Windows lock semantics.
310 ****************************************************************************/
312 NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
313 struct lock_struct *plock, bool blocking_lock)
315 unsigned int i;
316 files_struct *fsp = br_lck->fsp;
317 struct lock_struct *locks = br_lck->lock_data;
318 NTSTATUS status;
320 SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);
322 for (i=0; i < br_lck->num_locks; i++) {
323 /* Do any Windows or POSIX locks conflict ? */
324 if (brl_conflict(&locks[i], plock)) {
325 /* Remember who blocked us. */
326 plock->context.smbpid = locks[i].context.smbpid;
327 return brl_lock_failed(fsp,plock,blocking_lock);
329 #if ZERO_ZERO
330 if (plock->start == 0 && plock->size == 0 &&
331 locks[i].size == 0) {
332 break;
334 #endif
337 if (!IS_PENDING_LOCK(plock->lock_type)) {
338 contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
341 /* We can get the Windows lock, now see if it needs to
342 be mapped into a lower level POSIX one, and if so can
343 we get it ? */
345 if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
346 int errno_ret;
347 if (!set_posix_lock_windows_flavour(fsp,
348 plock->start,
349 plock->size,
350 plock->lock_type,
351 &plock->context,
352 locks,
353 br_lck->num_locks,
354 &errno_ret)) {
356 /* We don't know who blocked us. */
357 plock->context.smbpid = 0xFFFFFFFF;
359 if (errno_ret == EACCES || errno_ret == EAGAIN) {
360 status = NT_STATUS_FILE_LOCK_CONFLICT;
361 goto fail;
362 } else {
363 status = map_nt_error_from_unix(errno);
364 goto fail;
369 /* no conflicts - add it to the list of locks */
370 locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
371 if (!locks) {
372 status = NT_STATUS_NO_MEMORY;
373 goto fail;
376 memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
377 br_lck->num_locks += 1;
378 br_lck->lock_data = locks;
379 br_lck->modified = True;
381 return NT_STATUS_OK;
382 fail:
383 if (!IS_PENDING_LOCK(plock->lock_type)) {
384 contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
386 return status;
389 /****************************************************************************
390 Cope with POSIX range splits and merges.
391 ****************************************************************************/
393 static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr, /* Output array. */
394 struct lock_struct *ex, /* existing lock. */
395 struct lock_struct *plock) /* proposed lock. */
397 bool lock_types_differ = (ex->lock_type != plock->lock_type);
399 /* We can't merge non-conflicting locks on different context - ignore fnum. */
401 if (!brl_same_context(&ex->context, &plock->context)) {
402 /* Just copy. */
403 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
404 return 1;
407 /* We now know we have the same context. */
409 /* Did we overlap ? */
411 /*********************************************
412 +---------+
413 | ex |
414 +---------+
415 +-------+
416 | plock |
417 +-------+
418 OR....
419 +---------+
420 | ex |
421 +---------+
422 **********************************************/
424 if ( (ex->start > (plock->start + plock->size)) ||
425 (plock->start > (ex->start + ex->size))) {
427 /* No overlap with this lock - copy existing. */
429 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
430 return 1;
433 /*********************************************
434 +---------------------------+
435 | ex |
436 +---------------------------+
437 +---------------------------+
438 | plock | -> replace with plock.
439 +---------------------------+
441 +---------------+
442 | ex |
443 +---------------+
444 +---------------------------+
445 | plock | -> replace with plock.
446 +---------------------------+
448 **********************************************/
450 if ( (ex->start >= plock->start) &&
451 (ex->start + ex->size <= plock->start + plock->size) ) {
453 /* Replace - discard existing lock. */
455 return 0;
458 /*********************************************
459 Adjacent after.
460 +-------+
461 | ex |
462 +-------+
463 +---------------+
464 | plock |
465 +---------------+
467 BECOMES....
468 +---------------+-------+
469 | plock | ex | - different lock types.
470 +---------------+-------+
471 OR.... (merge)
472 +-----------------------+
473 | plock | - same lock type.
474 +-----------------------+
475 **********************************************/
477 if (plock->start + plock->size == ex->start) {
479 /* If the lock types are the same, we merge, if different, we
480 add the remainder of the old lock. */
482 if (lock_types_differ) {
483 /* Add existing. */
484 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
485 return 1;
486 } else {
487 /* Merge - adjust incoming lock as we may have more
488 * merging to come. */
489 plock->size += ex->size;
490 return 0;
494 /*********************************************
495 Adjacent before.
496 +-------+
497 | ex |
498 +-------+
499 +---------------+
500 | plock |
501 +---------------+
502 BECOMES....
503 +-------+---------------+
504 | ex | plock | - different lock types
505 +-------+---------------+
507 OR.... (merge)
508 +-----------------------+
509 | plock | - same lock type.
510 +-----------------------+
512 **********************************************/
514 if (ex->start + ex->size == plock->start) {
516 /* If the lock types are the same, we merge, if different, we
517 add the existing lock. */
519 if (lock_types_differ) {
520 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
521 return 1;
522 } else {
523 /* Merge - adjust incoming lock as we may have more
524 * merging to come. */
525 plock->start = ex->start;
526 plock->size += ex->size;
527 return 0;
531 /*********************************************
532 Overlap after.
533 +-----------------------+
534 | ex |
535 +-----------------------+
536 +---------------+
537 | plock |
538 +---------------+
540 +----------------+
541 | ex |
542 +----------------+
543 +---------------+
544 | plock |
545 +---------------+
547 BECOMES....
548 +---------------+-------+
549 | plock | ex | - different lock types.
550 +---------------+-------+
551 OR.... (merge)
552 +-----------------------+
553 | plock | - same lock type.
554 +-----------------------+
555 **********************************************/
557 if ( (ex->start >= plock->start) &&
558 (ex->start <= plock->start + plock->size) &&
559 (ex->start + ex->size > plock->start + plock->size) ) {
561 /* If the lock types are the same, we merge, if different, we
562 add the remainder of the old lock. */
564 if (lock_types_differ) {
565 /* Add remaining existing. */
566 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
567 /* Adjust existing start and size. */
568 lck_arr[0].start = plock->start + plock->size;
569 lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
570 return 1;
571 } else {
572 /* Merge - adjust incoming lock as we may have more
573 * merging to come. */
574 plock->size += (ex->start + ex->size) - (plock->start + plock->size);
575 return 0;
579 /*********************************************
580 Overlap before.
581 +-----------------------+
582 | ex |
583 +-----------------------+
584 +---------------+
585 | plock |
586 +---------------+
588 +-------------+
589 | ex |
590 +-------------+
591 +---------------+
592 | plock |
593 +---------------+
595 BECOMES....
596 +-------+---------------+
597 | ex | plock | - different lock types
598 +-------+---------------+
600 OR.... (merge)
601 +-----------------------+
602 | plock | - same lock type.
603 +-----------------------+
605 **********************************************/
607 if ( (ex->start < plock->start) &&
608 (ex->start + ex->size >= plock->start) &&
609 (ex->start + ex->size <= plock->start + plock->size) ) {
611 /* If the lock types are the same, we merge, if different, we
612 add the truncated old lock. */
614 if (lock_types_differ) {
615 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
616 /* Adjust existing size. */
617 lck_arr[0].size = plock->start - ex->start;
618 return 1;
619 } else {
620 /* Merge - adjust incoming lock as we may have more
621 * merging to come. MUST ADJUST plock SIZE FIRST ! */
622 plock->size += (plock->start - ex->start);
623 plock->start = ex->start;
624 return 0;
628 /*********************************************
629 Complete overlap.
630 +---------------------------+
631 | ex |
632 +---------------------------+
633 +---------+
634 | plock |
635 +---------+
636 BECOMES.....
637 +-------+---------+---------+
638 | ex | plock | ex | - different lock types.
639 +-------+---------+---------+
641 +---------------------------+
642 | plock | - same lock type.
643 +---------------------------+
644 **********************************************/
646 if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
648 if (lock_types_differ) {
650 /* We have to split ex into two locks here. */
652 memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
653 memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
655 /* Adjust first existing size. */
656 lck_arr[0].size = plock->start - ex->start;
658 /* Adjust second existing start and size. */
659 lck_arr[1].start = plock->start + plock->size;
660 lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
661 return 2;
662 } else {
663 /* Just eat the existing locks, merge them into plock. */
664 plock->start = ex->start;
665 plock->size = ex->size;
666 return 0;
670 /* Never get here. */
671 smb_panic("brlock_posix_split_merge");
672 /* Notreached. */
674 /* Keep some compilers happy. */
675 return 0;
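/* Usage note: the return value is how many entries describing the
   existing lock were written to lck_arr (0, 1 or 2). Merges instead
   grow *plock in place, which is why brl_lock_posix() allocates
   num_locks + 2 output slots and brl_unlock_posix() num_locks + 1. */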
678 /****************************************************************************
679 Lock a range of bytes - POSIX lock semantics.
680 We must cope with range splits and merges.
681 ****************************************************************************/
683 static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
684 struct byte_range_lock *br_lck,
685 struct lock_struct *plock)
687 unsigned int i, count, posix_count;
688 struct lock_struct *locks = br_lck->lock_data;
689 struct lock_struct *tp;
690 bool signal_pending_read = False;
691 bool break_oplocks = false;
692 NTSTATUS status;
694 /* No zero-zero locks for POSIX. */
695 if (plock->start == 0 && plock->size == 0) {
696 return NT_STATUS_INVALID_PARAMETER;
699 /* Don't allow 64-bit lock wrap. */
700 if (plock->start + plock->size < plock->start ||
701 plock->start + plock->size < plock->size) {
702 return NT_STATUS_INVALID_PARAMETER;
705 /* The worst case scenario here is we have to split an
706 existing POSIX lock range into two, and add our lock,
707 so we need at most 2 more entries. */
709 tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
710 if (!tp) {
711 return NT_STATUS_NO_MEMORY;
714 count = posix_count = 0;
716 for (i=0; i < br_lck->num_locks; i++) {
717 struct lock_struct *curr_lock = &locks[i];
719 /* If we have a pending read lock, a lock downgrade should
720 trigger a lock re-evaluation. */
721 if (curr_lock->lock_type == PENDING_READ_LOCK &&
722 brl_pending_overlap(plock, curr_lock)) {
723 signal_pending_read = True;
726 if (curr_lock->lock_flav == WINDOWS_LOCK) {
727 /* Do any Windows flavour locks conflict ? */
728 if (brl_conflict(curr_lock, plock)) {
729 /* No games with error messages. */
730 SAFE_FREE(tp);
731 /* Remember who blocked us. */
732 plock->context.smbpid = curr_lock->context.smbpid;
733 return NT_STATUS_FILE_LOCK_CONFLICT;
735 /* Just copy the Windows lock into the new array. */
736 memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
737 count++;
738 } else {
739 unsigned int tmp_count = 0;
741 /* POSIX conflict semantics are different. */
742 if (brl_conflict_posix(curr_lock, plock)) {
743 /* Can't block ourselves with POSIX locks. */
744 /* No games with error messages. */
745 SAFE_FREE(tp);
746 /* Remember who blocked us. */
747 plock->context.smbpid = curr_lock->context.smbpid;
748 return NT_STATUS_FILE_LOCK_CONFLICT;
751 /* Work out overlaps. */
752 tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
753 posix_count += tmp_count;
754 count += tmp_count;
759 * Break oplocks while we hold a brl. Since lock() and unlock() calls
760 * are not symmetric with POSIX semantics, we cannot guarantee our
761 * contend_level2_oplocks_begin/end calls will be acquired and
762 * released one-for-one as with Windows semantics. Therefore we only
763 * call contend_level2_oplocks_begin if this is the first POSIX brl on
764 * the file.
766 break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
767 posix_count == 0);
768 if (break_oplocks) {
769 contend_level2_oplocks_begin(br_lck->fsp,
770 LEVEL2_CONTEND_POSIX_BRL);
773 /* Try and add the lock in order, sorted by lock start. */
774 for (i=0; i < count; i++) {
775 struct lock_struct *curr_lock = &tp[i];
777 if (curr_lock->start <= plock->start) {
778 continue;
780 break;
782 if (i < count) {
783 memmove(&tp[i+1], &tp[i],
784 (count - i)*sizeof(struct lock_struct));
786 memcpy(&tp[i], plock, sizeof(struct lock_struct));
787 count++;
789 /* We can get the POSIX lock, now see if it needs to
790 be mapped into a lower level POSIX one, and if so can
791 we get it ? */
793 if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
794 int errno_ret;
796 /* The lower layer just needs to attempt to
797 get the system POSIX lock. We've weeded out
798 any conflicts above. */
800 if (!set_posix_lock_posix_flavour(br_lck->fsp,
801 plock->start,
802 plock->size,
803 plock->lock_type,
804 &errno_ret)) {
806 /* We don't know who blocked us. */
807 plock->context.smbpid = 0xFFFFFFFF;
809 if (errno_ret == EACCES || errno_ret == EAGAIN) {
810 SAFE_FREE(tp);
811 status = NT_STATUS_FILE_LOCK_CONFLICT;
812 goto fail;
813 } else {
814 SAFE_FREE(tp);
815 status = map_nt_error_from_unix(errno);
816 goto fail;
821 /* If we didn't use all the allocated size,
822 * realloc so we don't leak entries per lock call. */
823 if (count < br_lck->num_locks + 2) {
824 tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
825 if (!tp) {
826 status = NT_STATUS_NO_MEMORY;
827 goto fail;
831 br_lck->num_locks = count;
832 SAFE_FREE(br_lck->lock_data);
833 br_lck->lock_data = tp;
834 locks = tp;
835 br_lck->modified = True;
837 /* A successful downgrade from write to read lock can trigger a lock
838 re-evaluation where waiting readers can now proceed. */
840 if (signal_pending_read) {
841 /* Send unlock messages to any pending read waiters that overlap. */
842 for (i=0; i < br_lck->num_locks; i++) {
843 struct lock_struct *pend_lock = &locks[i];
845 /* Ignore non-pending locks. */
846 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
847 continue;
850 if (pend_lock->lock_type == PENDING_READ_LOCK &&
851 brl_pending_overlap(plock, pend_lock)) {
852 DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
853 procid_str_static(&pend_lock->context.pid )));
855 messaging_send(msg_ctx, pend_lock->context.pid,
856 MSG_SMB_UNLOCK, &data_blob_null);
861 return NT_STATUS_OK;
862 fail:
863 if (break_oplocks) {
864 contend_level2_oplocks_end(br_lck->fsp,
865 LEVEL2_CONTEND_POSIX_BRL);
867 return status;
870 NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
871 struct byte_range_lock *br_lck,
872 struct lock_struct *plock,
873 bool blocking_lock,
874 struct blocking_lock_record *blr)
876 VFS_FIND(brl_lock_windows);
877 return handle->fns->brl_lock_windows(handle, br_lck, plock,
878 blocking_lock, blr);
881 /****************************************************************************
882 Lock a range of bytes.
883 ****************************************************************************/
885 NTSTATUS brl_lock(struct messaging_context *msg_ctx,
886 struct byte_range_lock *br_lck,
887 uint32 smbpid,
888 struct server_id pid,
889 br_off start,
890 br_off size,
891 enum brl_type lock_type,
892 enum brl_flavour lock_flav,
893 bool blocking_lock,
894 uint32 *psmbpid,
895 struct blocking_lock_record *blr)
897 NTSTATUS ret;
898 struct lock_struct lock;
900 #if !ZERO_ZERO
901 if (start == 0 && size == 0) {
902 DEBUG(0,("client sent 0/0 lock - please report this\n"));
904 #endif
906 #ifdef DEVELOPER
907 /* Quieten valgrind on test. */
908 memset(&lock, '\0', sizeof(lock));
909 #endif
911 lock.context.smbpid = smbpid;
912 lock.context.pid = pid;
913 lock.context.tid = br_lck->fsp->conn->cnum;
914 lock.start = start;
915 lock.size = size;
916 lock.fnum = br_lck->fsp->fnum;
917 lock.lock_type = lock_type;
918 lock.lock_flav = lock_flav;
920 if (lock_flav == WINDOWS_LOCK) {
921 ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
922 &lock, blocking_lock, blr);
923 } else {
924 ret = brl_lock_posix(msg_ctx, br_lck, &lock);
927 #if ZERO_ZERO
928 /* sort the lock list */
929 qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
930 #endif
932 /* If we're returning an error, return who blocked us. */
933 if (!NT_STATUS_IS_OK(ret) && psmbpid) {
934 *psmbpid = lock.context.smbpid;
936 return ret;
939 /****************************************************************************
940 Unlock a range of bytes - Windows semantics.
941 ****************************************************************************/
943 bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
944 struct byte_range_lock *br_lck,
945 const struct lock_struct *plock)
947 unsigned int i, j;
948 struct lock_struct *locks = br_lck->lock_data;
949 enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */
951 SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);
953 #if ZERO_ZERO
954 /* Delete write locks by preference... The lock list
955 is sorted in the zero zero case. */
957 for (i = 0; i < br_lck->num_locks; i++) {
958 struct lock_struct *lock = &locks[i];
960 if (lock->lock_type == WRITE_LOCK &&
961 brl_same_context(&lock->context, &plock->context) &&
962 lock->fnum == plock->fnum &&
963 lock->lock_flav == WINDOWS_LOCK &&
964 lock->start == plock->start &&
965 lock->size == plock->size) {
967 /* found it - delete it */
968 deleted_lock_type = lock->lock_type;
969 break;
973 if (i != br_lck->num_locks) {
974 /* We found it - don't search again. */
975 goto unlock_continue;
977 #endif
979 for (i = 0; i < br_lck->num_locks; i++) {
980 struct lock_struct *lock = &locks[i];
982 /* Only remove our own locks that match in start, size, and flavour. */
983 if (brl_same_context(&lock->context, &plock->context) &&
984 lock->fnum == plock->fnum &&
985 lock->lock_flav == WINDOWS_LOCK &&
986 lock->start == plock->start &&
987 lock->size == plock->size ) {
988 deleted_lock_type = lock->lock_type;
989 break;
993 if (i == br_lck->num_locks) {
994 /* we didn't find it */
995 return False;
998 #if ZERO_ZERO
999 unlock_continue:
1000 #endif
1002 /* Actually delete the lock. */
1003 if (i < br_lck->num_locks - 1) {
1004 memmove(&locks[i], &locks[i+1],
1005 sizeof(*locks)*((br_lck->num_locks-1) - i));
1008 br_lck->num_locks -= 1;
1009 br_lck->modified = True;
1011 /* Unlock the underlying POSIX regions. */
1012 if(lp_posix_locking(br_lck->fsp->conn->params)) {
1013 release_posix_lock_windows_flavour(br_lck->fsp,
1014 plock->start,
1015 plock->size,
1016 deleted_lock_type,
1017 &plock->context,
1018 locks,
1019 br_lck->num_locks);
1022 /* Send unlock messages to any pending waiters that overlap. */
1023 for (j=0; j < br_lck->num_locks; j++) {
1024 struct lock_struct *pend_lock = &locks[j];
1026 /* Ignore non-pending locks. */
1027 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
1028 continue;
1031 /* We could send specific lock info here... */
1032 if (brl_pending_overlap(plock, pend_lock)) {
1033 DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
1034 procid_str_static(&pend_lock->context.pid )));
1036 messaging_send(msg_ctx, pend_lock->context.pid,
1037 MSG_SMB_UNLOCK, &data_blob_null);
1041 contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
1042 return True;
1045 /****************************************************************************
1046 Unlock a range of bytes - POSIX semantics.
1047 ****************************************************************************/
1049 static bool brl_unlock_posix(struct messaging_context *msg_ctx,
1050 struct byte_range_lock *br_lck,
1051 struct lock_struct *plock)
1053 unsigned int i, j, count;
1054 struct lock_struct *tp;
1055 struct lock_struct *locks = br_lck->lock_data;
1056 bool overlap_found = False;
1058 /* No zero-zero locks for POSIX. */
1059 if (plock->start == 0 && plock->size == 0) {
1060 return False;
1063 /* Don't allow 64-bit lock wrap. */
1064 if (plock->start + plock->size < plock->start ||
1065 plock->start + plock->size < plock->size) {
1066 DEBUG(10,("brl_unlock_posix: lock wrap\n"));
1067 return False;
1070 /* The worst case scenario here is we have to split an
1071 existing POSIX lock range into two, so we need at most
1072 1 more entry. */
1074 tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
1075 if (!tp) {
1076 DEBUG(10,("brl_unlock_posix: malloc fail\n"));
1077 return False;
1080 count = 0;
1081 for (i = 0; i < br_lck->num_locks; i++) {
1082 struct lock_struct *lock = &locks[i];
1083 unsigned int tmp_count;
1085 /* Only remove our own locks - ignore fnum. */
1086 if (IS_PENDING_LOCK(lock->lock_type) ||
1087 !brl_same_context(&lock->context, &plock->context)) {
1088 memcpy(&tp[count], lock, sizeof(struct lock_struct));
1089 count++;
1090 continue;
1093 if (lock->lock_flav == WINDOWS_LOCK) {
1094 /* Do any Windows flavour locks conflict ? */
1095 if (brl_conflict(lock, plock)) {
1096 SAFE_FREE(tp);
1097 return false;
1099 /* Just copy the Windows lock into the new array. */
1100 memcpy(&tp[count], lock, sizeof(struct lock_struct));
1101 count++;
1102 continue;
1105 /* Work out overlaps. */
1106 tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);
1108 if (tmp_count == 0) {
1109 /* plock overlapped the existing lock completely,
1110 or replaced it. Don't copy the existing lock. */
1111 overlap_found = true;
1112 } else if (tmp_count == 1) {
1113 /* Either no overlap, (simple copy of existing lock) or
1114 * an overlap of an existing lock. */
1115 /* If the lock changed size, we had an overlap. */
1116 if (tp[count].size != lock->size) {
1117 overlap_found = true;
1119 count += tmp_count;
1120 } else if (tmp_count == 2) {
1121 /* We split a lock range in two. */
1122 overlap_found = true;
1123 count += tmp_count;
1125 /* Optimisation... */
1126 /* We know we're finished here as we can't overlap any
1127 more POSIX locks. Copy the rest of the lock array. */
1129 if (i < br_lck->num_locks - 1) {
1130 memcpy(&tp[count], &locks[i+1],
1131 sizeof(*locks)*((br_lck->num_locks-1) - i));
1132 count += ((br_lck->num_locks-1) - i);
1134 break;
1139 if (!overlap_found) {
1140 /* Just ignore - no change. */
1141 SAFE_FREE(tp);
1142 DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
1143 return True;
1146 /* Unlock any POSIX regions. */
1147 if(lp_posix_locking(br_lck->fsp->conn->params)) {
1148 release_posix_lock_posix_flavour(br_lck->fsp,
1149 plock->start,
1150 plock->size,
1151 &plock->context,
1152 tp,
1153 count);
1156 /* Realloc so we don't leak entries per unlock call. */
1157 if (count) {
1158 tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
1159 if (!tp) {
1160 DEBUG(10,("brl_unlock_posix: realloc fail\n"));
1161 return False;
1163 } else {
1164 /* We deleted the last lock. */
1165 SAFE_FREE(tp);
1166 tp = NULL;
1169 contend_level2_oplocks_end(br_lck->fsp,
1170 LEVEL2_CONTEND_POSIX_BRL);
1172 br_lck->num_locks = count;
1173 SAFE_FREE(br_lck->lock_data);
1174 locks = tp;
1175 br_lck->lock_data = tp;
1176 br_lck->modified = True;
1178 /* Send unlock messages to any pending waiters that overlap. */
1180 for (j=0; j < br_lck->num_locks; j++) {
1181 struct lock_struct *pend_lock = &locks[j];
1183 /* Ignore non-pending locks. */
1184 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
1185 continue;
1188 /* We could send specific lock info here... */
1189 if (brl_pending_overlap(plock, pend_lock)) {
1190 DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
1191 procid_str_static(&pend_lock->context.pid )));
1193 messaging_send(msg_ctx, pend_lock->context.pid,
1194 MSG_SMB_UNLOCK, &data_blob_null);
1198 return True;
1201 bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
1202 struct messaging_context *msg_ctx,
1203 struct byte_range_lock *br_lck,
1204 const struct lock_struct *plock)
1206 VFS_FIND(brl_unlock_windows);
1207 return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
1210 /****************************************************************************
1211 Unlock a range of bytes.
1212 ****************************************************************************/
1214 bool brl_unlock(struct messaging_context *msg_ctx,
1215 struct byte_range_lock *br_lck,
1216 uint32 smbpid,
1217 struct server_id pid,
1218 br_off start,
1219 br_off size,
1220 enum brl_flavour lock_flav)
1222 struct lock_struct lock;
1224 lock.context.smbpid = smbpid;
1225 lock.context.pid = pid;
1226 lock.context.tid = br_lck->fsp->conn->cnum;
1227 lock.start = start;
1228 lock.size = size;
1229 lock.fnum = br_lck->fsp->fnum;
1230 lock.lock_type = UNLOCK_LOCK;
1231 lock.lock_flav = lock_flav;
1233 if (lock_flav == WINDOWS_LOCK) {
1234 return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
1235 br_lck, &lock);
1236 } else {
1237 return brl_unlock_posix(msg_ctx, br_lck, &lock);
1241 /****************************************************************************
1242 Test if we could add a lock if we wanted to.
1243 Returns True if the region required is currently unlocked, False if locked.
1244 ****************************************************************************/
1246 bool brl_locktest(struct byte_range_lock *br_lck,
1247 uint32 smbpid,
1248 struct server_id pid,
1249 br_off start,
1250 br_off size,
1251 enum brl_type lock_type,
1252 enum brl_flavour lock_flav)
1254 bool ret = True;
1255 unsigned int i;
1256 struct lock_struct lock;
1257 const struct lock_struct *locks = br_lck->lock_data;
1258 files_struct *fsp = br_lck->fsp;
1260 lock.context.smbpid = smbpid;
1261 lock.context.pid = pid;
1262 lock.context.tid = br_lck->fsp->conn->cnum;
1263 lock.start = start;
1264 lock.size = size;
1265 lock.fnum = fsp->fnum;
1266 lock.lock_type = lock_type;
1267 lock.lock_flav = lock_flav;
1269 /* Make sure existing locks don't conflict */
1270 for (i=0; i < br_lck->num_locks; i++) {
1272 * Our own locks don't conflict.
1274 if (brl_conflict_other(&locks[i], &lock)) {
1275 return False;
1280 * There is no lock held by an SMB daemon, check to
1281 * see if there is a POSIX lock from a UNIX or NFS process.
1282 * This only conflicts with Windows locks, not POSIX locks.
1285 if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
1286 ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);
1288 DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
1289 (double)start, (double)size, ret ? "locked" : "unlocked",
1290 fsp->fnum, fsp_str_dbg(fsp)));
1292 /* We need to return the inverse of is_posix_locked. */
1293 ret = !ret;
1296 /* no conflicts - we could have added it */
1297 return ret;
1300 /****************************************************************************
1301 Query for existing locks.
1302 ****************************************************************************/
1304 NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
1305 uint32 *psmbpid,
1306 struct server_id pid,
1307 br_off *pstart,
1308 br_off *psize,
1309 enum brl_type *plock_type,
1310 enum brl_flavour lock_flav)
1312 unsigned int i;
1313 struct lock_struct lock;
1314 const struct lock_struct *locks = br_lck->lock_data;
1315 files_struct *fsp = br_lck->fsp;
1317 lock.context.smbpid = *psmbpid;
1318 lock.context.pid = pid;
1319 lock.context.tid = br_lck->fsp->conn->cnum;
1320 lock.start = *pstart;
1321 lock.size = *psize;
1322 lock.fnum = fsp->fnum;
1323 lock.lock_type = *plock_type;
1324 lock.lock_flav = lock_flav;
1326 /* Make sure existing locks don't conflict */
1327 for (i=0; i < br_lck->num_locks; i++) {
1328 const struct lock_struct *exlock = &locks[i];
1329 bool conflict = False;
1331 if (exlock->lock_flav == WINDOWS_LOCK) {
1332 conflict = brl_conflict(exlock, &lock);
1333 } else {
1334 conflict = brl_conflict_posix(exlock, &lock);
1337 if (conflict) {
1338 *psmbpid = exlock->context.smbpid;
1339 *pstart = exlock->start;
1340 *psize = exlock->size;
1341 *plock_type = exlock->lock_type;
1342 return NT_STATUS_LOCK_NOT_GRANTED;
1347 * There is no lock held by an SMB daemon, check to
1348 * see if there is a POSIX lock from a UNIX or NFS process.
1351 if(lp_posix_locking(fsp->conn->params)) {
1352 bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);
1354 DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
1355 (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
1356 fsp->fnum, fsp_str_dbg(fsp)));
1358 if (ret) {
1359 /* Hmmm. No clue what to set smbpid to - use -1. */
1360 *psmbpid = 0xFFFF;
1361 return NT_STATUS_LOCK_NOT_GRANTED;
1365 return NT_STATUS_OK;
1369 bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
1370 struct byte_range_lock *br_lck,
1371 struct lock_struct *plock,
1372 struct blocking_lock_record *blr)
1374 VFS_FIND(brl_cancel_windows);
1375 return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
1378 /****************************************************************************
1379 Remove a particular pending lock.
1380 ****************************************************************************/
1381 bool brl_lock_cancel(struct byte_range_lock *br_lck,
1382 uint32 smbpid,
1383 struct server_id pid,
1384 br_off start,
1385 br_off size,
1386 enum brl_flavour lock_flav,
1387 struct blocking_lock_record *blr)
1389 bool ret;
1390 struct lock_struct lock;
1392 lock.context.smbpid = smbpid;
1393 lock.context.pid = pid;
1394 lock.context.tid = br_lck->fsp->conn->cnum;
1395 lock.start = start;
1396 lock.size = size;
1397 lock.fnum = br_lck->fsp->fnum;
1398 lock.lock_flav = lock_flav;
1399 /* lock.lock_type doesn't matter */
1401 if (lock_flav == WINDOWS_LOCK) {
1402 ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
1403 &lock, blr);
1404 } else {
1405 ret = brl_lock_cancel_default(br_lck, &lock);
1408 return ret;
1411 bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
1412 struct lock_struct *plock)
1414 unsigned int i;
1415 struct lock_struct *locks = br_lck->lock_data;
1417 SMB_ASSERT(plock);
1419 for (i = 0; i < br_lck->num_locks; i++) {
1420 struct lock_struct *lock = &locks[i];
1422 /* For pending locks we *always* care about the fnum. */
1423 if (brl_same_context(&lock->context, &plock->context) &&
1424 lock->fnum == plock->fnum &&
1425 IS_PENDING_LOCK(lock->lock_type) &&
1426 lock->lock_flav == plock->lock_flav &&
1427 lock->start == plock->start &&
1428 lock->size == plock->size) {
1429 break;
1433 if (i == br_lck->num_locks) {
1434 /* Didn't find it. */
1435 return False;
1438 if (i < br_lck->num_locks - 1) {
1439 /* Found this particular pending lock - delete it */
1440 memmove(&locks[i], &locks[i+1],
1441 sizeof(*locks)*((br_lck->num_locks-1) - i));
1444 br_lck->num_locks -= 1;
1445 br_lck->modified = True;
1446 return True;
1449 /****************************************************************************
1450 Remove any locks associated with an open file.
1451 If this process owns Windows locks on other fnums for this dev/ino
1452 pair, each lock is released individually to keep the system POSIX locks correct.
1453 ****************************************************************************/
1455 void brl_close_fnum(struct messaging_context *msg_ctx,
1456 struct byte_range_lock *br_lck)
1458 files_struct *fsp = br_lck->fsp;
1459 uint16 tid = fsp->conn->cnum;
1460 int fnum = fsp->fnum;
1461 unsigned int i, j, dcount=0;
1462 int num_deleted_windows_locks = 0;
1463 struct lock_struct *locks = br_lck->lock_data;
1464 struct server_id pid = procid_self();
1465 bool unlock_individually = False;
1466 bool posix_level2_contention_ended = false;
1468 if(lp_posix_locking(fsp->conn->params)) {
1470 /* Check if there are any Windows locks associated with this dev/ino
1471 pair that are not this fnum. If so we need to call unlock on each
1472 one in order to release the system POSIX locks correctly. */
1474 for (i=0; i < br_lck->num_locks; i++) {
1475 struct lock_struct *lock = &locks[i];
1477 if (!procid_equal(&lock->context.pid, &pid)) {
1478 continue;
1481 if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
1482 continue; /* Ignore pending. */
1485 if (lock->context.tid != tid || lock->fnum != fnum) {
1486 unlock_individually = True;
1487 break;
1491 if (unlock_individually) {
1492 struct lock_struct *locks_copy;
1493 unsigned int num_locks_copy;
1495 /* Copy the current lock array. */
1496 if (br_lck->num_locks) {
1497 locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
1498 if (!locks_copy) {
1499 smb_panic("brl_close_fnum: talloc failed");
1501 } else {
1502 locks_copy = NULL;
1505 num_locks_copy = br_lck->num_locks;
1507 for (i=0; i < num_locks_copy; i++) {
1508 struct lock_struct *lock = &locks_copy[i];
1510 if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
1511 (lock->fnum == fnum)) {
1512 brl_unlock(msg_ctx,
1513 br_lck,
1514 lock->context.smbpid,
1515 pid,
1516 lock->start,
1517 lock->size,
1518 lock->lock_flav);
1521 return;
1525 /* We can bulk delete - any POSIX locks will be removed when the fd closes. */
1527 /* Remove any existing locks for this fnum (or any fnum if they're POSIX). */
1529 for (i=0; i < br_lck->num_locks; i++) {
1530 struct lock_struct *lock = &locks[i];
1531 bool del_this_lock = False;
1533 if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
1534 if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
1535 del_this_lock = True;
1536 num_deleted_windows_locks++;
1537 contend_level2_oplocks_end(br_lck->fsp,
1538 LEVEL2_CONTEND_WINDOWS_BRL);
1539 } else if (lock->lock_flav == POSIX_LOCK) {
1540 del_this_lock = True;
1542 /* Only end level2 contention once for posix */
1543 if (!posix_level2_contention_ended) {
1544 posix_level2_contention_ended = true;
1545 contend_level2_oplocks_end(br_lck->fsp,
1546 LEVEL2_CONTEND_POSIX_BRL);
1551 if (del_this_lock) {
1552 /* Send unlock messages to any pending waiters that overlap. */
1553 for (j=0; j < br_lck->num_locks; j++) {
1554 struct lock_struct *pend_lock = &locks[j];
1556 /* Ignore our own or non-pending locks. */
1557 if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
1558 continue;
1561 /* Optimisation - don't send to this fnum as we're
1562 closing it. */
1563 if (pend_lock->context.tid == tid &&
1564 procid_equal(&pend_lock->context.pid, &pid) &&
1565 pend_lock->fnum == fnum) {
1566 continue;
1569 /* We could send specific lock info here... */
1570 if (brl_pending_overlap(lock, pend_lock)) {
1571 messaging_send(msg_ctx, pend_lock->context.pid,
1572 MSG_SMB_UNLOCK, &data_blob_null);
1576 /* found it - delete it */
1577 if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
1578 memmove(&locks[i], &locks[i+1],
1579 sizeof(*locks)*((br_lck->num_locks-1) - i));
1581 br_lck->num_locks--;
1582 br_lck->modified = True;
1583 i--;
1584 dcount++;
1588 if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
1589 /* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
1590 reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
1594 /****************************************************************************
1595 Ensure this set of lock entries is valid.
1596 ****************************************************************************/
1597 static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
1599 unsigned int i;
1600 unsigned int num_valid_entries = 0;
1601 struct lock_struct *locks = *pplocks;
1603 for (i = 0; i < *pnum_entries; i++) {
1604 struct lock_struct *lock_data = &locks[i];
1605 if (!process_exists(lock_data->context.pid)) {
1606 /* This process no longer exists - mark this
1607 entry as invalid by zeroing it. */
1608 ZERO_STRUCTP(lock_data);
1609 } else {
1610 num_valid_entries++;
1614 if (num_valid_entries != *pnum_entries) {
1615 struct lock_struct *new_lock_data = NULL;
1617 if (num_valid_entries) {
1618 new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
1619 if (!new_lock_data) {
1620 DEBUG(3, ("malloc fail\n"));
1621 return False;
1624 num_valid_entries = 0;
1625 for (i = 0; i < *pnum_entries; i++) {
1626 struct lock_struct *lock_data = &locks[i];
1627 if (lock_data->context.smbpid &&
1628 lock_data->context.tid) {
1629 /* Valid (nonzero) entry - copy it. */
1630 memcpy(&new_lock_data[num_valid_entries],
1631 lock_data, sizeof(struct lock_struct));
1632 num_valid_entries++;
1637 SAFE_FREE(*pplocks);
1638 *pplocks = new_lock_data;
1639 *pnum_entries = num_valid_entries;
1642 return True;
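/* Dead entries are zeroed in the first pass and compacted in the
   second; the smbpid/tid test above distinguishes live entries from
   the zeroed ones. */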
1645 struct brl_forall_cb {
1646 void (*fn)(struct file_id id, struct server_id pid,
1647 enum brl_type lock_type,
1648 enum brl_flavour lock_flav,
1649 br_off start, br_off size,
1650 void *private_data);
1651 void *private_data;
1654 /****************************************************************************
1655 Traverse the whole database with this function, calling traverse_callback
1656 on each lock.
1657 ****************************************************************************/
1659 static int traverse_fn(struct db_record *rec, void *state)
1661 struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
1662 struct lock_struct *locks;
1663 struct file_id *key;
1664 unsigned int i;
1665 unsigned int num_locks = 0;
1666 unsigned int orig_num_locks = 0;
1668 /* In a traverse function we must make a copy of
1669 dbuf before modifying it. */
1671 locks = (struct lock_struct *)memdup(rec->value.dptr,
1672 rec->value.dsize);
1673 if (!locks) {
1674 return -1; /* Terminate traversal. */
1677 key = (struct file_id *)rec->key.dptr;
1678 orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);
1680 /* Ensure the lock db is clean of entries from invalid processes. */
1682 if (!validate_lock_entries(&num_locks, &locks)) {
1683 SAFE_FREE(locks);
1684 return -1; /* Terminate traversal */
1687 if (orig_num_locks != num_locks) {
1688 if (num_locks) {
1689 TDB_DATA data;
1690 data.dptr = (uint8_t *)locks;
1691 data.dsize = num_locks*sizeof(struct lock_struct);
1692 rec->store(rec, data, TDB_REPLACE);
1693 } else {
1694 rec->delete_rec(rec);
1698 if (cb->fn) {
1699 for ( i=0; i<num_locks; i++) {
1700 cb->fn(*key,
1701 locks[i].context.pid,
1702 locks[i].lock_type,
1703 locks[i].lock_flav,
1704 locks[i].start,
1705 locks[i].size,
1706 cb->private_data);
1710 SAFE_FREE(locks);
1711 return 0;
1714 /*******************************************************************
1715 Call the specified function on each lock in the database.
1716 ********************************************************************/
1718 int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
1719 enum brl_type lock_type,
1720 enum brl_flavour lock_flav,
1721 br_off start, br_off size,
1722 void *private_data),
1723 void *private_data)
1725 struct brl_forall_cb cb;
1727 if (!brlock_db) {
1728 return 0;
1730 cb.fn = fn;
1731 cb.private_data = private_data;
1732 return brlock_db->traverse(brlock_db, traverse_fn, &cb);
1735 /*******************************************************************
1736 Store a potentially modified set of byte range lock data back into
1737 the database.
1738 Unlock the record.
1739 ********************************************************************/
1741 static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
1743 if (br_lck->read_only) {
1744 SMB_ASSERT(!br_lck->modified);
1747 if (!br_lck->modified) {
1748 goto done;
1751 if (br_lck->num_locks == 0) {
1752 /* No locks - delete this entry. */
1753 NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
1754 if (!NT_STATUS_IS_OK(status)) {
1755 DEBUG(0, ("delete_rec returned %s\n",
1756 nt_errstr(status)));
1757 smb_panic("Could not delete byte range lock entry");
1759 } else {
1760 TDB_DATA data;
1761 NTSTATUS status;
1763 data.dptr = (uint8 *)br_lck->lock_data;
1764 data.dsize = br_lck->num_locks * sizeof(struct lock_struct);
1766 status = br_lck->record->store(br_lck->record, data,
1767 TDB_REPLACE);
1768 if (!NT_STATUS_IS_OK(status)) {
1769 DEBUG(0, ("store returned %s\n", nt_errstr(status)));
1770 smb_panic("Could not store byte range mode entry");
1774 done:
1776 SAFE_FREE(br_lck->lock_data);
1777 TALLOC_FREE(br_lck->record);
1778 return 0;
1781 /*******************************************************************
1782 Fetch a set of byte range lock data from the database.
1783 Leave the record locked.
1784 TALLOC_FREE(brl) will release the lock in the destructor.
1785 ********************************************************************/
1787 static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
1788 files_struct *fsp, bool read_only)
1790 TDB_DATA key, data;
1791 struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);
1793 if (br_lck == NULL) {
1794 return NULL;
1797 br_lck->fsp = fsp;
1798 br_lck->num_locks = 0;
1799 br_lck->modified = False;
1800 memset(&br_lck->key, '\0', sizeof(struct file_id));
1801 br_lck->key = fsp->file_id;
1803 key.dptr = (uint8 *)&br_lck->key;
1804 key.dsize = sizeof(struct file_id);
1806 if (!fsp->lockdb_clean) {
1807 /* We must be read/write to clean
1808 the dead entries. */
1809 read_only = False;
1812 if (read_only) {
1813 if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
1814 DEBUG(3, ("Could not fetch byte range lock record\n"));
1815 TALLOC_FREE(br_lck);
1816 return NULL;
1818 br_lck->record = NULL;
1820 else {
1821 br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);
1823 if (br_lck->record == NULL) {
1824 DEBUG(3, ("Could not lock byte range lock entry\n"));
1825 TALLOC_FREE(br_lck);
1826 return NULL;
1829 data = br_lck->record->value;
1832 br_lck->read_only = read_only;
1833 br_lck->lock_data = NULL;
1835 talloc_set_destructor(br_lck, byte_range_lock_destructor);
1837 br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
1839 if (br_lck->num_locks != 0) {
1840 br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
1841 br_lck->num_locks);
1842 if (br_lck->lock_data == NULL) {
1843 DEBUG(0, ("malloc failed\n"));
1844 TALLOC_FREE(br_lck);
1845 return NULL;
1848 memcpy(br_lck->lock_data, data.dptr, data.dsize);
1851 if (!fsp->lockdb_clean) {
1852 int orig_num_locks = br_lck->num_locks;
1854 /* This is the first time we've accessed this. */
1855 /* Go through and ensure all entries exist - remove any that don't. */
1856 /* Makes the lockdb self cleaning at low cost. */
1858 if (!validate_lock_entries(&br_lck->num_locks,
1859 &br_lck->lock_data)) {
1860 SAFE_FREE(br_lck->lock_data);
1861 TALLOC_FREE(br_lck);
1862 return NULL;
1865 /* Ensure invalid locks are cleaned up in the destructor. */
1866 if (orig_num_locks != br_lck->num_locks) {
1867 br_lck->modified = True;
1870 /* Mark the lockdb as "clean" as seen from this open file. */
1871 fsp->lockdb_clean = True;
1874 if (DEBUGLEVEL >= 10) {
1875 unsigned int i;
1876 struct lock_struct *locks = br_lck->lock_data;
1877 DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
1878 br_lck->num_locks,
1879 file_id_string_tos(&fsp->file_id)));
1880 for( i = 0; i < br_lck->num_locks; i++) {
1881 print_lock_struct(i, &locks[i]);
1884 return br_lck;
1887 struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
1888 files_struct *fsp)
1890 return brl_get_locks_internal(mem_ctx, fsp, False);
1893 struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
1894 files_struct *fsp)
1896 return brl_get_locks_internal(mem_ctx, fsp, True);
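/* Minimal usage sketch for a hypothetical caller. The destructor
   installed by brl_get_locks_internal() stores any modifications and
   releases the record lock when the talloc context is freed:

	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
	if (br_lck != NULL) {
		status = brl_lock(msg_ctx, br_lck, smbpid, procid_self(),
				  0, 100, WRITE_LOCK, WINDOWS_LOCK,
				  False, NULL, NULL);
		TALLOC_FREE(br_lck);	   (persists changes, drops the lock)
	}
*/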
1899 struct brl_revalidate_state {
1900 ssize_t array_size;
1901 uint32 num_pids;
1902 struct server_id *pids;
1906 * Collect PIDs of all processes with pending entries
1909 static void brl_revalidate_collect(struct file_id id, struct server_id pid,
1910 enum brl_type lock_type,
1911 enum brl_flavour lock_flav,
1912 br_off start, br_off size,
1913 void *private_data)
1915 struct brl_revalidate_state *state =
1916 (struct brl_revalidate_state *)private_data;
1918 if (!IS_PENDING_LOCK(lock_type)) {
1919 return;
1922 add_to_large_array(state, sizeof(pid), (void *)&pid,
1923 &state->pids, &state->num_pids,
1924 &state->array_size);
1928 * qsort callback to sort the processes
1931 static int compare_procids(const void *p1, const void *p2)
1933 const struct server_id *i1 = (struct server_id *)p1;
1934 const struct server_id *i2 = (struct server_id *)p2;
1936 if (i1->pid < i2->pid) return -1;
1937 if (i1->pid > i2->pid) return 1;
1938 return 0;
1942 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
1943 * locks so that they retry. Mainly used in the cluster code after a node has
1944 * died.
1946 * Done in two steps to avoid double-sends: First we collect all entries in an
1947 * array, then qsort that array and only send to non-dupes.
1950 static void brl_revalidate(struct messaging_context *msg_ctx,
1951 void *private_data,
1952 uint32_t msg_type,
1953 struct server_id server_id,
1954 DATA_BLOB *data)
1956 struct brl_revalidate_state *state;
1957 uint32 i;
1958 struct server_id last_pid;
1960 if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
1961 DEBUG(0, ("talloc failed\n"));
1962 return;
1965 brl_forall(brl_revalidate_collect, state);
1967 if (state->array_size == -1) {
1968 DEBUG(0, ("talloc failed\n"));
1969 goto done;
1972 if (state->num_pids == 0) {
1973 goto done;
1976 qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
1977 compare_procids);
1979 ZERO_STRUCT(last_pid);
1981 for (i=0; i<state->num_pids; i++) {
1982 if (procid_equal(&last_pid, &state->pids[i])) {
1984 * We've seen that one already
1986 continue;
1989 messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
1990 &data_blob_null);
1991 last_pid = state->pids[i];
1994 done:
1995 TALLOC_FREE(state);
1996 return;
1999 void brl_register_msgs(struct messaging_context *msg_ctx)
2001 messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
2002 brl_revalidate);