/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */
#include "includes.h"
#include "librpc/gen_ndr/messaging.h"
#include "smbd/globals.h"
#include "dbwrap.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;
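
/*
 * Layout note (added for exposition): each record in brlock.tdb is keyed
 * by a struct file_id and holds a flat array of struct lock_struct
 * entries. traverse_fn() and brl_get_locks_internal() below both recover
 * the entry count as value.dsize / sizeof(struct lock_struct).
 */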
/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
	DEBUG(10,("[%u]: smblctx = %llu, tid = %u, pid = %s, ",
			i,
			(unsigned long long)pls->context.smblctx,
			(unsigned int)pls->context.tid,
			procid_str(talloc_tos(), &pls->context.pid) ));

	DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
		(double)pls->start,
		(double)pls->size,
		pls->fnum,
		lock_type_name(pls->lock_type),
		lock_flav_name(pls->lock_flav) ));
}
/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

bool brl_same_context(const struct lock_context *ctx1,
		      const struct lock_context *ctx2)
{
	return (procid_equal(&ctx1->pid, &ctx2->pid) &&
		(ctx1->smblctx == ctx2->smblctx) &&
		(ctx1->tid == ctx2->tid));
}
/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static bool brl_overlap(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	/* XXX Remove for Win7 compatibility. */
	/* this extra check is not redundant - it copes with locks
	   that go beyond the end of 64 bit file space */
	if (lck1->size != 0 &&
	    lck1->start == lck2->start &&
	    lck1->size == lck2->size) {
		return True;
	}

	if (lck1->start >= (lck2->start+lck2->size) ||
	    lck2->start >= (lck1->start+lck1->size)) {
		return False;
	}
	return True;
}
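
/*
 * Illustrative sketch (not in the original source): ranges here are
 * half-open intervals [start, start + size), so a lock of size 10 at
 * offset 0 and a lock of size 5 at offset 10 do not overlap:
 *
 *	struct lock_struct a, b;
 *	ZERO_STRUCT(a); ZERO_STRUCT(b);
 *	a.start = 0;  a.size = 10;
 *	b.start = 10; b.size = 5;
 *	SMB_ASSERT(!brl_overlap(&a, &b));	// 10 >= 0 + 10
 *
 * The size != 0 equal-range special case above exists so that two
 * identical locks whose ranges wrap past the end of 64-bit file space
 * still compare as overlapping.
 */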
/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static bool brl_conflict(const struct lock_struct *lck1,
			 const struct lock_struct *lck2)
{
	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* A READ lock can stack on top of a WRITE lock if they have the same
	 * context & fnum. */
	if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
	    brl_same_context(&lck1->context, &lck2->context) &&
	    lck1->fnum == lck2->fnum) {
		return False;
	}

	return brl_overlap(lck1, lck2);
}
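
/*
 * Summary of the Windows-flavour conflict rules above (added for
 * exposition): pending locks never conflict; read locks never conflict
 * with read locks; a read lock may stack on a write lock held by the
 * same context and fnum; everything else conflicts iff the ranges
 * overlap.
 */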
/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static bool brl_conflict_posix(const struct lock_struct *lck1,
			       const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
	SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
	SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

	/* Ignore PENDING locks. */
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	/* Read locks never conflict. */
	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	/* Locks on the same context don't conflict. Ignore fnum. */
	if (brl_same_context(&lck1->context, &lck2->context)) {
		return False;
	}

	/* One is read, the other write, or the context is different,
	   do they overlap ? */
	return brl_overlap(lck1, lck2);
}
#if ZERO_ZERO
static bool brl_conflict1(const struct lock_struct *lck1,
			  const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
		return False;
	}

	if (brl_same_context(&lck1->context, &lck2->context) &&
	    lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
		return False;
	}

	if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
		return True;
	}

	if (lck1->start >= (lck2->start + lck2->size) ||
	    lck2->start >= (lck1->start + lck1->size)) {
		return False;
	}

	return True;
}
#endif
/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
	if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
		return False;

	if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
		return False;

	/* POSIX flavour locks never conflict here - this is only called
	   in the read/write path. */

	if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
		return False;

	/*
	 * Incoming WRITE locks conflict with existing READ locks even
	 * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
	 */

	if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
		if (brl_same_context(&lck1->context, &lck2->context) &&
		    lck1->fnum == lck2->fnum)
			return False;
	}

	return brl_overlap(lck1, lck2);
}
/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static bool brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
	if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
		return True;
	if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
		return True;
	return False;
}
/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, bool blocking_lock)
{
	if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
		/* amazing the little things you learn with a test
		   suite. Locks beyond this offset (as a 64 bit
		   number!) always generate the conflict error code,
		   unless the top bit is set */
		if (!blocking_lock) {
			fsp->last_lock_failure = *lock;
		}
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
	    lock->context.tid == fsp->last_lock_failure.context.tid &&
	    lock->fnum == fsp->last_lock_failure.fnum &&
	    lock->start == fsp->last_lock_failure.start) {
		return NT_STATUS_FILE_LOCK_CONFLICT;
	}

	if (!blocking_lock) {
		fsp->last_lock_failure = *lock;
	}
	return NT_STATUS_LOCK_NOT_GRANTED;
}
/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(bool read_only)
{
	int tdb_flags;

	if (brlock_db) {
		return;
	}

	tdb_flags = TDB_DEFAULT|TDB_VOLATILE|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;

	if (!lp_clustering()) {
		/*
		 * We can't use the SEQNUM trick to cache brlock
		 * entries in the clustering case because ctdb seqnum
		 * propagation has a delay.
		 */
		tdb_flags |= TDB_SEQNUM;
	}

	brlock_db = db_open(NULL, lock_path("brlock.tdb"),
			    lp_open_files_db_hash_size(), tdb_flags,
			    read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
	if (!brlock_db) {
		DEBUG(0,("Failed to open byte range locking database %s\n",
			lock_path("brlock.tdb")));
		return;
	}
}
/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(void)
{
	TALLOC_FREE(brlock_db);
}
#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
			const struct lock_struct *lck2)
{
	if (lck1->start != lck2->start) {
		return (lck1->start - lck2->start);
	}
	if (lck2->size != lck1->size) {
		return ((int)lck1->size - (int)lck2->size);
	}
	return 0;
}
#endif
/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

NTSTATUS brl_lock_windows_default(struct byte_range_lock *br_lck,
    struct lock_struct *plock, bool blocking_lock)
{
	unsigned int i;
	files_struct *fsp = br_lck->fsp;
	struct lock_struct *locks = br_lck->lock_data;
	NTSTATUS status;

	SMB_ASSERT(plock->lock_type != UNLOCK_LOCK);

	if ((plock->start + plock->size - 1 < plock->start) &&
			plock->size != 0) {
		return NT_STATUS_INVALID_LOCK_RANGE;
	}

	for (i=0; i < br_lck->num_locks; i++) {
		/* Do any Windows or POSIX locks conflict ? */
		if (brl_conflict(&locks[i], plock)) {
			/* Remember who blocked us. */
			plock->context.smblctx = locks[i].context.smblctx;
			return brl_lock_failed(fsp,plock,blocking_lock);
		}
#if ZERO_ZERO
		if (plock->start == 0 && plock->size == 0 &&
				locks[i].size == 0) {
			break;
		}
#endif
	}

	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_begin(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}

	/* We can get the Windows lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
		int errno_ret;
		if (!set_posix_lock_windows_flavour(fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&plock->context,
				locks,
				br_lck->num_locks,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* no conflicts - add it to the list of locks */
	locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
	if (!locks) {
		status = NT_STATUS_NO_MEMORY;
		goto fail;
	}

	memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
	br_lck->num_locks += 1;
	br_lck->lock_data = locks;
	br_lck->modified = True;

	return NT_STATUS_OK;
 fail:
	if (!IS_PENDING_LOCK(plock->lock_type)) {
		contend_level2_oplocks_end(fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	}
	return status;
}
/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,	/* Output array. */
						struct lock_struct *ex,		/* existing lock. */
						struct lock_struct *plock)	/* proposed lock. */
{
	bool lock_types_differ = (ex->lock_type != plock->lock_type);

	/* We can't merge non-conflicting locks on different context - ignore fnum. */

	if (!brl_same_context(&ex->context, &plock->context)) {
		/* Just copy. */
		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

	/* We now know we have the same context. */

	/* Did we overlap ? */

/*********************************************
                                        +---------+
                                        | ex      |
                                        +---------+
                         +-------+
                         | plock |
                         +-------+
OR....
        +---------+
        |  ex     |
        +---------+
**********************************************/

	if ( (ex->start > (plock->start + plock->size)) ||
		(plock->start > (ex->start + ex->size))) {

		/* No overlap with this lock - copy existing. */

		memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
		return 1;
	}

/*********************************************
        +---------------------------+
        |          ex               |
        +---------------------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+
OR
             +---------------+
             |       ex      |
             +---------------+
        +---------------------------+
        |       plock               | -> replace with plock.
        +---------------------------+

**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start + ex->size <= plock->start + plock->size) ) {

		/* Replace - discard existing lock. */

		return 0;
	}

/*********************************************
Adjacent after.
                        +-------+
                        |  ex   |
                        +-------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if (plock->start + plock->size == ex->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Adjacent before.
        +-------+
        |  ex   |
        +-------+
                +---------------+
                |   plock       |
                +---------------+
BECOMES....
        +-------+---------------+
        |  ex   |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |      plock            | - same lock type.
        +-----------------------+

**********************************************/

	if (ex->start + ex->size == plock->start) {

		/* If the lock types are the same, we merge, if different, we
		   add the existing lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->start = ex->start;
			plock->size += ex->size;
			return 0;
		}
	}

/*********************************************
Overlap after.
        +-----------------------+
        |          ex           |
        +-----------------------+
        +---------------+
        |   plock       |
        +---------------+
OR
               +----------------+
               |       ex       |
               +----------------+
        +---------------+
        |   plock       |
        +---------------+

BECOMES....
        +---------------+-------+
        |   plock       | ex    | - different lock types.
        +---------------+-------+
OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+
**********************************************/

	if ( (ex->start >= plock->start) &&
		(ex->start <= plock->start + plock->size) &&
		(ex->start + ex->size > plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the remainder of the old lock. */

		if (lock_types_differ) {
			/* Add remaining existing. */
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing start and size. */
			lck_arr[0].start = plock->start + plock->size;
			lck_arr[0].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. */
			plock->size += (ex->start + ex->size) - (plock->start + plock->size);
			return 0;
		}
	}

/*********************************************
Overlap before.
        +-----------------------+
        |  ex                   |
        +-----------------------+
                +---------------+
                |   plock       |
                +---------------+
OR
        +-------------+
        |  ex         |
        +-------------+
                +---------------+
                |   plock       |
                +---------------+

BECOMES....
        +-------+---------------+
        |  ex   |   plock       | - different lock types
        +-------+---------------+

OR.... (merge)
        +-----------------------+
        |   plock               | - same lock type.
        +-----------------------+

**********************************************/

	if ( (ex->start < plock->start) &&
			(ex->start + ex->size >= plock->start) &&
			(ex->start + ex->size <= plock->start + plock->size) ) {

		/* If the lock types are the same, we merge, if different, we
		   add the truncated old lock. */

		if (lock_types_differ) {
			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			/* Adjust existing size. */
			lck_arr[0].size = plock->start - ex->start;
			return 1;
		} else {
			/* Merge - adjust incoming lock as we may have more
			 * merging to come. MUST ADJUST plock SIZE FIRST ! */
			plock->size += (plock->start - ex->start);
			plock->start = ex->start;
			return 0;
		}
	}

/*********************************************
Complete overlap.
        +---------------------------+
        |        ex                 |
        +---------------------------+
                +---------+
                |  plock  |
                +---------+
BECOMES.....
        +-------+---------+---------+
        | ex    |  plock  | ex      | - different lock types.
        +-------+---------+---------+

OR.... (merge)
        +---------------------------+
        |        plock              | - same lock type.
        +---------------------------+
**********************************************/

	if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {

		if (lock_types_differ) {

			/* We have to split ex into two locks here. */

			memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
			memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));

			/* Adjust first existing size. */
			lck_arr[0].size = plock->start - ex->start;

			/* Adjust second existing start and size. */
			lck_arr[1].start = plock->start + plock->size;
			lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
			return 2;
		} else {
			/* Just eat the existing locks, merge them into plock. */
			plock->start = ex->start;
			plock->size = ex->size;
			return 0;
		}
	}

	/* Never get here. */
	smb_panic("brlock_posix_split_merge");
	/* Notreached. */

	/* Keep some compilers happy. */
	return 0;
}
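
/*
 * Worked example (added for exposition, not in the original source):
 * with an existing READ lock ex = [0, 100) and a proposed WRITE lock
 * plock = [40, 60), the "complete overlap" case fires and the function
 * returns 2, having written the two remnants of ex into lck_arr:
 *
 *	lck_arr[0] = [0, 40)   READ   (size  = plock->start - ex->start)
 *	lck_arr[1] = [60, 100) READ   (start = plock->start + plock->size)
 *
 * The caller then inserts plock itself, which is why brl_lock_posix()
 * allocates num_locks + 2 entries for its scratch array.
 */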
/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       struct lock_struct *plock)
{
	unsigned int i, count, posix_count;
	struct lock_struct *locks = br_lck->lock_data;
	struct lock_struct *tp;
	bool signal_pending_read = False;
	bool break_oplocks = false;
	NTSTATUS status;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size - 1 < plock->start) {
		return NT_STATUS_INVALID_PARAMETER;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, and add our lock,
	   so we need at most 2 more entries. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
	if (!tp) {
		return NT_STATUS_NO_MEMORY;
	}

	count = posix_count = 0;

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *curr_lock = &locks[i];

		/* If we have a pending read lock, a lock downgrade should
		   trigger a lock re-evaluation. */
		if (curr_lock->lock_type == PENDING_READ_LOCK &&
				brl_pending_overlap(plock, curr_lock)) {
			signal_pending_read = True;
		}

		if (curr_lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(curr_lock, plock)) {
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
			count++;
		} else {
			unsigned int tmp_count = 0;

			/* POSIX conflict semantics are different. */
			if (brl_conflict_posix(curr_lock, plock)) {
				/* Can't block ourselves with POSIX locks. */
				/* No games with error messages. */
				SAFE_FREE(tp);
				/* Remember who blocked us. */
				plock->context.smblctx = curr_lock->context.smblctx;
				return NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/* Work out overlaps. */
			tmp_count += brlock_posix_split_merge(&tp[count], curr_lock, plock);
			posix_count += tmp_count;
			count += tmp_count;
		}
	}

	/*
	 * Break oplocks while we hold a brl. Since lock() and unlock() calls
	 * are not symmetric with POSIX semantics, we cannot guarantee our
	 * contend_level2_oplocks_begin/end calls will be acquired and
	 * released one-for-one as with Windows semantics. Therefore we only
	 * call contend_level2_oplocks_begin if this is the first POSIX brl on
	 * the file.
	 */
	break_oplocks = (!IS_PENDING_LOCK(plock->lock_type) &&
			 posix_count == 0);
	if (break_oplocks) {
		contend_level2_oplocks_begin(br_lck->fsp,
					     LEVEL2_CONTEND_POSIX_BRL);
	}

	/* Try and add the lock in order, sorted by lock start. */
	for (i=0; i < count; i++) {
		struct lock_struct *curr_lock = &tp[i];

		if (curr_lock->start <= plock->start) {
			continue;
		}
		break;
	}

	if (i < count) {
		memmove(&tp[i+1], &tp[i],
			(count - i)*sizeof(struct lock_struct));
	}
	memcpy(&tp[i], plock, sizeof(struct lock_struct));
	count++;

	/* We can get the POSIX lock, now see if it needs to
	   be mapped into a lower level POSIX one, and if so can
	   we get it ? */

	if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
		int errno_ret;

		/* The lower layer just needs to attempt to
		   get the system POSIX lock. We've weeded out
		   any conflicts above. */

		if (!set_posix_lock_posix_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				plock->lock_type,
				&errno_ret)) {

			/* We don't know who blocked us. */
			plock->context.smblctx = 0xFFFFFFFFFFFFFFFFLL;

			if (errno_ret == EACCES || errno_ret == EAGAIN) {
				SAFE_FREE(tp);
				status = NT_STATUS_FILE_LOCK_CONFLICT;
				goto fail;
			} else {
				SAFE_FREE(tp);
				status = map_nt_error_from_unix(errno);
				goto fail;
			}
		}
	}

	/* If we didn't use all the allocated size,
	 * Realloc so we don't leak entries per lock call. */
	if (count < br_lck->num_locks + 2) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			status = NT_STATUS_NO_MEMORY;
			goto fail;
		}
	}

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	br_lck->lock_data = tp;
	locks = tp;
	br_lck->modified = True;

	/* A successful downgrade from write to read lock can trigger a lock
	   re-evaluation where waiting readers can now proceed. */

	if (signal_pending_read) {
		/* Send unlock messages to any pending read waiters that overlap. */
		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *pend_lock = &locks[i];

			/* Ignore non-pending locks. */
			if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
				continue;
			}

			if (pend_lock->lock_type == PENDING_READ_LOCK &&
					brl_pending_overlap(plock, pend_lock)) {
				DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
					procid_str_static(&pend_lock->context.pid )));

				messaging_send(msg_ctx, pend_lock->context.pid,
					       MSG_SMB_UNLOCK, &data_blob_null);
			}
		}
	}

	return NT_STATUS_OK;
 fail:
	if (break_oplocks) {
		contend_level2_oplocks_end(br_lck->fsp,
					   LEVEL2_CONTEND_POSIX_BRL);
	}
	return status;
}
NTSTATUS smb_vfs_call_brl_lock_windows(struct vfs_handle_struct *handle,
				       struct byte_range_lock *br_lck,
				       struct lock_struct *plock,
				       bool blocking_lock,
				       struct blocking_lock_record *blr)
{
	VFS_FIND(brl_lock_windows);
	return handle->fns->brl_lock_windows(handle, br_lck, plock,
					     blocking_lock, blr);
}
/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav,
		bool blocking_lock,
		uint64_t *psmblctx,
		struct blocking_lock_record *blr)
{
	NTSTATUS ret;
	struct lock_struct lock;

#if !ZERO_ZERO
	if (start == 0 && size == 0) {
		DEBUG(0,("client sent 0/0 lock - please report this\n"));
	}
#endif

#ifdef DEVELOPER
	/* Quieten valgrind on test. */
	memset(&lock, '\0', sizeof(lock));
#endif

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_LOCK_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blocking_lock, blr);
	} else {
		ret = brl_lock_posix(msg_ctx, br_lck, &lock);
	}

#if ZERO_ZERO
	/* sort the lock list */
	TYPESAFE_QSORT(br_lck->lock_data, (size_t)br_lck->num_locks, lock_compare);
#endif

	/* If we're returning an error, return who blocked us. */
	if (!NT_STATUS_IS_OK(ret) && psmblctx) {
		*psmblctx = lock.context.smblctx;
	}
	return ret;
}
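
/*
 * Illustrative call sequence (a sketch, not code from this file; the
 * smbd layer in locking/locking.c is what really drives this):
 *
 *	struct byte_range_lock *br_lck = brl_get_locks(talloc_tos(), fsp);
 *	uint64_t blocker_ctx = 0;
 *	NTSTATUS status = brl_lock(msg_ctx, br_lck,
 *				   smblctx, sconn_server_id(fsp->conn->sconn),
 *				   0, 100,		// offset, length
 *				   WRITE_LOCK, WINDOWS_LOCK,
 *				   false,		// not a blocking lock
 *				   &blocker_ctx, NULL);
 *	TALLOC_FREE(br_lck);	// destructor stores and unlocks the record
 *
 * On failure blocker_ctx holds the smblctx of the lock that blocked us.
 */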
/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

bool brl_unlock_windows_default(struct messaging_context *msg_ctx,
			       struct byte_range_lock *br_lck,
			       const struct lock_struct *plock)
{
	unsigned int i, j;
	struct lock_struct *locks = br_lck->lock_data;
	enum brl_type deleted_lock_type = READ_LOCK;	/* shut the compiler up.... */

	SMB_ASSERT(plock->lock_type == UNLOCK_LOCK);

#if ZERO_ZERO
	/* Delete write locks by preference... The lock list
	   is sorted in the zero zero case. */

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (lock->lock_type == WRITE_LOCK &&
		    brl_same_context(&lock->context, &plock->context) &&
		    lock->fnum == plock->fnum &&
		    lock->lock_flav == WINDOWS_LOCK &&
		    lock->start == plock->start &&
		    lock->size == plock->size) {

			/* found it - delete it */
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i != br_lck->num_locks) {
		/* We found it - don't search again. */
		goto unlock_continue;
	}
#endif

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		if (IS_PENDING_LOCK(lock->lock_type)) {
			continue;
		}

		/* Only remove our own locks that match in start, size, and flavour. */
		if (brl_same_context(&lock->context, &plock->context) &&
					lock->fnum == plock->fnum &&
					lock->lock_flav == WINDOWS_LOCK &&
					lock->start == plock->start &&
					lock->size == plock->size ) {
			deleted_lock_type = lock->lock_type;
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* we didn't find it */
		return False;
	}

#if ZERO_ZERO
  unlock_continue:
#endif

	/* Actually delete the lock. */
	if (i < br_lck->num_locks - 1) {
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;

	/* Unlock the underlying POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_windows_flavour(br_lck->fsp,
				plock->start,
				plock->size,
				deleted_lock_type,
				&plock->context,
				locks,
				br_lck->num_locks);
	}

	/* Send unlock messages to any pending waiters that overlap. */
	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	contend_level2_oplocks_end(br_lck->fsp, LEVEL2_CONTEND_WINDOWS_BRL);
	return True;
}
/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static bool brl_unlock_posix(struct messaging_context *msg_ctx,
			     struct byte_range_lock *br_lck,
			     struct lock_struct *plock)
{
	unsigned int i, j, count;
	struct lock_struct *tp;
	struct lock_struct *locks = br_lck->lock_data;
	bool overlap_found = False;

	/* No zero-zero locks for POSIX. */
	if (plock->start == 0 && plock->size == 0) {
		return False;
	}

	/* Don't allow 64-bit lock wrap. */
	if (plock->start + plock->size < plock->start ||
			plock->start + plock->size < plock->size) {
		DEBUG(10,("brl_unlock_posix: lock wrap\n"));
		return False;
	}

	/* The worst case scenario here is we have to split an
	   existing POSIX lock range into two, so we need at most
	   1 more entry. */

	tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
	if (!tp) {
		DEBUG(10,("brl_unlock_posix: malloc fail\n"));
		return False;
	}

	count = 0;
	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		unsigned int tmp_count;

		/* Only remove our own locks - ignore fnum. */
		if (IS_PENDING_LOCK(lock->lock_type) ||
		    !brl_same_context(&lock->context, &plock->context)) {
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		if (lock->lock_flav == WINDOWS_LOCK) {
			/* Do any Windows flavour locks conflict ? */
			if (brl_conflict(lock, plock)) {
				SAFE_FREE(tp);
				return false;
			}
			/* Just copy the Windows lock into the new array. */
			memcpy(&tp[count], lock, sizeof(struct lock_struct));
			count++;
			continue;
		}

		/* Work out overlaps. */
		tmp_count = brlock_posix_split_merge(&tp[count], lock, plock);

		if (tmp_count == 0) {
			/* plock overlapped the existing lock completely,
			   or replaced it. Don't copy the existing lock. */
			overlap_found = true;
		} else if (tmp_count == 1) {
			/* Either no overlap, (simple copy of existing lock) or
			 * an overlap of an existing lock. */
			/* If the lock changed size, we had an overlap. */
			if (tp[count].size != lock->size) {
				overlap_found = true;
			}
			count += tmp_count;
		} else if (tmp_count == 2) {
			/* We split a lock range in two. */
			overlap_found = true;
			count += tmp_count;

			/* Optimisation... */
			/* We know we're finished here as we can't overlap any
			   more POSIX locks. Copy the rest of the lock array. */

			if (i < br_lck->num_locks - 1) {
				memcpy(&tp[count], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
				count += ((br_lck->num_locks-1) - i);
			}
			break;
		}
	}

	if (!overlap_found) {
		/* Just ignore - no change. */
		SAFE_FREE(tp);
		DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
		return True;
	}

	/* Unlock any POSIX regions. */
	if(lp_posix_locking(br_lck->fsp->conn->params)) {
		release_posix_lock_posix_flavour(br_lck->fsp,
						plock->start,
						plock->size,
						&plock->context,
						tp,
						count);
	}

	/* Realloc so we don't leak entries per unlock call. */
	if (count) {
		tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
		if (!tp) {
			DEBUG(10,("brl_unlock_posix: realloc fail\n"));
			return False;
		}
	} else {
		/* We deleted the last lock. */
		SAFE_FREE(tp);
		tp = NULL;
	}

	contend_level2_oplocks_end(br_lck->fsp,
				   LEVEL2_CONTEND_POSIX_BRL);

	br_lck->num_locks = count;
	SAFE_FREE(br_lck->lock_data);
	locks = tp;
	br_lck->lock_data = tp;
	br_lck->modified = True;

	/* Send unlock messages to any pending waiters that overlap. */

	for (j=0; j < br_lck->num_locks; j++) {
		struct lock_struct *pend_lock = &locks[j];

		/* Ignore non-pending locks. */
		if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
			continue;
		}

		/* We could send specific lock info here... */
		if (brl_pending_overlap(plock, pend_lock)) {
			DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
				procid_str_static(&pend_lock->context.pid )));

			messaging_send(msg_ctx, pend_lock->context.pid,
				       MSG_SMB_UNLOCK, &data_blob_null);
		}
	}

	return True;
}
bool smb_vfs_call_brl_unlock_windows(struct vfs_handle_struct *handle,
				     struct messaging_context *msg_ctx,
				     struct byte_range_lock *br_lck,
				     const struct lock_struct *plock)
{
	VFS_FIND(brl_unlock_windows);
	return handle->fns->brl_unlock_windows(handle, msg_ctx, br_lck, plock);
}
/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

bool brl_unlock(struct messaging_context *msg_ctx,
		struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav)
{
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_type = UNLOCK_LOCK;
	lock.lock_flav = lock_flav;

	if (lock_flav == WINDOWS_LOCK) {
		return SMB_VFS_BRL_UNLOCK_WINDOWS(br_lck->fsp->conn, msg_ctx,
		    br_lck, &lock);
	} else {
		return brl_unlock_posix(msg_ctx, br_lck, &lock);
	}
}
/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

bool brl_locktest(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_type lock_type,
		enum brl_flavour lock_flav)
{
	bool ret = True;
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = fsp->fnum;
	lock.lock_type = lock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		/*
		 * Our own locks don't conflict.
		 */
		if (brl_conflict_other(&locks[i], &lock)) {
			return False;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 * This only conflicts with Windows locks, not POSIX locks.
	 */

	if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
		ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

		DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)start, (double)size, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		/* We need to return the inverse of is_posix_locked. */
		ret = !ret;
	}

	/* no conflicts - we could have added it */
	return ret;
}
/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
		uint64_t *psmblctx,
		struct server_id pid,
		br_off *pstart,
		br_off *psize,
		enum brl_type *plock_type,
		enum brl_flavour lock_flav)
{
	unsigned int i;
	struct lock_struct lock;
	const struct lock_struct *locks = br_lck->lock_data;
	files_struct *fsp = br_lck->fsp;

	lock.context.smblctx = *psmblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = *pstart;
	lock.size = *psize;
	lock.fnum = fsp->fnum;
	lock.lock_type = *plock_type;
	lock.lock_flav = lock_flav;

	/* Make sure existing locks don't conflict */
	for (i=0; i < br_lck->num_locks; i++) {
		const struct lock_struct *exlock = &locks[i];
		bool conflict = False;

		if (exlock->lock_flav == WINDOWS_LOCK) {
			conflict = brl_conflict(exlock, &lock);
		} else {
			conflict = brl_conflict_posix(exlock, &lock);
		}

		if (conflict) {
			*psmblctx = exlock->context.smblctx;
			*pstart = exlock->start;
			*psize = exlock->size;
			*plock_type = exlock->lock_type;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	/*
	 * There is no lock held by an SMB daemon, check to
	 * see if there is a POSIX lock from a UNIX or NFS process.
	 */

	if(lp_posix_locking(fsp->conn->params)) {
		bool ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

		DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
			(double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
			fsp->fnum, fsp_str_dbg(fsp)));

		if (ret) {
			/* Hmmm. No clue what to set smblctx to - use -1. */
			*psmblctx = 0xFFFFFFFFFFFFFFFFLL;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
bool smb_vfs_call_brl_cancel_windows(struct vfs_handle_struct *handle,
				     struct byte_range_lock *br_lck,
				     struct lock_struct *plock,
				     struct blocking_lock_record *blr)
{
	VFS_FIND(brl_cancel_windows);
	return handle->fns->brl_cancel_windows(handle, br_lck, plock, blr);
}
/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/
bool brl_lock_cancel(struct byte_range_lock *br_lck,
		uint64_t smblctx,
		struct server_id pid,
		br_off start,
		br_off size,
		enum brl_flavour lock_flav,
		struct blocking_lock_record *blr)
{
	bool ret;
	struct lock_struct lock;

	lock.context.smblctx = smblctx;
	lock.context.pid = pid;
	lock.context.tid = br_lck->fsp->conn->cnum;
	lock.start = start;
	lock.size = size;
	lock.fnum = br_lck->fsp->fnum;
	lock.lock_flav = lock_flav;
	/* lock.lock_type doesn't matter */

	if (lock_flav == WINDOWS_LOCK) {
		ret = SMB_VFS_BRL_CANCEL_WINDOWS(br_lck->fsp->conn, br_lck,
		    &lock, blr);
	} else {
		ret = brl_lock_cancel_default(br_lck, &lock);
	}

	return ret;
}
bool brl_lock_cancel_default(struct byte_range_lock *br_lck,
		struct lock_struct *plock)
{
	unsigned int i;
	struct lock_struct *locks = br_lck->lock_data;

	SMB_ASSERT(plock);

	for (i = 0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];

		/* For pending locks we *always* care about the fnum. */
		if (brl_same_context(&lock->context, &plock->context) &&
				lock->fnum == plock->fnum &&
				IS_PENDING_LOCK(lock->lock_type) &&
				lock->lock_flav == plock->lock_flav &&
				lock->start == plock->start &&
				lock->size == plock->size) {
			break;
		}
	}

	if (i == br_lck->num_locks) {
		/* Didn't find it. */
		return False;
	}

	if (i < br_lck->num_locks - 1) {
		/* Found this particular pending lock - delete it */
		memmove(&locks[i], &locks[i+1],
			sizeof(*locks)*((br_lck->num_locks-1) - i));
	}

	br_lck->num_locks -= 1;
	br_lck->modified = True;
	return True;
}
/****************************************************************************
 Remove any locks associated with an open file.
 We return True if this process owns any other Windows locks on this
 fd and so we should not immediately close the fd.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
		    struct byte_range_lock *br_lck)
{
	files_struct *fsp = br_lck->fsp;
	uint16 tid = fsp->conn->cnum;
	int fnum = fsp->fnum;
	unsigned int i, j, dcount=0;
	int num_deleted_windows_locks = 0;
	struct lock_struct *locks = br_lck->lock_data;
	struct server_id pid = sconn_server_id(fsp->conn->sconn);
	bool unlock_individually = False;
	bool posix_level2_contention_ended = false;

	if(lp_posix_locking(fsp->conn->params)) {

		/* Check if there are any Windows locks associated with this dev/ino
		   pair that are not this fnum. If so we need to call unlock on each
		   one in order to release the system POSIX locks correctly. */

		for (i=0; i < br_lck->num_locks; i++) {
			struct lock_struct *lock = &locks[i];

			if (!procid_equal(&lock->context.pid, &pid)) {
				continue;
			}

			if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
				continue; /* Ignore pending. */
			}

			if (lock->context.tid != tid || lock->fnum != fnum) {
				unlock_individually = True;
				break;
			}
		}

		if (unlock_individually) {
			struct lock_struct *locks_copy;
			unsigned int num_locks_copy;

			/* Copy the current lock array. */
			if (br_lck->num_locks) {
				locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
				if (!locks_copy) {
					smb_panic("brl_close_fnum: talloc failed");
				}
			} else {
				locks_copy = NULL;
			}

			num_locks_copy = br_lck->num_locks;

			for (i=0; i < num_locks_copy; i++) {
				struct lock_struct *lock = &locks_copy[i];

				if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
						(lock->fnum == fnum)) {
					brl_unlock(msg_ctx,
						br_lck,
						lock->context.smblctx,
						pid,
						lock->start,
						lock->size,
						lock->lock_flav);
				}
			}
			return;
		}
	}

	/* We can bulk delete - any POSIX locks will be removed when the fd closes. */

	/* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

	for (i=0; i < br_lck->num_locks; i++) {
		struct lock_struct *lock = &locks[i];
		bool del_this_lock = False;

		if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
			if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
				del_this_lock = True;
				num_deleted_windows_locks++;
				contend_level2_oplocks_end(br_lck->fsp,
				    LEVEL2_CONTEND_WINDOWS_BRL);
			} else if (lock->lock_flav == POSIX_LOCK) {
				del_this_lock = True;

				/* Only end level2 contention once for posix */
				if (!posix_level2_contention_ended) {
					posix_level2_contention_ended = true;
					contend_level2_oplocks_end(br_lck->fsp,
					    LEVEL2_CONTEND_POSIX_BRL);
				}
			}
		}

		if (del_this_lock) {
			/* Send unlock messages to any pending waiters that overlap. */
			for (j=0; j < br_lck->num_locks; j++) {
				struct lock_struct *pend_lock = &locks[j];

				/* Ignore our own or non-pending locks. */
				if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
					continue;
				}

				/* Optimisation - don't send to this fnum as we're
				   closing it. */
				if (pend_lock->context.tid == tid &&
				    procid_equal(&pend_lock->context.pid, &pid) &&
				    pend_lock->fnum == fnum) {
					continue;
				}

				/* We could send specific lock info here... */
				if (brl_pending_overlap(lock, pend_lock)) {
					messaging_send(msg_ctx, pend_lock->context.pid,
							MSG_SMB_UNLOCK, &data_blob_null);
				}
			}

			/* found it - delete it */
			if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
				memmove(&locks[i], &locks[i+1],
					sizeof(*locks)*((br_lck->num_locks-1) - i));
			}
			br_lck->num_locks--;
			br_lck->modified = True;
			i--;
			dcount++;
		}
	}

	if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
		/* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
		reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
	}
}
/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/
static bool validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
	unsigned int i;
	unsigned int num_valid_entries = 0;
	struct lock_struct *locks = *pplocks;

	for (i = 0; i < *pnum_entries; i++) {
		struct lock_struct *lock_data = &locks[i];
		if (!serverid_exists(&lock_data->context.pid)) {
			/* This process no longer exists - mark this
			   entry as invalid by zeroing it. */
			ZERO_STRUCTP(lock_data);
		} else {
			num_valid_entries++;
		}
	}

	if (num_valid_entries != *pnum_entries) {
		struct lock_struct *new_lock_data = NULL;

		if (num_valid_entries) {
			new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
			if (!new_lock_data) {
				DEBUG(3, ("malloc fail\n"));
				return False;
			}

			num_valid_entries = 0;
			for (i = 0; i < *pnum_entries; i++) {
				struct lock_struct *lock_data = &locks[i];
				if (lock_data->context.smblctx &&
						lock_data->context.tid) {
					/* Valid (nonzero) entry - copy it. */
					memcpy(&new_lock_data[num_valid_entries],
						lock_data, sizeof(struct lock_struct));
					num_valid_entries++;
				}
			}
		}

		SAFE_FREE(*pplocks);
		*pplocks = new_lock_data;
		*pnum_entries = num_valid_entries;
	}

	return True;
}
struct brl_forall_cb {
	void (*fn)(struct file_id id, struct server_id pid,
		   enum brl_type lock_type,
		   enum brl_flavour lock_flav,
		   br_off start, br_off size,
		   void *private_data);
	void *private_data;
};
/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(struct db_record *rec, void *state)
{
	struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
	struct lock_struct *locks;
	struct file_id *key;
	unsigned int i;
	unsigned int num_locks = 0;
	unsigned int orig_num_locks = 0;

	/* In a traverse function we must make a copy of
	   dbuf before modifying it. */

	locks = (struct lock_struct *)memdup(rec->value.dptr,
					     rec->value.dsize);
	if (!locks) {
		return -1; /* Terminate traversal. */
	}

	key = (struct file_id *)rec->key.dptr;
	orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);

	/* Ensure the lock db is clean of entries from invalid processes. */

	if (!validate_lock_entries(&num_locks, &locks)) {
		SAFE_FREE(locks);
		return -1; /* Terminate traversal */
	}

	if (orig_num_locks != num_locks) {
		if (num_locks) {
			TDB_DATA data;
			data.dptr = (uint8_t *)locks;
			data.dsize = num_locks*sizeof(struct lock_struct);
			rec->store(rec, data, TDB_REPLACE);
		} else {
			rec->delete_rec(rec);
		}
	}

	if (cb->fn) {
		for ( i=0; i<num_locks; i++) {
			cb->fn(*key,
			       locks[i].context.pid,
			       locks[i].lock_type,
			       locks[i].lock_flav,
			       locks[i].start,
			       locks[i].size,
			       cb->private_data);
		}
	}

	SAFE_FREE(locks);
	return 0;
}
/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
			  enum brl_type lock_type,
			  enum brl_flavour lock_flav,
			  br_off start, br_off size,
			  void *private_data),
	       void *private_data)
{
	struct brl_forall_cb cb;

	if (!brlock_db) {
		return 0;
	}
	cb.fn = fn;
	cb.private_data = private_data;
	return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}
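
/*
 * Example callback (a sketch, not in the original source), as a status
 * tool might use to dump every byte range lock in the database:
 *
 *	static void print_brl(struct file_id id, struct server_id pid,
 *			      enum brl_type lock_type,
 *			      enum brl_flavour lock_flav,
 *			      br_off start, br_off size,
 *			      void *private_data)
 *	{
 *		d_printf("%s %s %s %.0f %.0f\n",
 *			 procid_str_static(&pid),
 *			 lock_type_name(lock_type),
 *			 lock_flav_name(lock_flav),
 *			 (double)start, (double)size);
 *	}
 *
 *	brl_forall(print_brl, NULL);
 */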
/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
	if (br_lck->read_only) {
		SMB_ASSERT(!br_lck->modified);
	}

	if (!br_lck->modified) {
		goto done;
	}

	if (br_lck->num_locks == 0) {
		/* No locks - delete this entry. */
		NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("delete_rec returned %s\n",
				  nt_errstr(status)));
			smb_panic("Could not delete byte range lock entry");
		}
	} else {
		TDB_DATA data;
		NTSTATUS status;

		data.dptr = (uint8 *)br_lck->lock_data;
		data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

		status = br_lck->record->store(br_lck->record, data,
					       TDB_REPLACE);
		if (!NT_STATUS_IS_OK(status)) {
			DEBUG(0, ("store returned %s\n", nt_errstr(status)));
			smb_panic("Could not store byte range mode entry");
		}
	}

 done:

	SAFE_FREE(br_lck->lock_data);
	TALLOC_FREE(br_lck->record);
	return 0;
}
/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
					files_struct *fsp, bool read_only)
{
	TDB_DATA key, data;
	struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

	if (br_lck == NULL) {
		return NULL;
	}

	br_lck->fsp = fsp;
	br_lck->num_locks = 0;
	br_lck->modified = False;
	br_lck->key = fsp->file_id;

	key.dptr = (uint8 *)&br_lck->key;
	key.dsize = sizeof(struct file_id);

	if (!fsp->lockdb_clean) {
		/* We must be read/write to clean
		   the dead entries. */
		read_only = False;
	}

	if (read_only) {
		if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
			DEBUG(3, ("Could not fetch byte range lock record\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}
		br_lck->record = NULL;
	}
	else {
		br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);

		if (br_lck->record == NULL) {
			DEBUG(3, ("Could not lock byte range lock entry\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		data = br_lck->record->value;
	}

	br_lck->read_only = read_only;
	br_lck->lock_data = NULL;

	talloc_set_destructor(br_lck, byte_range_lock_destructor);

	br_lck->num_locks = data.dsize / sizeof(struct lock_struct);

	if (br_lck->num_locks != 0) {
		br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct,
						     br_lck->num_locks);
		if (br_lck->lock_data == NULL) {
			DEBUG(0, ("malloc failed\n"));
			TALLOC_FREE(br_lck);
			return NULL;
		}

		memcpy(br_lck->lock_data, data.dptr, data.dsize);
	}

	if (!fsp->lockdb_clean) {
		int orig_num_locks = br_lck->num_locks;

		/* This is the first time we've accessed this. */
		/* Go through and ensure all entries exist - remove any that don't. */
		/* Makes the lockdb self cleaning at low cost. */

		if (!validate_lock_entries(&br_lck->num_locks,
					   &br_lck->lock_data)) {
			SAFE_FREE(br_lck->lock_data);
			TALLOC_FREE(br_lck);
			return NULL;
		}

		/* Ensure invalid locks are cleaned up in the destructor. */
		if (orig_num_locks != br_lck->num_locks) {
			br_lck->modified = True;
		}

		/* Mark the lockdb as "clean" as seen from this open file. */
		fsp->lockdb_clean = True;
	}

	if (DEBUGLEVEL >= 10) {
		unsigned int i;
		struct lock_struct *locks = br_lck->lock_data;
		DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
			  br_lck->num_locks,
			  file_id_string_tos(&fsp->file_id)));
		for( i = 0; i < br_lck->num_locks; i++) {
			print_lock_struct(i, &locks[i]);
		}
	}
	return br_lck;
}
struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
					files_struct *fsp)
{
	return brl_get_locks_internal(mem_ctx, fsp, False);
}
struct byte_range_lock *brl_get_locks_readonly(files_struct *fsp)
{
	struct byte_range_lock *br_lock;

	if (lp_clustering()) {
		return brl_get_locks_internal(talloc_tos(), fsp, true);
	}

	if ((fsp->brlock_rec != NULL)
	    && (brlock_db->get_seqnum(brlock_db) == fsp->brlock_seqnum)) {
		return fsp->brlock_rec;
	}

	TALLOC_FREE(fsp->brlock_rec);

	br_lock = brl_get_locks_internal(talloc_tos(), fsp, false);
	if (br_lock == NULL) {
		return NULL;
	}
	fsp->brlock_seqnum = brlock_db->get_seqnum(brlock_db);

	fsp->brlock_rec = talloc_zero(fsp, struct byte_range_lock);
	if (fsp->brlock_rec == NULL) {
		goto fail;
	}
	fsp->brlock_rec->fsp = fsp;
	fsp->brlock_rec->num_locks = br_lock->num_locks;
	fsp->brlock_rec->read_only = true;
	fsp->brlock_rec->key = br_lock->key;

	fsp->brlock_rec->lock_data = (struct lock_struct *)
		talloc_memdup(fsp->brlock_rec, br_lock->lock_data,
			      sizeof(struct lock_struct) * br_lock->num_locks);
	if (fsp->brlock_rec->lock_data == NULL) {
		goto fail;
	}

	TALLOC_FREE(br_lock);
	return fsp->brlock_rec;
fail:
	TALLOC_FREE(br_lock);
	TALLOC_FREE(fsp->brlock_rec);
	return NULL;
}
struct brl_revalidate_state {
	ssize_t array_size;
	uint32 num_pids;
	struct server_id *pids;
};
/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
				   enum brl_type lock_type,
				   enum brl_flavour lock_flav,
				   br_off start, br_off size,
				   void *private_data)
{
	struct brl_revalidate_state *state =
		(struct brl_revalidate_state *)private_data;

	if (!IS_PENDING_LOCK(lock_type)) {
		return;
	}

	add_to_large_array(state, sizeof(pid), (void *)&pid,
			   &state->pids, &state->num_pids,
			   &state->array_size);
}
/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
	const struct server_id *i1 = (struct server_id *)p1;
	const struct server_id *i2 = (struct server_id *)p2;

	if (i1->pid < i2->pid) return -1;
	if (i1->pid > i2->pid) return 1;
	return 0;
}
/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

static void brl_revalidate(struct messaging_context *msg_ctx,
			   void *private_data,
			   uint32_t msg_type,
			   struct server_id server_id,
			   DATA_BLOB *data)
{
	struct brl_revalidate_state *state;
	uint32 i;
	struct server_id last_pid;

	if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
		DEBUG(0, ("talloc failed\n"));
		return;
	}

	brl_forall(brl_revalidate_collect, state);

	if (state->array_size == -1) {
		DEBUG(0, ("talloc failed\n"));
		goto done;
	}

	if (state->num_pids == 0) {
		goto done;
	}

	TYPESAFE_QSORT(state->pids, state->num_pids, compare_procids);

	ZERO_STRUCT(last_pid);

	for (i=0; i<state->num_pids; i++) {
		if (procid_equal(&last_pid, &state->pids[i])) {
			/*
			 * We've seen that one already
			 */
			continue;
		}

		messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
			       &data_blob_null);
		last_pid = state->pids[i];
	}

 done:
	TALLOC_FREE(state);
	return;
}
void brl_register_msgs(struct messaging_context *msg_ctx)
{
	messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
			   brl_revalidate);
}