/*
   Unix SMB/CIFS implementation.
   byte range locking code
   Updated to handle range splits/merges.

   Copyright (C) Andrew Tridgell 1992-2000
   Copyright (C) Jeremy Allison 1992-2000

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

/* This module implements a tdb based byte range locking service,
   replacing the fcntl() based byte range locking previously
   used. This allows us to provide the same semantics as NT */

#include "includes.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING

#define ZERO_ZERO 0

/* The open brlock.tdb database. */

static struct db_context *brlock_db;
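
/*
 * Illustrative sketch only (not part of the original file): a typical
 * smbd-style caller drives this module by fetching (and locking) the
 * record for an fsp, attempting the byte range lock, then freeing the
 * handle so the destructor stores the record back and unlocks it. The
 * function name and the example values here are hypothetical.
 */
#if 0
static NTSTATUS example_brl_usage(struct messaging_context *msg_ctx,
                                  files_struct *fsp)
{
        NTSTATUS status;
        uint32 blocker_smbpid;
        struct byte_range_lock *br_lck = brl_get_locks(NULL, fsp);

        if (br_lck == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        /* Try to write-lock the first 100 bytes, Windows semantics. */
        status = brl_lock(msg_ctx, br_lck,
                          0x1234,        /* example smbpid from the SMB packet */
                          procid_self(),
                          0,             /* start */
                          100,           /* size */
                          WRITE_LOCK,
                          WINDOWS_LOCK,
                          False,         /* not a blocking lock */
                          &blocker_smbpid);

        /* Destructor stores any modification and unlocks the record. */
        TALLOC_FREE(br_lck);
        return status;
}
#endif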

/****************************************************************************
 Debug info at level 10 for lock struct.
****************************************************************************/

static void print_lock_struct(unsigned int i, struct lock_struct *pls)
{
        DEBUG(10,("[%u]: smbpid = %u, tid = %u, pid = %u, ",
                        i,
                        (unsigned int)pls->context.smbpid,
                        (unsigned int)pls->context.tid,
                        (unsigned int)procid_to_pid(&pls->context.pid) ));

        DEBUG(10,("start = %.0f, size = %.0f, fnum = %d, %s %s\n",
                (double)pls->start,
                (double)pls->size,
                pls->fnum,
                lock_type_name(pls->lock_type),
                lock_flav_name(pls->lock_flav) ));
}

/****************************************************************************
 See if two locking contexts are equal.
****************************************************************************/

BOOL brl_same_context(const struct lock_context *ctx1,
                      const struct lock_context *ctx2)
{
        return (procid_equal(&ctx1->pid, &ctx2->pid) &&
                (ctx1->smbpid == ctx2->smbpid) &&
                (ctx1->tid == ctx2->tid));
}

/****************************************************************************
 See if lck1 and lck2 overlap.
****************************************************************************/

static BOOL brl_overlap(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        /* this extra check is not redundant - it copes with locks
           that go beyond the end of 64 bit file space */
        if (lck1->size != 0 &&
            lck1->start == lck2->start &&
            lck1->size == lck2->size) {
                return True;
        }

        if (lck1->start >= (lck2->start+lck2->size) ||
            lck2->start >= (lck1->start+lck1->size)) {
                return False;
        }
        return True;
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place.
****************************************************************************/

static BOOL brl_conflict(const struct lock_struct *lck1,
                         const struct lock_struct *lck2)
{
        /* Ignore PENDING locks. */
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* An incoming read lock from the same context and fnum can
           always stack on top of an existing lock. */
        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        return brl_overlap(lck1, lck2);
}

/****************************************************************************
 See if lock2 can be added when lock1 is in place - when both locks are POSIX
 flavour. POSIX locks ignore fnum - they only care about dev/ino which we
 know already match.
****************************************************************************/

static BOOL brl_conflict_posix(const struct lock_struct *lck1,
                               const struct lock_struct *lck2)
{
#if defined(DEVELOPER)
        SMB_ASSERT(lck1->lock_flav == POSIX_LOCK);
        SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
#endif

        /* Ignore PENDING locks. */
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        /* Read locks never conflict. */
        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        /* Locks on the same context can't conflict. Ignore fnum. */
        if (brl_same_context(&lck1->context, &lck2->context)) {
                return False;
        }

        /* One is a read, the other a write, and the contexts differ -
           do they overlap ? */
        return brl_overlap(lck1, lck2);
}

#if ZERO_ZERO
static BOOL brl_conflict1(const struct lock_struct *lck1,
                          const struct lock_struct *lck2)
{
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
                return False;
        }

        if (brl_same_context(&lck1->context, &lck2->context) &&
            lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
                return False;
        }

        if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
                return True;
        }

        if (lck1->start >= (lck2->start + lck2->size) ||
            lck2->start >= (lck1->start + lck1->size)) {
                return False;
        }

        return True;
}
#endif

/****************************************************************************
 Check to see if this lock conflicts, but ignore our own locks on the
 same fnum only. This is the read/write lock check code path.
 This is never used in the POSIX lock case.
****************************************************************************/

static BOOL brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
{
        if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
                return False;

        if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
                return False;

        /* POSIX flavour locks never conflict here - this is only called
           in the read/write path. */

        if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
                return False;

        /*
         * Incoming WRITE locks conflict with existing READ locks even
         * if the context is the same. JRA. See LOCKTEST7 in smbtorture.
         */

        if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
                if (brl_same_context(&lck1->context, &lck2->context) &&
                                lck1->fnum == lck2->fnum)
                        return False;
        }

        return brl_overlap(lck1, lck2);
}

/****************************************************************************
 Check if an unlock overlaps a pending lock.
****************************************************************************/

static BOOL brl_pending_overlap(const struct lock_struct *lock, const struct lock_struct *pend_lock)
{
        /* The unlock range starts at or before the pending lock and
           extends into it. */
        if ((lock->start <= pend_lock->start) && (lock->start + lock->size > pend_lock->start))
                return True;
        /* The unlock range starts inside the pending lock range. */
        if ((lock->start >= pend_lock->start) && (lock->start <= pend_lock->start + pend_lock->size))
                return True;
        return False;
}

/****************************************************************************
 Amazingly enough, w2k3 "remembers" whether the last lock failure on a fnum
 is the same as this one and changes its error code. I wonder if any
 app depends on this ?
****************************************************************************/

static NTSTATUS brl_lock_failed(files_struct *fsp, const struct lock_struct *lock, BOOL blocking_lock)
{
        if (lock->start >= 0xEF000000 && (lock->start >> 63) == 0) {
                /* amazing the little things you learn with a test
                   suite. Locks beyond this offset (as a 64 bit
                   number!) always generate the conflict error code,
                   unless the top bit is set */
                if (!blocking_lock) {
                        fsp->last_lock_failure = *lock;
                }
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        if (procid_equal(&lock->context.pid, &fsp->last_lock_failure.context.pid) &&
                        lock->context.tid == fsp->last_lock_failure.context.tid &&
                        lock->fnum == fsp->last_lock_failure.fnum &&
                        lock->start == fsp->last_lock_failure.start) {
                return NT_STATUS_FILE_LOCK_CONFLICT;
        }

        if (!blocking_lock) {
                fsp->last_lock_failure = *lock;
        }
        return NT_STATUS_LOCK_NOT_GRANTED;
}
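
/*
 * In practice this means a client that retries the same failed lock on a
 * fnum sees NT_STATUS_LOCK_NOT_GRANTED the first time and
 * NT_STATUS_FILE_LOCK_CONFLICT on the immediate repeat - the cached
 * last_lock_failure above is what implements that switch.
 */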

/****************************************************************************
 Open up the brlock.tdb database.
****************************************************************************/

void brl_init(int read_only)
{
        if (brlock_db) {
                return;
        }
        brlock_db = db_open(NULL, lock_path("brlock.tdb"),
                            lp_open_files_db_hash_size(),
                            TDB_DEFAULT
                            |TDB_VOLATILE
                            |(read_only?0x0:TDB_CLEAR_IF_FIRST),
                            read_only?O_RDONLY:(O_RDWR|O_CREAT), 0644 );
        if (!brlock_db) {
                DEBUG(0,("Failed to open byte range locking database %s\n",
                        lock_path("brlock.tdb")));
                return;
        }
}

/****************************************************************************
 Close down the brlock.tdb database.
****************************************************************************/

void brl_shutdown(int read_only)
{
        if (!brlock_db) {
                return;
        }
        TALLOC_FREE(brlock_db);
}

#if ZERO_ZERO
/****************************************************************************
 Compare two locks for sorting.
****************************************************************************/

static int lock_compare(const struct lock_struct *lck1,
                        const struct lock_struct *lck2)
{
        if (lck1->start != lck2->start) {
                return (lck1->start - lck2->start);
        }
        if (lck2->size != lck1->size) {
                return ((int)lck1->size - (int)lck2->size);
        }
        return 0;
}
#endif

/****************************************************************************
 Lock a range of bytes - Windows lock semantics.
****************************************************************************/

static NTSTATUS brl_lock_windows(struct byte_range_lock *br_lck,
                        struct lock_struct *plock, BOOL blocking_lock)
{
        unsigned int i;
        files_struct *fsp = br_lck->fsp;
        struct lock_struct *locks = br_lck->lock_data;

        for (i=0; i < br_lck->num_locks; i++) {
                /* Do any Windows or POSIX locks conflict ? */
                if (brl_conflict(&locks[i], plock)) {
                        /* Remember who blocked us. */
                        plock->context.smbpid = locks[i].context.smbpid;
                        return brl_lock_failed(fsp,plock,blocking_lock);
                }
#if ZERO_ZERO
                if (plock->start == 0 && plock->size == 0 &&
                                locks[i].size == 0) {
                        break;
                }
#endif
        }

        /* We can get the Windows lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(fsp->conn->params)) {
                int errno_ret;
                if (!set_posix_lock_windows_flavour(fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks,
                                &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smbpid = 0xFFFFFFFF;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        } else {
                                return map_nt_error_from_unix(errno);
                        }
                }
        }

        /* no conflicts - add it to the list of locks */
        locks = (struct lock_struct *)SMB_REALLOC(locks, (br_lck->num_locks + 1) * sizeof(*locks));
        if (!locks) {
                return NT_STATUS_NO_MEMORY;
        }

        memcpy(&locks[br_lck->num_locks], plock, sizeof(struct lock_struct));
        br_lck->num_locks += 1;
        br_lck->lock_data = locks;
        br_lck->modified = True;

        return NT_STATUS_OK;
}

/****************************************************************************
 Cope with POSIX range splits and merges.
****************************************************************************/

static unsigned int brlock_posix_split_merge(struct lock_struct *lck_arr,      /* Output array. */
                                const struct lock_struct *ex,                  /* existing lock. */
                                const struct lock_struct *plock,               /* proposed lock. */
                                BOOL *lock_was_added)
{
        BOOL lock_types_differ = (ex->lock_type != plock->lock_type);

        /* We can't merge non-conflicting locks on different context - ignore fnum. */

        if (!brl_same_context(&ex->context, &plock->context)) {
                /* Just copy. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

        /* We now know we have the same context. */

        /* Did we overlap ? */

/*********************************************
                                +---------+
                                |   ex    |
                                +---------+
                +-------+
                | plock |
                +-------+
OR....
        +---------+
        |   ex    |
        +---------+
**********************************************/

        if ( (ex->start > (plock->start + plock->size)) ||
                        (plock->start > (ex->start + ex->size))) {
                /* No overlap with this lock - copy existing. */
                memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                return 1;
        }

/*********************************************
        +---------------------------+
        |            ex             |
        +---------------------------+
        +---------------------------+
        |          plock            | -> replace with plock.
        +---------------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {
                memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                *lock_was_added = True;
                return 1;
        }

/*********************************************
        +---------------+
        |      ex       |
        +---------------+
+---------------+
|     plock     |
+---------------+
OR....
        +-------+
        |  ex   |
        +-------+
+---------------+
|     plock     |
+---------------+

BECOMES....
+---------------+-------+
|     plock     |  ex   | - different lock types.
+---------------+-------+
OR.... (merge)
+-----------------------+
|          ex           | - same lock type.
+-----------------------+
**********************************************/

        if ( (ex->start >= plock->start) &&
                        (ex->start <= plock->start + plock->size) &&
                        (ex->start + ex->size > plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock before the old. */

                if (lock_types_differ) {
                        /* Add new. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], ex, sizeof(struct lock_struct));
                        /* Adjust existing start and size. */
                        lck_arr[1].start = plock->start + plock->size;
                        lck_arr[1].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], plock, sizeof(struct lock_struct));
                        /* Set new start and size. */
                        lck_arr[0].start = plock->start;
                        lck_arr[0].size = (ex->start + ex->size) - plock->start;
                        return 1;
                }
        }

/*********************************************
+-----------------------+
|          ex           |
+-----------------------+
        +---------------+
        |     plock     |
        +---------------+
OR....
+-------+
|  ex   |
+-------+
        +---------------+
        |     plock     |
        +---------------+

BECOMES....
+-------+---------------+
|  ex   |     plock     | - different lock types
+-------+---------------+
OR.... (merge)
+-----------------------+
|          ex           | - same lock type.
+-----------------------+
**********************************************/

        if ( (ex->start < plock->start) &&
                        (ex->start + ex->size >= plock->start) &&
                        (ex->start + ex->size <= plock->start + plock->size) ) {

                *lock_was_added = True;

                /* If the lock types are the same, we merge, if different, we
                   add the new lock after the old. */

                if (lock_types_differ) {
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = plock->start - ex->start;
                        return 2;
                } else {
                        /* Merge. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        /* Adjust existing size. */
                        lck_arr[0].size = (plock->start + plock->size) - ex->start;
                        return 1;
                }
        }

/*********************************************
+---------------------------+
|            ex             |
+---------------------------+
        +---------+
        |  plock  |
        +---------+
BECOMES.....
+-------+---------+---------+
|  ex   |  plock  |   ex    | - different lock types.
+-------+---------+---------+
OR.... (merge)
+---------------------------+
|            ex             | - same lock type.
+---------------------------+
**********************************************/

        if ( (ex->start < plock->start) && (ex->start + ex->size > plock->start + plock->size) ) {
                *lock_was_added = True;

                if (lock_types_differ) {

                        /* We have to split ex into two locks here. */

                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        memcpy(&lck_arr[1], plock, sizeof(struct lock_struct));
                        memcpy(&lck_arr[2], ex, sizeof(struct lock_struct));

                        /* Adjust first existing size. */
                        lck_arr[0].size = plock->start - ex->start;

                        /* Adjust second existing start and size. */
                        lck_arr[2].start = plock->start + plock->size;
                        lck_arr[2].size = (ex->start + ex->size) - (plock->start + plock->size);
                        return 3;
                } else {
                        /* Just eat plock. */
                        memcpy(&lck_arr[0], ex, sizeof(struct lock_struct));
                        return 1;
                }
        }

        /* Never get here. */
        smb_panic("brlock_posix_split_merge");
        /* Notreached. */

        /* Keep some compilers happy. */
        return 0;
}
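
/*
 * Worked example of the split case above: with an existing WRITE lock
 * ex = [0, 100) and a proposed READ lock plock = [40, 60) from the same
 * context, brlock_posix_split_merge() emits WRITE [0, 40), READ [40, 60)
 * and WRITE [60, 100), and returns 3. This is why callers must size
 * their output arrays for up to 2 extra entries per existing lock.
 */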

/****************************************************************************
 Lock a range of bytes - POSIX lock semantics.
 We must cope with range splits and merges.
****************************************************************************/

static NTSTATUS brl_lock_posix(struct messaging_context *msg_ctx,
                        struct byte_range_lock *br_lck,
                        struct lock_struct *plock)
{
        unsigned int i, count;
        struct lock_struct *locks = br_lck->lock_data;
        struct lock_struct *tp;
        BOOL lock_was_added = False;
        BOOL signal_pending_read = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                return NT_STATUS_INVALID_PARAMETER;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, and add our lock,
           so we need at most 2 more entries. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 2));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        count = 0;
        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *curr_lock = &locks[i];

                /* If we have a pending read lock, a lock downgrade should
                   trigger a lock re-evaluation. */
                if (curr_lock->lock_type == PENDING_READ_LOCK &&
                                brl_pending_overlap(plock, curr_lock)) {
                        signal_pending_read = True;
                }

                if (curr_lock->lock_flav == WINDOWS_LOCK) {
                        /* Do any Windows flavour locks conflict ? */
                        if (brl_conflict(curr_lock, plock)) {
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smbpid = curr_lock->context.smbpid;
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }
                        /* Just copy the Windows lock into the new array. */
                        memcpy(&tp[count], curr_lock, sizeof(struct lock_struct));
                        count++;
                } else {
                        /* POSIX conflict semantics are different. */
                        if (brl_conflict_posix(curr_lock, plock)) {
                                /* Can't block ourselves with POSIX locks. */
                                /* No games with error messages. */
                                SAFE_FREE(tp);
                                /* Remember who blocked us. */
                                plock->context.smbpid = curr_lock->context.smbpid;
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        }

                        /* Work out overlaps. */
                        count += brlock_posix_split_merge(&tp[count], curr_lock, plock, &lock_was_added);
                }
        }

        if (!lock_was_added) {
                memcpy(&tp[count], plock, sizeof(struct lock_struct));
                count++;
        }

        /* We can get the POSIX lock, now see if it needs to
           be mapped into a lower level POSIX one, and if so can
           we get it ? */

        if (!IS_PENDING_LOCK(plock->lock_type) && lp_posix_locking(br_lck->fsp->conn->params)) {
                int errno_ret;

                /* The lower layer just needs to attempt to
                   get the system POSIX lock. We've weeded out
                   any conflicts above. */

                if (!set_posix_lock_posix_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                plock->lock_type,
                                &errno_ret)) {

                        /* We don't know who blocked us. */
                        plock->context.smbpid = 0xFFFFFFFF;

                        if (errno_ret == EACCES || errno_ret == EAGAIN) {
                                SAFE_FREE(tp);
                                return NT_STATUS_FILE_LOCK_CONFLICT;
                        } else {
                                SAFE_FREE(tp);
                                return map_nt_error_from_unix(errno);
                        }
                }
        }

        /* Realloc so we don't leak entries per lock call. */
        tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
        if (!tp) {
                return NT_STATUS_NO_MEMORY;
        }

        br_lck->num_locks = count;
        SAFE_FREE(br_lck->lock_data);
        br_lck->lock_data = tp;
        locks = tp;
        br_lck->modified = True;

        /* A successful downgrade from write to read lock can trigger a lock
           re-evaluation where waiting readers can now proceed. */

        if (signal_pending_read) {
                /* Send unlock messages to any pending read waiters that overlap. */
                for (i=0; i < br_lck->num_locks; i++) {
                        struct lock_struct *pend_lock = &locks[i];

                        /* Ignore non-pending locks. */
                        if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                                continue;
                        }

                        if (pend_lock->lock_type == PENDING_READ_LOCK &&
                                        brl_pending_overlap(plock, pend_lock)) {
                                DEBUG(10,("brl_lock_posix: sending unlock message to pid %s\n",
                                        procid_str_static(&pend_lock->context.pid )));

                                messaging_send(msg_ctx, pend_lock->context.pid,
                                               MSG_SMB_UNLOCK, &data_blob_null);
                        }
                }
        }

        return NT_STATUS_OK;
}

/****************************************************************************
 Lock a range of bytes.
****************************************************************************/

NTSTATUS brl_lock(struct messaging_context *msg_ctx,
                struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_type lock_type,
                enum brl_flavour lock_flav,
                BOOL blocking_lock,
                uint32 *psmbpid)
{
        NTSTATUS ret;
        struct lock_struct lock;

#if !ZERO_ZERO
        if (start == 0 && size == 0) {
                DEBUG(0,("client sent 0/0 lock - please report this\n"));
        }
#endif

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                ret = brl_lock_windows(br_lck, &lock, blocking_lock);
        } else {
                ret = brl_lock_posix(msg_ctx, br_lck, &lock);
        }

#if ZERO_ZERO
        /* sort the lock list */
        qsort(br_lck->lock_data, (size_t)br_lck->num_locks, sizeof(lock), lock_compare);
#endif

        /* If we're returning an error, return who blocked us. */
        if (!NT_STATUS_IS_OK(ret) && psmbpid) {
                *psmbpid = lock.context.smbpid;
        }
        return ret;
}

/****************************************************************************
 Unlock a range of bytes - Windows semantics.
****************************************************************************/

static BOOL brl_unlock_windows(struct messaging_context *msg_ctx,
                        struct byte_range_lock *br_lck,
                        const struct lock_struct *plock)
{
        unsigned int i, j;
        struct lock_struct *locks = br_lck->lock_data;
        enum brl_type deleted_lock_type = READ_LOCK; /* shut the compiler up.... */

#if ZERO_ZERO
        /* Delete write locks by preference... The lock list
           is sorted in the zero zero case. */

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                if (lock->lock_type == WRITE_LOCK &&
                    brl_same_context(&lock->context, &plock->context) &&
                    lock->fnum == plock->fnum &&
                    lock->lock_flav == WINDOWS_LOCK &&
                    lock->start == plock->start &&
                    lock->size == plock->size) {

                        /* found it - delete it */
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i != br_lck->num_locks) {
                /* We found it - don't search again. */
                goto unlock_continue;
        }
#endif

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                /* Only remove our own locks that match in start, size, and flavour. */
                if (brl_same_context(&lock->context, &plock->context) &&
                                lock->fnum == plock->fnum &&
                                lock->lock_flav == WINDOWS_LOCK &&
                                lock->start == plock->start &&
                                lock->size == plock->size ) {
                        deleted_lock_type = lock->lock_type;
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* we didn't find it */
                return False;
        }

#if ZERO_ZERO
  unlock_continue:
#endif

        /* Actually delete the lock. */
        if (i < br_lck->num_locks - 1) {
                memmove(&locks[i], &locks[i+1],
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;

        /* Unlock the underlying POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_windows_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                deleted_lock_type,
                                &plock->context,
                                locks,
                                br_lck->num_locks);
        }

        /* Send unlock messages to any pending waiters that overlap. */
        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(plock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        messaging_send(msg_ctx, pend_lock->context.pid,
                                       MSG_SMB_UNLOCK, &data_blob_null);
                }
        }

        return True;
}

/****************************************************************************
 Unlock a range of bytes - POSIX semantics.
****************************************************************************/

static BOOL brl_unlock_posix(struct messaging_context *msg_ctx,
                        struct byte_range_lock *br_lck,
                        const struct lock_struct *plock)
{
        unsigned int i, j, count;
        struct lock_struct *tp;
        struct lock_struct *locks = br_lck->lock_data;
        BOOL overlap_found = False;

        /* No zero-zero locks for POSIX. */
        if (plock->start == 0 && plock->size == 0) {
                return False;
        }

        /* Don't allow 64-bit lock wrap. */
        if (plock->start + plock->size < plock->start ||
                        plock->start + plock->size < plock->size) {
                DEBUG(10,("brl_unlock_posix: lock wrap\n"));
                return False;
        }

        /* The worst case scenario here is we have to split an
           existing POSIX lock range into two, so we need at most
           1 more entry. */

        tp = SMB_MALLOC_ARRAY(struct lock_struct, (br_lck->num_locks + 1));
        if (!tp) {
                DEBUG(10,("brl_unlock_posix: malloc fail\n"));
                return False;
        }

        count = 0;
        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                struct lock_struct tmp_lock[3];
                BOOL lock_was_added = False;
                unsigned int tmp_count;

                /* Only remove our own locks - ignore fnum. */
                if (IS_PENDING_LOCK(lock->lock_type) ||
                                !brl_same_context(&lock->context, &plock->context)) {
                        memcpy(&tp[count], lock, sizeof(struct lock_struct));
                        count++;
                        continue;
                }

                /* Work out overlaps. */
                tmp_count = brlock_posix_split_merge(&tmp_lock[0], &locks[i], plock, &lock_was_added);

                if (tmp_count == 1) {
                        /* Either the locks didn't overlap, or the unlock completely
                           overlapped this lock. If it didn't overlap, then there's
                           no change in the locks. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                /* No change in this lock. */
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                                count++;
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                overlap_found = True;
                        }
                        continue;
                } else if (tmp_count == 2) {
                        /* The unlock overlapped an existing lock. Copy the truncated
                           lock into the lock array. */
                        if (tmp_lock[0].lock_type != UNLOCK_LOCK) {
                                SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                                SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                                memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                                if (tmp_lock[0].size != locks[i].size) {
                                        overlap_found = True;
                                }
                        } else {
                                SMB_ASSERT(tmp_lock[0].lock_type == UNLOCK_LOCK);
                                SMB_ASSERT(tmp_lock[1].lock_type == locks[i].lock_type);
                                memcpy(&tp[count], &tmp_lock[1], sizeof(struct lock_struct));
                                if (tmp_lock[1].start != locks[i].start) {
                                        overlap_found = True;
                                }
                        }
                        count++;
                        continue;
                } else {
                        /* tmp_count == 3 - (we split a lock range in two). */
                        SMB_ASSERT(tmp_lock[0].lock_type == locks[i].lock_type);
                        SMB_ASSERT(tmp_lock[1].lock_type == UNLOCK_LOCK);
                        SMB_ASSERT(tmp_lock[2].lock_type == locks[i].lock_type);

                        memcpy(&tp[count], &tmp_lock[0], sizeof(struct lock_struct));
                        count++;
                        memcpy(&tp[count], &tmp_lock[2], sizeof(struct lock_struct));
                        count++;

                        overlap_found = True;

                        /* Optimisation... */
                        /* We know we're finished here as we can't overlap any
                           more POSIX locks. Copy the rest of the lock array. */
                        if (i < br_lck->num_locks - 1) {
                                memcpy(&tp[count], &locks[i+1],
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                                count += ((br_lck->num_locks-1) - i);
                        }
                        break;
                }
        }

        if (!overlap_found) {
                /* Just ignore - no change. */
                SAFE_FREE(tp);
                DEBUG(10,("brl_unlock_posix: No overlap - unlocked.\n"));
                return True;
        }

        /* Unlock any POSIX regions. */
        if(lp_posix_locking(br_lck->fsp->conn->params)) {
                release_posix_lock_posix_flavour(br_lck->fsp,
                                plock->start,
                                plock->size,
                                &plock->context,
                                tp,
                                count);
        }

        /* Realloc so we don't leak entries per unlock call. */
        if (count) {
                tp = (struct lock_struct *)SMB_REALLOC(tp, count * sizeof(*locks));
                if (!tp) {
                        DEBUG(10,("brl_unlock_posix: realloc fail\n"));
                        return False;
                }
        } else {
                /* We deleted the last lock. */
                SAFE_FREE(tp);
                tp = NULL;
        }

        br_lck->num_locks = count;
        SAFE_FREE(br_lck->lock_data);
        locks = tp;
        br_lck->lock_data = tp;
        br_lck->modified = True;

        /* Send unlock messages to any pending waiters that overlap. */

        for (j=0; j < br_lck->num_locks; j++) {
                struct lock_struct *pend_lock = &locks[j];

                /* Ignore non-pending locks. */
                if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                        continue;
                }

                /* We could send specific lock info here... */
                if (brl_pending_overlap(plock, pend_lock)) {
                        DEBUG(10,("brl_unlock: sending unlock message to pid %s\n",
                                procid_str_static(&pend_lock->context.pid )));

                        messaging_send(msg_ctx, pend_lock->context.pid,
                                       MSG_SMB_UNLOCK, &data_blob_null);
                }
        }

        return True;
}

/****************************************************************************
 Unlock a range of bytes.
****************************************************************************/

BOOL brl_unlock(struct messaging_context *msg_ctx,
                struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        struct lock_struct lock;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = br_lck->fsp->fnum;
        lock.lock_type = UNLOCK_LOCK;
        lock.lock_flav = lock_flav;

        if (lock_flav == WINDOWS_LOCK) {
                return brl_unlock_windows(msg_ctx, br_lck, &lock);
        } else {
                return brl_unlock_posix(msg_ctx, br_lck, &lock);
        }
}

/****************************************************************************
 Test if we could add a lock if we wanted to.
 Returns True if the region required is currently unlocked, False if locked.
****************************************************************************/

BOOL brl_locktest(struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_type lock_type,
                enum brl_flavour lock_flav)
{
        BOOL ret = True;
        unsigned int i;
        struct lock_struct lock;
        const struct lock_struct *locks = br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = smbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = start;
        lock.size = size;
        lock.fnum = fsp->fnum;
        lock.lock_type = lock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                /*
                 * Our own locks don't conflict.
                 */
                if (brl_conflict_other(&locks[i], &lock)) {
                        return False;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         * This only conflicts with Windows locks, not POSIX locks.
         */

        if(lp_posix_locking(fsp->conn->params) && (lock_flav == WINDOWS_LOCK)) {
                ret = is_posix_locked(fsp, &start, &size, &lock_type, WINDOWS_LOCK);

                DEBUG(10,("brl_locktest: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)start, (double)size, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                /* We need to return the inverse of is_posix_locked. */
                ret = !ret;
        }

        /* no conflicts - we could have added it */
        return ret;
}

/****************************************************************************
 Query for existing locks.
****************************************************************************/

NTSTATUS brl_lockquery(struct byte_range_lock *br_lck,
                uint32 *psmbpid,
                struct server_id pid,
                br_off *pstart,
                br_off *psize,
                enum brl_type *plock_type,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct lock;
        const struct lock_struct *locks = br_lck->lock_data;
        files_struct *fsp = br_lck->fsp;

        lock.context.smbpid = *psmbpid;
        lock.context.pid = pid;
        lock.context.tid = br_lck->fsp->conn->cnum;
        lock.start = *pstart;
        lock.size = *psize;
        lock.fnum = fsp->fnum;
        lock.lock_type = *plock_type;
        lock.lock_flav = lock_flav;

        /* Make sure existing locks don't conflict */
        for (i=0; i < br_lck->num_locks; i++) {
                const struct lock_struct *exlock = &locks[i];
                BOOL conflict = False;

                if (exlock->lock_flav == WINDOWS_LOCK) {
                        conflict = brl_conflict(exlock, &lock);
                } else {
                        conflict = brl_conflict_posix(exlock, &lock);
                }

                if (conflict) {
                        *psmbpid = exlock->context.smbpid;
                        *pstart = exlock->start;
                        *psize = exlock->size;
                        *plock_type = exlock->lock_type;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        /*
         * There is no lock held by an SMB daemon, check to
         * see if there is a POSIX lock from a UNIX or NFS process.
         */

        if(lp_posix_locking(fsp->conn->params)) {
                BOOL ret = is_posix_locked(fsp, pstart, psize, plock_type, POSIX_LOCK);

                DEBUG(10,("brl_lockquery: posix start=%.0f len=%.0f %s for fnum %d file %s\n",
                        (double)*pstart, (double)*psize, ret ? "locked" : "unlocked",
                        fsp->fnum, fsp->fsp_name ));

                if (ret) {
                        /* Hmmm. No clue what to set smbpid to - use -1. */
                        *psmbpid = 0xFFFF;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}

/****************************************************************************
 Remove a particular pending lock.
****************************************************************************/

BOOL brl_lock_cancel(struct byte_range_lock *br_lck,
                uint32 smbpid,
                struct server_id pid,
                br_off start,
                br_off size,
                enum brl_flavour lock_flav)
{
        unsigned int i;
        struct lock_struct *locks = br_lck->lock_data;
        struct lock_context context;

        context.smbpid = smbpid;
        context.pid = pid;
        context.tid = br_lck->fsp->conn->cnum;

        for (i = 0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];

                /* For pending locks we *always* care about the fnum. */
                if (brl_same_context(&lock->context, &context) &&
                                lock->fnum == br_lck->fsp->fnum &&
                                IS_PENDING_LOCK(lock->lock_type) &&
                                lock->lock_flav == lock_flav &&
                                lock->start == start &&
                                lock->size == size) {
                        break;
                }
        }

        if (i == br_lck->num_locks) {
                /* Didn't find it. */
                return False;
        }

        /* Found this particular pending lock - delete it */
        if (i < br_lck->num_locks - 1) {
                memmove(&locks[i], &locks[i+1],
                        sizeof(*locks)*((br_lck->num_locks-1) - i));
        }

        br_lck->num_locks -= 1;
        br_lck->modified = True;
        return True;
}

/****************************************************************************
 Remove any locks associated with an open file.
 If this process owns Windows locks on other fnums for the same dev/ino
 pair, we must unlock them individually so the underlying system POSIX
 locks are released correctly; otherwise we can bulk delete.
****************************************************************************/

void brl_close_fnum(struct messaging_context *msg_ctx,
                    struct byte_range_lock *br_lck)
{
        files_struct *fsp = br_lck->fsp;
        uint16 tid = fsp->conn->cnum;
        int fnum = fsp->fnum;
        unsigned int i, j, dcount=0;
        int num_deleted_windows_locks = 0;
        struct lock_struct *locks = br_lck->lock_data;
        struct server_id pid = procid_self();
        BOOL unlock_individually = False;

        if(lp_posix_locking(fsp->conn->params)) {

                /* Check if there are any Windows locks associated with this dev/ino
                   pair that are not this fnum. If so we need to call unlock on each
                   one in order to release the system POSIX locks correctly. */

                for (i=0; i < br_lck->num_locks; i++) {
                        struct lock_struct *lock = &locks[i];

                        if (!procid_equal(&lock->context.pid, &pid)) {
                                continue;
                        }

                        if (lock->lock_type != READ_LOCK && lock->lock_type != WRITE_LOCK) {
                                continue; /* Ignore pending. */
                        }

                        if (lock->context.tid != tid || lock->fnum != fnum) {
                                unlock_individually = True;
                                break;
                        }
                }

                if (unlock_individually) {
                        struct lock_struct *locks_copy;
                        unsigned int num_locks_copy;

                        /* Copy the current lock array. */
                        if (br_lck->num_locks) {
                                locks_copy = (struct lock_struct *)TALLOC_MEMDUP(br_lck, locks, br_lck->num_locks * sizeof(struct lock_struct));
                                if (!locks_copy) {
                                        smb_panic("brl_close_fnum: talloc failed");
                                }
                        } else {
                                locks_copy = NULL;
                        }

                        num_locks_copy = br_lck->num_locks;

                        for (i=0; i < num_locks_copy; i++) {
                                struct lock_struct *lock = &locks_copy[i];

                                if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid) &&
                                                (lock->fnum == fnum)) {
                                        brl_unlock(msg_ctx,
                                                br_lck,
                                                lock->context.smbpid,
                                                pid,
                                                lock->start,
                                                lock->size,
                                                lock->lock_flav);
                                }
                        }
                        return;
                }
        }

        /* We can bulk delete - any POSIX locks will be removed when the fd closes. */

        /* Remove any existing locks for this fnum (or any fnum if they're POSIX). */

        for (i=0; i < br_lck->num_locks; i++) {
                struct lock_struct *lock = &locks[i];
                BOOL del_this_lock = False;

                if (lock->context.tid == tid && procid_equal(&lock->context.pid, &pid)) {
                        if ((lock->lock_flav == WINDOWS_LOCK) && (lock->fnum == fnum)) {
                                del_this_lock = True;
                                num_deleted_windows_locks++;
                        } else if (lock->lock_flav == POSIX_LOCK) {
                                del_this_lock = True;
                        }
                }

                if (del_this_lock) {
                        /* Send unlock messages to any pending waiters that overlap. */
                        for (j=0; j < br_lck->num_locks; j++) {
                                struct lock_struct *pend_lock = &locks[j];

                                /* Ignore our own or non-pending locks. */
                                if (!IS_PENDING_LOCK(pend_lock->lock_type)) {
                                        continue;
                                }

                                /* Optimisation - don't send to this fnum as we're
                                   closing it. */
                                if (pend_lock->context.tid == tid &&
                                                procid_equal(&pend_lock->context.pid, &pid) &&
                                                pend_lock->fnum == fnum) {
                                        continue;
                                }

                                /* We could send specific lock info here... */
                                if (brl_pending_overlap(lock, pend_lock)) {
                                        messaging_send(msg_ctx, pend_lock->context.pid,
                                                       MSG_SMB_UNLOCK, &data_blob_null);
                                }
                        }

                        /* found it - delete it */
                        if (br_lck->num_locks > 1 && i < br_lck->num_locks - 1) {
                                memmove(&locks[i], &locks[i+1],
                                        sizeof(*locks)*((br_lck->num_locks-1) - i));
                        }
                        br_lck->num_locks--;
                        br_lck->modified = True;
                        i--;
                        dcount++;
                }
        }

        if(lp_posix_locking(fsp->conn->params) && num_deleted_windows_locks) {
                /* Reduce the Windows lock POSIX reference count on this dev/ino pair. */
                reduce_windows_lock_ref_count(fsp, num_deleted_windows_locks);
        }
}

/****************************************************************************
 Ensure this set of lock entries is valid.
****************************************************************************/

static BOOL validate_lock_entries(unsigned int *pnum_entries, struct lock_struct **pplocks)
{
        unsigned int i;
        unsigned int num_valid_entries = 0;
        struct lock_struct *locks = *pplocks;

        for (i = 0; i < *pnum_entries; i++) {
                struct lock_struct *lock_data = &locks[i];
                if (!process_exists(lock_data->context.pid)) {
                        /* This process no longer exists - mark this
                           entry as invalid by zeroing it. */
                        ZERO_STRUCTP(lock_data);
                } else {
                        num_valid_entries++;
                }
        }

        if (num_valid_entries != *pnum_entries) {
                struct lock_struct *new_lock_data = NULL;

                if (num_valid_entries) {
                        new_lock_data = SMB_MALLOC_ARRAY(struct lock_struct, num_valid_entries);
                        if (!new_lock_data) {
                                DEBUG(3, ("malloc fail\n"));
                                return False;
                        }

                        num_valid_entries = 0;
                        for (i = 0; i < *pnum_entries; i++) {
                                struct lock_struct *lock_data = &locks[i];
                                if (lock_data->context.smbpid &&
                                                lock_data->context.tid) {
                                        /* Valid (nonzero) entry - copy it. */
                                        memcpy(&new_lock_data[num_valid_entries],
                                                lock_data, sizeof(struct lock_struct));
                                        num_valid_entries++;
                                }
                        }
                }

                SAFE_FREE(*pplocks);
                *pplocks = new_lock_data;
                *pnum_entries = num_valid_entries;
        }

        return True;
}

struct brl_forall_cb {
        void (*fn)(struct file_id id, struct server_id pid,
                   enum brl_type lock_type,
                   enum brl_flavour lock_flav,
                   br_off start, br_off size,
                   void *private_data);
        void *private_data;
};

/****************************************************************************
 Traverse the whole database with this function, calling traverse_callback
 on each lock.
****************************************************************************/

static int traverse_fn(struct db_record *rec, void *state)
{
        struct brl_forall_cb *cb = (struct brl_forall_cb *)state;
        struct lock_struct *locks;
        struct file_id *key;
        unsigned int i;
        unsigned int num_locks = 0;
        unsigned int orig_num_locks = 0;

        /* In a traverse function we must make a copy of
           dbuf before modifying it. */

        locks = (struct lock_struct *)memdup(rec->value.dptr,
                                             rec->value.dsize);
        if (!locks) {
                return -1; /* Terminate traversal. */
        }

        key = (struct file_id *)rec->key.dptr;
        orig_num_locks = num_locks = rec->value.dsize/sizeof(*locks);

        /* Ensure the lock db is clean of entries from invalid processes. */

        if (!validate_lock_entries(&num_locks, &locks)) {
                SAFE_FREE(locks);
                return -1; /* Terminate traversal */
        }

        if (orig_num_locks != num_locks) {
                if (num_locks) {
                        TDB_DATA data;
                        data.dptr = (uint8_t *)locks;
                        data.dsize = num_locks*sizeof(struct lock_struct);
                        rec->store(rec, data, TDB_REPLACE);
                } else {
                        rec->delete_rec(rec);
                }
        }

        for ( i=0; i<num_locks; i++) {
                cb->fn(*key,
                       locks[i].context.pid,
                       locks[i].lock_type,
                       locks[i].lock_flav,
                       locks[i].start,
                       locks[i].size,
                       cb->private_data);
        }

        SAFE_FREE(locks);
        return 0;
}

/*******************************************************************
 Call the specified function on each lock in the database.
********************************************************************/

int brl_forall(void (*fn)(struct file_id id, struct server_id pid,
                          enum brl_type lock_type,
                          enum brl_flavour lock_flav,
                          br_off start, br_off size,
                          void *private_data),
               void *private_data)
{
        struct brl_forall_cb cb;

        if (!brlock_db) {
                return 0;
        }
        cb.fn = fn;
        cb.private_data = private_data;
        return brlock_db->traverse(brlock_db, traverse_fn, &cb);
}
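
/*
 * Illustrative sketch only (not in the original file): a brl_forall()
 * callback that prints every lock in the database, in the style of
 * print_lock_struct() above. The function name is hypothetical.
 */
#if 0
static void example_print_fn(struct file_id id, struct server_id pid,
                             enum brl_type lock_type,
                             enum brl_flavour lock_flav,
                             br_off start, br_off size,
                             void *private_data)
{
        DEBUG(0,("lock: pid = %s, type = %s, flav = %s, "
                 "start = %.0f, size = %.0f\n",
                 procid_str_static(&pid),
                 lock_type_name(lock_type),
                 lock_flav_name(lock_flav),
                 (double)start,
                 (double)size));
}

/* Usage: brl_forall(example_print_fn, NULL); */
#endif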

/*******************************************************************
 Store a potentially modified set of byte range lock data back into
 the database.
 Unlock the record.
********************************************************************/

static int byte_range_lock_destructor(struct byte_range_lock *br_lck)
{
        TDB_DATA key;

        key.dptr = (uint8 *)&br_lck->key;
        key.dsize = sizeof(struct file_id);

        if (br_lck->read_only) {
                SMB_ASSERT(!br_lck->modified);
        }

        if (!br_lck->modified) {
                goto done;
        }

        if (br_lck->num_locks == 0) {
                /* No locks - delete this entry. */
                NTSTATUS status = br_lck->record->delete_rec(br_lck->record);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(0, ("delete_rec returned %s\n",
                                  nt_errstr(status)));
                        smb_panic("Could not delete byte range lock entry");
                }
        } else {
                TDB_DATA data;
                NTSTATUS status;

                data.dptr = (uint8 *)br_lck->lock_data;
                data.dsize = br_lck->num_locks * sizeof(struct lock_struct);

                status = br_lck->record->store(br_lck->record, data,
                                               TDB_REPLACE);
                if (!NT_STATUS_IS_OK(status)) {
                        DEBUG(0, ("store returned %s\n", nt_errstr(status)));
                        smb_panic("Could not store byte range mode entry");
                }
        }

 done:

        SAFE_FREE(br_lck->lock_data);
        TALLOC_FREE(br_lck->record);
        return 0;
}

/*******************************************************************
 Fetch a set of byte range lock data from the database.
 Leave the record locked.
 TALLOC_FREE(brl) will release the lock in the destructor.
********************************************************************/

static struct byte_range_lock *brl_get_locks_internal(TALLOC_CTX *mem_ctx,
                                        files_struct *fsp, BOOL read_only)
{
        TDB_DATA key, data;
        struct byte_range_lock *br_lck = TALLOC_P(mem_ctx, struct byte_range_lock);

        if (br_lck == NULL) {
                return NULL;
        }

        br_lck->fsp = fsp;
        br_lck->num_locks = 0;
        br_lck->modified = False;
        memset(&br_lck->key, '\0', sizeof(struct file_id));
        br_lck->key = fsp->file_id;

        key.dptr = (uint8 *)&br_lck->key;
        key.dsize = sizeof(struct file_id);

        if (!fsp->lockdb_clean) {
                /* We must be read/write to clean
                   the dead entries. */
                read_only = False;
        }

        if (read_only) {
                if (brlock_db->fetch(brlock_db, br_lck, key, &data) == -1) {
                        DEBUG(3, ("Could not fetch byte range lock record\n"));
                        TALLOC_FREE(br_lck);
                        return NULL;
                }
                br_lck->record = NULL;
        }
        else {
                br_lck->record = brlock_db->fetch_locked(brlock_db, br_lck, key);

                if (br_lck->record == NULL) {
                        DEBUG(3, ("Could not lock byte range lock entry\n"));
                        TALLOC_FREE(br_lck);
                        return NULL;
                }

                data = br_lck->record->value;
        }

        br_lck->read_only = read_only;

        talloc_set_destructor(br_lck, byte_range_lock_destructor);

        br_lck->num_locks = data.dsize / sizeof(struct lock_struct);
        br_lck->lock_data = SMB_MALLOC_ARRAY(struct lock_struct, br_lck->num_locks);
        if ((br_lck->num_locks != 0) && (br_lck->lock_data == NULL)) {
                DEBUG(0, ("malloc failed\n"));
                TALLOC_FREE(br_lck);
                return NULL;
        }

        memcpy(br_lck->lock_data, data.dptr, data.dsize);

        if (!fsp->lockdb_clean) {
                int orig_num_locks = br_lck->num_locks;

                /* This is the first time we've accessed this. */
                /* Go through and ensure all entries exist - remove any that don't. */
                /* Makes the lockdb self cleaning at low cost. */

                if (!validate_lock_entries(&br_lck->num_locks,
                                           &br_lck->lock_data)) {
                        SAFE_FREE(br_lck->lock_data);
                        TALLOC_FREE(br_lck);
                        return NULL;
                }

                /* Ensure invalid locks are cleaned up in the destructor. */
                if (orig_num_locks != br_lck->num_locks) {
                        br_lck->modified = True;
                }

                /* Mark the lockdb as "clean" as seen from this open file. */
                fsp->lockdb_clean = True;
        }

        if (DEBUGLEVEL >= 10) {
                unsigned int i;
                struct lock_struct *locks = br_lck->lock_data;
                DEBUG(10,("brl_get_locks_internal: %u current locks on file_id %s\n",
                        br_lck->num_locks,
                        file_id_static_string(&fsp->file_id)));
                for( i = 0; i < br_lck->num_locks; i++) {
                        print_lock_struct(i, &locks[i]);
                }
        }
        return br_lck;
}

struct byte_range_lock *brl_get_locks(TALLOC_CTX *mem_ctx,
                                        files_struct *fsp)
{
        return brl_get_locks_internal(mem_ctx, fsp, False);
}

struct byte_range_lock *brl_get_locks_readonly(TALLOC_CTX *mem_ctx,
                                        files_struct *fsp)
{
        return brl_get_locks_internal(mem_ctx, fsp, True);
}

struct brl_revalidate_state {
        ssize_t array_size;
        uint32 num_pids;
        struct server_id *pids;
};

/*
 * Collect PIDs of all processes with pending entries
 */

static void brl_revalidate_collect(struct file_id id, struct server_id pid,
                                   enum brl_type lock_type,
                                   enum brl_flavour lock_flav,
                                   br_off start, br_off size,
                                   void *private_data)
{
        struct brl_revalidate_state *state =
                (struct brl_revalidate_state *)private_data;

        if (!IS_PENDING_LOCK(lock_type)) {
                return;
        }

        add_to_large_array(state, sizeof(pid), (void *)&pid,
                           &state->pids, &state->num_pids,
                           &state->array_size);
}

/*
 * qsort callback to sort the processes
 */

static int compare_procids(const void *p1, const void *p2)
{
        const struct server_id *i1 = (struct server_id *)p1;
        const struct server_id *i2 = (struct server_id *)p2;

        if (i1->pid < i2->pid) return -1;
        if (i1->pid > i2->pid) return 1;
        return 0;
}

/*
 * Send a MSG_SMB_UNLOCK message to all processes with pending byte range
 * locks so that they retry. Mainly used in the cluster code after a node has
 * died.
 *
 * Done in two steps to avoid double-sends: First we collect all entries in an
 * array, then qsort that array and only send to non-dupes.
 */

static void brl_revalidate(struct messaging_context *msg_ctx,
                           void *private_data,
                           uint32_t msg_type,
                           struct server_id server_id,
                           DATA_BLOB *data)
{
        struct brl_revalidate_state *state;
        uint32 i;
        struct server_id last_pid;

        if (!(state = TALLOC_ZERO_P(NULL, struct brl_revalidate_state))) {
                DEBUG(0, ("talloc failed\n"));
                return;
        }

        brl_forall(brl_revalidate_collect, state);

        if (state->array_size == -1) {
                DEBUG(0, ("talloc failed\n"));
                goto done;
        }

        if (state->num_pids == 0) {
                goto done;
        }

        qsort(state->pids, state->num_pids, sizeof(state->pids[0]),
              compare_procids);

        ZERO_STRUCT(last_pid);

        for (i=0; i<state->num_pids; i++) {
                if (procid_equal(&last_pid, &state->pids[i])) {
                        /*
                         * We've seen that one already
                         */
                        continue;
                }

                messaging_send(msg_ctx, state->pids[i], MSG_SMB_UNLOCK,
                               &data_blob_null);
                last_pid = state->pids[i];
        }

 done:
        TALLOC_FREE(state);
        return;
}

void brl_register_msgs(struct messaging_context *msg_ctx)
{
        messaging_register(msg_ctx, NULL, MSG_SMB_BRL_VALIDATE,
                           brl_revalidate);
}