/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"

struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);

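/*
 * Overview of the flow implemented in this file:
 *
 *   smbd_smb2_request_process_lock() parses the SMB2 LOCK request and
 *   hands the lock elements to smbd_smb2_lock_send(). If all locks can
 *   be granted (or this is an unlock run) the request completes
 *   synchronously; otherwise push_blocking_lock_request_smb2() queues a
 *   blocking_lock_record and the request goes async. Pending requests
 *   are retried from process_blocking_lock_queue_smb2() and the final
 *   reply is sent from smbd_smb2_request_lock_done().
 */
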
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);

	in_lock_count		= CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent	= BVAL(inbody, 0x08);
	in_file_id_volatile	= BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	/* The first lock element is part of the fixed body. */
	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset	= BVAL(lock_buffer, 0x00);
	in_locks[l].length	= BVAL(lock_buffer, 0x08);
	in_locks[l].flags	= IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	/* Any remaining lock elements live in the dynamic part. */
	lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);

	for (l = 1; l < in_lock_count; l++) {
		in_locks[l].offset	= BVAL(lock_buffer, 0x00);
		in_locks[l].length	= BVAL(lock_buffer, 0x08);
		in_locks[l].flags	= IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
				     req, in_fsp,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}

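/*
 * Layout of the SMB2 LOCK request body assumed by the parser above
 * (fixed part is 0x30 bytes, verified by smbd_smb2_request_verify_sizes):
 *
 *   0x00  uint16  StructureSize (48)
 *   0x02  uint16  LockCount
 *   0x04  4 bytes reserved (the lock sequence in later dialects)
 *   0x08  uint64  FileId.Persistent
 *   0x10  uint64  FileId.Volatile
 *   0x18  first 24-byte lock element (offset, length, flags, 4 reserved)
 *
 * Lock elements beyond the first follow in the dynamic buffer, 0x18 bytes
 * each, which is what the (in_lock_count - 1) * 0x18 length check above
 * enforces.
 */
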
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	status = smbd_smb2_lock_recv(subreq);

	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
						 nt_errstr(error));
		return;
	}
}

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
		  fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	/* The first element determines the mode of the whole request. */
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* Only the first lock element carries the UNLOCK bit -
		   see MS-SMB2 3.3.5.14. */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	for (i = 0; i < in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements first.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * This is an invalid UNLOCK element. The backend
			 * needs to test for brltype != UNLOCK_LOCK and
			 * return NT_STATUS_INVALID_PARAMETER.
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}

	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 0,
					 NULL,
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 0,
					 NULL,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		/* Went async - the reply is sent once the lock is granted. */
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}

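/*
 * Summary of how the lock flags map onto behaviour in
 * smbd_smb2_lock_send() above:
 *
 *   SHARED / EXCLUSIVE                      single element only, may block
 *                                           indefinitely (timeout = -1)
 *   SHARED|FAIL_IMMEDIATELY /
 *   EXCLUSIVE|FAIL_IMMEDIATELY              never blocks (timeout = 0)
 *   UNLOCK                                  whole batch treated as unlocks;
 *                                           invalid elements are passed to
 *                                           the backend so that all unlocks
 *                                           are processed before the error
 *                                           is returned
 */
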
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);

	return NT_STATUS_OK;
}

/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	remove_pending_lock(state, state->blr);

	tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
	tevent_req_nterror(req, NT_STATUS_CANCELLED);
	return true;
}

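/*
 * smbd_smb2_lock_cancel() is installed via tevent_req_set_cancel_fn() in
 * push_blocking_lock_request_smb2() below, so it only runs for requests
 * that have actually gone async. It tears down the pending byte-range
 * lock entry and completes the request with NT_STATUS_CANCELLED, which
 * smbd_smb2_request_lock_done() then turns into the error response.
 */
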
/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn =
		talloc_get_type_abort(private_data,
		struct smbd_server_connection);

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}

/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}

/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on smblctx 0xFFFFFFFFFFFFFFFF
			 * this is a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */

	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = tevent_add_timer(
				sconn->ev_ctx,
				NULL,
				next_timeout,
				brl_timeout_fn_smb2, /* assumed handler name */
				sconn);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}

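/*
 * Illustrative note (assumption, not taken from this file): the
 * recalculation interval used above is a parametric option read via
 * lp_parm_int(), so it can be tuned in smb.conf, e.g.
 *
 *   [global]
 *       brl:recalctime = 2
 *
 * A value of 0 or less disables the periodic cap and relies purely on
 * the per-lock expiry times, as the max_brl_timeout > 0 check shows.
 */
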
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			messaging_server_id(sconn->msg_ctx),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, sconn,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}

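/*
 * Note on the pending record added above: brl_lock() is called with a
 * PENDING_READ_LOCK/PENDING_WRITE_LOCK type, which only records the
 * waiter in the byte-range lock database. remove_pending_lock() below is
 * the inverse: it cancels that pending entry and unwinds any locks that
 * were already granted for this request.
 */
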
/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				talloc_tos(), blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(blr->fsp->conn->sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for (i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}

/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if (blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"%s, num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp_fnum_dbg(fsp),
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, %s. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		fsp_fnum_dbg(fsp)));

	return;
}

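/*
 * Note on the retry loop above: blr->lock_num records how many lock
 * elements have already been granted, so each pass through
 * reprocess_blocked_smb2_lock() resumes with the first element that is
 * still outstanding rather than starting from scratch.
 */
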
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the blocking lock queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}

/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = smb2req->compat_chain_fsp;
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}