2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
6 Copyright (C) Jeremy Allison 2010
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "smbd/globals.h"
24 #include "../libcli/smb/smb_common.h"
25 #include "librpc/gen_ndr/messaging.h"
/*
 * One lock element as decoded from the SMB2 LOCK request body
 * (offset/length/flags triple; the 4 reserved bytes are skipped).
 */
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

/*
 * Per-request state for an SMB2 LOCK operation. Kept alive on the
 * tevent_req so a blocked (async) lock can be found again via
 * smb2req->subreq when it is reprocessed or cancelled.
 */
struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	/* NOTE(review): member reconstructed — state->lock_count is used below. */
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
41 static void remove_pending_lock(struct smbd_smb2_lock_state
*state
,
42 struct blocking_lock_record
*blr
);
44 static struct tevent_req
*smbd_smb2_lock_send(TALLOC_CTX
*mem_ctx
,
45 struct tevent_context
*ev
,
46 struct smbd_smb2_request
*smb2req
,
48 uint64_t in_file_id_volatile
,
49 uint16_t in_lock_count
,
50 struct smbd_smb2_lock_element
*in_locks
);
51 static NTSTATUS
smbd_smb2_lock_recv(struct tevent_req
*req
);
53 static void smbd_smb2_request_lock_done(struct tevent_req
*subreq
);
54 NTSTATUS
smbd_smb2_request_process_lock(struct smbd_smb2_request
*req
)
57 const uint8_t *inbody
;
58 const int i
= req
->current_idx
;
59 size_t expected_body_size
= 0x30;
62 uint16_t in_lock_count
;
63 uint64_t in_file_id_persistent
;
64 uint64_t in_file_id_volatile
;
65 struct smbd_smb2_lock_element
*in_locks
;
66 struct tevent_req
*subreq
;
67 const uint8_t *lock_buffer
;
70 inhdr
= (const uint8_t *)req
->in
.vector
[i
+0].iov_base
;
71 if (req
->in
.vector
[i
+1].iov_len
!= (expected_body_size
& 0xFFFFFFFE)) {
72 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
75 inbody
= (const uint8_t *)req
->in
.vector
[i
+1].iov_base
;
77 body_size
= SVAL(inbody
, 0x00);
78 if (body_size
!= expected_body_size
) {
79 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
82 in_smbpid
= IVAL(inhdr
, SMB2_HDR_PID
);
84 in_lock_count
= CVAL(inbody
, 0x02);
85 /* 0x04 - 4 bytes reserved */
86 in_file_id_persistent
= BVAL(inbody
, 0x08);
87 in_file_id_volatile
= BVAL(inbody
, 0x10);
89 if (in_lock_count
< 1) {
90 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
93 if (((in_lock_count
- 1) * 0x18) > req
->in
.vector
[i
+2].iov_len
) {
94 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
97 if (req
->compat_chain_fsp
) {
99 } else if (in_file_id_persistent
!= in_file_id_volatile
) {
100 return smbd_smb2_request_error(req
, NT_STATUS_FILE_CLOSED
);
103 in_locks
= talloc_array(req
, struct smbd_smb2_lock_element
,
105 if (in_locks
== NULL
) {
106 return smbd_smb2_request_error(req
, NT_STATUS_NO_MEMORY
);
110 lock_buffer
= inbody
+ 0x18;
112 in_locks
[l
].offset
= BVAL(lock_buffer
, 0x00);
113 in_locks
[l
].length
= BVAL(lock_buffer
, 0x08);
114 in_locks
[l
].flags
= IVAL(lock_buffer
, 0x10);
115 /* 0x14 - 4 reserved bytes */
117 lock_buffer
= (const uint8_t *)req
->in
.vector
[i
+2].iov_base
;
119 for (l
=1; l
< in_lock_count
; l
++) {
120 in_locks
[l
].offset
= BVAL(lock_buffer
, 0x00);
121 in_locks
[l
].length
= BVAL(lock_buffer
, 0x08);
122 in_locks
[l
].flags
= IVAL(lock_buffer
, 0x10);
123 /* 0x14 - 4 reserved bytes */
128 subreq
= smbd_smb2_lock_send(req
,
129 req
->sconn
->smb2
.event_ctx
,
135 if (subreq
== NULL
) {
136 return smbd_smb2_request_error(req
, NT_STATUS_NO_MEMORY
);
138 tevent_req_set_callback(subreq
, smbd_smb2_request_lock_done
, req
);
140 return smbd_smb2_request_pending_queue(req
, subreq
);
143 static void smbd_smb2_request_lock_done(struct tevent_req
*subreq
)
145 struct smbd_smb2_request
*smb2req
= tevent_req_callback_data(subreq
,
146 struct smbd_smb2_request
);
149 NTSTATUS error
; /* transport error */
151 if (smb2req
->cancelled
) {
152 const uint8_t *inhdr
= (const uint8_t *)
153 smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
154 uint64_t mid
= BVAL(inhdr
, SMB2_HDR_MESSAGE_ID
);
155 struct smbd_smb2_lock_state
*state
;
157 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
158 (unsigned long long)mid
));
160 state
= tevent_req_data(smb2req
->subreq
,
161 struct smbd_smb2_lock_state
);
164 SMB_ASSERT(state
->blr
);
166 remove_pending_lock(state
, state
->blr
);
168 error
= smbd_smb2_request_error(smb2req
, NT_STATUS_CANCELLED
);
169 if (!NT_STATUS_IS_OK(error
)) {
170 smbd_server_connection_terminate(smb2req
->sconn
,
177 status
= smbd_smb2_lock_recv(subreq
);
179 if (!NT_STATUS_IS_OK(status
)) {
180 error
= smbd_smb2_request_error(smb2req
, status
);
181 if (!NT_STATUS_IS_OK(error
)) {
182 smbd_server_connection_terminate(smb2req
->sconn
,
189 outbody
= data_blob_talloc(smb2req
->out
.vector
, NULL
, 0x04);
190 if (outbody
.data
== NULL
) {
191 error
= smbd_smb2_request_error(smb2req
, NT_STATUS_NO_MEMORY
);
192 if (!NT_STATUS_IS_OK(error
)) {
193 smbd_server_connection_terminate(smb2req
->sconn
,
200 SSVAL(outbody
.data
, 0x00, 0x04); /* struct size */
201 SSVAL(outbody
.data
, 0x02, 0); /* reserved */
203 error
= smbd_smb2_request_done(smb2req
, outbody
, NULL
);
204 if (!NT_STATUS_IS_OK(error
)) {
205 smbd_server_connection_terminate(smb2req
->sconn
,
211 static struct tevent_req
*smbd_smb2_lock_send(TALLOC_CTX
*mem_ctx
,
212 struct tevent_context
*ev
,
213 struct smbd_smb2_request
*smb2req
,
215 uint64_t in_file_id_volatile
,
216 uint16_t in_lock_count
,
217 struct smbd_smb2_lock_element
*in_locks
)
219 struct tevent_req
*req
;
220 struct smbd_smb2_lock_state
*state
;
221 struct smb_request
*smb1req
;
222 connection_struct
*conn
= smb2req
->tcon
->compat_conn
;
224 int32_t timeout
= -1;
225 bool isunlock
= false;
227 struct smbd_lock_element
*locks
;
231 req
= tevent_req_create(mem_ctx
, &state
,
232 struct smbd_smb2_lock_state
);
236 state
->smb2req
= smb2req
;
237 smb2req
->subreq
= req
; /* So we can find this when going async. */
239 smb1req
= smbd_smb2_fake_smb_request(smb2req
);
240 if (tevent_req_nomem(smb1req
, req
)) {
241 return tevent_req_post(req
, ev
);
243 state
->smb1req
= smb1req
;
245 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
246 (unsigned long long)in_file_id_volatile
));
248 fsp
= file_fsp(smb1req
, (uint16_t)in_file_id_volatile
);
250 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
251 return tevent_req_post(req
, ev
);
253 if (conn
!= fsp
->conn
) {
254 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
255 return tevent_req_post(req
, ev
);
257 if (smb2req
->session
->vuid
!= fsp
->vuid
) {
258 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
259 return tevent_req_post(req
, ev
);
262 locks
= talloc_array(state
, struct smbd_lock_element
, in_lock_count
);
264 tevent_req_nterror(req
, NT_STATUS_NO_MEMORY
);
265 return tevent_req_post(req
, ev
);
268 switch (in_locks
[0].flags
) {
269 case SMB2_LOCK_FLAG_SHARED
:
270 case SMB2_LOCK_FLAG_EXCLUSIVE
:
271 if (in_lock_count
> 1) {
272 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
273 return tevent_req_post(req
, ev
);
278 case SMB2_LOCK_FLAG_SHARED
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
279 case SMB2_LOCK_FLAG_EXCLUSIVE
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
283 case SMB2_LOCK_FLAG_UNLOCK
:
284 /* only the first lock gives the UNLOCK bit - see
291 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
292 return tevent_req_post(req
, ev
);
295 for (i
=0; i
<in_lock_count
; i
++) {
296 bool invalid
= false;
298 switch (in_locks
[i
].flags
) {
299 case SMB2_LOCK_FLAG_SHARED
:
300 case SMB2_LOCK_FLAG_EXCLUSIVE
:
306 tevent_req_nterror(req
,
307 NT_STATUS_INVALID_PARAMETER
);
308 return tevent_req_post(req
, ev
);
312 case SMB2_LOCK_FLAG_SHARED
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
313 case SMB2_LOCK_FLAG_EXCLUSIVE
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
319 case SMB2_LOCK_FLAG_UNLOCK
:
321 tevent_req_nterror(req
,
322 NT_STATUS_INVALID_PARAMETER
);
323 return tevent_req_post(req
, ev
);
330 * is the first element was a UNLOCK
331 * we need to deferr the error response
332 * to the backend, because we need to process
333 * all unlock elements before
338 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
339 return tevent_req_post(req
, ev
);
342 locks
[i
].smblctx
= in_file_id_volatile
;
343 locks
[i
].offset
= in_locks
[i
].offset
;
344 locks
[i
].count
= in_locks
[i
].length
;
346 if (in_locks
[i
].flags
& SMB2_LOCK_FLAG_EXCLUSIVE
) {
347 locks
[i
].brltype
= WRITE_LOCK
;
348 } else if (in_locks
[i
].flags
& SMB2_LOCK_FLAG_SHARED
) {
349 locks
[i
].brltype
= READ_LOCK
;
350 } else if (invalid
) {
352 * this is an invalid UNLOCK element
353 * and the backend needs to test for
354 * brltype != UNLOCK_LOCK and return
355 * NT_STATUS_INVALID_PARAMER
357 locks
[i
].brltype
= READ_LOCK
;
359 locks
[i
].brltype
= UNLOCK_LOCK
;
362 DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
363 "smblctx = %llu type %d\n",
365 (unsigned long long)locks
[i
].offset
,
366 (unsigned long long)locks
[i
].count
,
367 (unsigned long long)locks
[i
].smblctx
,
368 (int)locks
[i
].brltype
));
371 state
->locks
= locks
;
372 state
->lock_count
= in_lock_count
;
375 status
= smbd_do_locking(smb1req
, fsp
,
384 status
= smbd_do_locking(smb1req
, fsp
,
393 if (!NT_STATUS_IS_OK(status
)) {
394 if (NT_STATUS_EQUAL(status
, NT_STATUS_FILE_LOCK_CONFLICT
)) {
395 status
= NT_STATUS_LOCK_NOT_GRANTED
;
397 tevent_req_nterror(req
, status
);
398 return tevent_req_post(req
, ev
);
405 tevent_req_done(req
);
406 return tevent_req_post(req
, ev
);
409 static NTSTATUS
smbd_smb2_lock_recv(struct tevent_req
*req
)
413 if (tevent_req_is_nterror(req
, &status
)) {
414 tevent_req_received(req
);
418 tevent_req_received(req
);
422 /****************************************************************
423 Cancel an outstanding blocking lock request.
424 *****************************************************************/
426 static bool smbd_smb2_lock_cancel(struct tevent_req
*req
)
428 struct smbd_smb2_request
*smb2req
= NULL
;
429 struct smbd_smb2_lock_state
*state
= tevent_req_data(req
,
430 struct smbd_smb2_lock_state
);
435 if (!state
->smb2req
) {
439 smb2req
= state
->smb2req
;
440 smb2req
->cancelled
= true;
442 tevent_req_done(req
);
446 /****************************************************************
447 Got a message saying someone unlocked a file. Re-schedule all
448 blocking lock requests as we don't know if anything overlapped.
449 *****************************************************************/
451 static void received_unlock_msg(struct messaging_context
*msg
,
454 struct server_id server_id
,
457 struct smbd_server_connection
*sconn
;
459 DEBUG(10,("received_unlock_msg (SMB2)\n"));
461 sconn
= msg_ctx_to_sconn(msg
);
463 DEBUG(1, ("could not find sconn\n"));
466 process_blocking_lock_queue_smb2(sconn
, timeval_current());
469 /****************************************************************
470 Function to get the blr on a pending record.
471 *****************************************************************/
473 struct blocking_lock_record
*get_pending_smb2req_blr(struct smbd_smb2_request
*smb2req
)
475 struct smbd_smb2_lock_state
*state
= NULL
;
476 const uint8_t *inhdr
;
481 if (smb2req
->subreq
== NULL
) {
484 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
487 inhdr
= (const uint8_t *)smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
488 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) != SMB2_OP_LOCK
) {
491 state
= tevent_req_data(smb2req
->subreq
,
492 struct smbd_smb2_lock_state
);
498 /****************************************************************
499 Set up the next brl timeout.
500 *****************************************************************/
502 static bool recalc_smb2_brl_timeout(struct smbd_server_connection
*sconn
)
504 struct smbd_smb2_request
*smb2req
;
505 struct timeval next_timeout
= timeval_zero();
506 int max_brl_timeout
= lp_parm_int(-1, "brl", "recalctime", 5);
508 TALLOC_FREE(sconn
->smb2
.locks
.brl_timeout
);
510 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= smb2req
->next
) {
511 struct blocking_lock_record
*blr
=
512 get_pending_smb2req_blr(smb2req
);
516 if (timeval_is_zero(&blr
->expire_time
)) {
518 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
519 * a POSIX lock, so calculate a timeout of
520 * 10 seconds into the future.
522 if (blr
->blocking_smblctx
== 0xFFFFFFFFFFFFFFFFLL
) {
523 struct timeval psx_to
= timeval_current_ofs(10, 0);
524 next_timeout
= timeval_brl_min(&next_timeout
, &psx_to
);
530 next_timeout
= timeval_brl_min(&next_timeout
, &blr
->expire_time
);
533 if (timeval_is_zero(&next_timeout
)) {
534 DEBUG(10, ("recalc_smb2_brl_timeout:Next "
535 "timeout = Infinite.\n"));
540 * To account for unclean shutdowns by clients we need a
541 * maximum timeout that we use for checking pending locks. If
542 * we have any pending locks at all, then check if the pending
543 * lock can continue at least every brl:recalctime seconds
544 * (default 5 seconds).
546 * This saves us needing to do a message_send_all() in the
547 * SIGCHLD handler in the parent daemon. That
548 * message_send_all() caused O(n^2) work to be done when IP
549 * failovers happened in clustered Samba, which could make the
550 * entire system unusable for many minutes.
553 if (max_brl_timeout
> 0) {
554 struct timeval min_to
= timeval_current_ofs(max_brl_timeout
, 0);
555 next_timeout
= timeval_brl_min(&next_timeout
, &min_to
);
559 struct timeval cur
, from_now
;
561 cur
= timeval_current();
562 from_now
= timeval_until(&cur
, &next_timeout
);
563 DEBUG(10, ("recalc_smb2_brl_timeout: Next "
564 "timeout = %d.%d seconds from now.\n",
565 (int)from_now
.tv_sec
, (int)from_now
.tv_usec
));
568 sconn
->smb2
.locks
.brl_timeout
= event_add_timed(
569 smbd_event_context(),
574 if (!sconn
->smb2
.locks
.brl_timeout
) {
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/
585 bool push_blocking_lock_request_smb2( struct byte_range_lock
*br_lck
,
586 struct smb_request
*smb1req
,
591 enum brl_type lock_type
,
592 enum brl_flavour lock_flav
,
595 uint64_t blocking_smblctx
)
597 struct smbd_server_connection
*sconn
= smb1req
->sconn
;
598 struct smbd_smb2_request
*smb2req
= smb1req
->smb2req
;
599 struct tevent_req
*req
= NULL
;
600 struct smbd_smb2_lock_state
*state
= NULL
;
601 struct blocking_lock_record
*blr
= NULL
;
602 NTSTATUS status
= NT_STATUS_OK
;
607 req
= smb2req
->subreq
;
611 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
614 state
= tevent_req_data(req
, struct smbd_smb2_lock_state
);
619 blr
= talloc_zero(state
, struct blocking_lock_record
);
625 if (lock_timeout
== -1) {
626 blr
->expire_time
.tv_sec
= 0;
627 blr
->expire_time
.tv_usec
= 0; /* Never expire. */
629 blr
->expire_time
= timeval_current_ofs(
631 (lock_timeout
% 1000) * 1000);
634 blr
->lock_num
= lock_num
;
635 blr
->smblctx
= smblctx
;
636 blr
->blocking_smblctx
= blocking_smblctx
;
637 blr
->lock_flav
= lock_flav
;
638 blr
->lock_type
= lock_type
;
639 blr
->offset
= offset
;
642 /* Specific brl_lock() implementations can fill this in. */
643 blr
->blr_private
= NULL
;
645 /* Add a pending lock record for this. */
646 status
= brl_lock(sconn
->msg_ctx
,
649 sconn_server_id(sconn
),
652 lock_type
== READ_LOCK
? PENDING_READ_LOCK
: PENDING_WRITE_LOCK
,
658 if (!NT_STATUS_IS_OK(status
)) {
659 DEBUG(0,("push_blocking_lock_request_smb2: "
660 "failed to add PENDING_LOCK record.\n"));
666 DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
670 recalc_smb2_brl_timeout(sconn
);
672 /* Ensure we'll receive messages when this is unlocked. */
673 if (!sconn
->smb2
.locks
.blocking_lock_unlock_state
) {
674 messaging_register(sconn
->msg_ctx
, NULL
,
675 MSG_SMB_UNLOCK
, received_unlock_msg
);
676 sconn
->smb2
.locks
.blocking_lock_unlock_state
= true;
679 /* allow this request to be canceled */
680 tevent_req_set_cancel_fn(req
, smbd_smb2_lock_cancel
);
685 /****************************************************************
686 Remove a pending lock record under lock.
687 *****************************************************************/
689 static void remove_pending_lock(struct smbd_smb2_lock_state
*state
,
690 struct blocking_lock_record
*blr
)
693 struct byte_range_lock
*br_lck
= brl_get_locks(
696 DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr
));
699 brl_lock_cancel(br_lck
,
701 sconn_server_id(blr
->fsp
->conn
->sconn
),
709 /* Remove the locks we already got. */
711 for(i
= blr
->lock_num
- 1; i
>= 0; i
--) {
712 struct smbd_lock_element
*e
= &state
->locks
[i
];
714 do_unlock(blr
->fsp
->conn
->sconn
->msg_ctx
,
/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/
728 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request
*smb2req
,
729 struct timeval tv_curr
)
731 NTSTATUS status
= NT_STATUS_UNSUCCESSFUL
;
732 struct blocking_lock_record
*blr
= NULL
;
733 struct smbd_smb2_lock_state
*state
= NULL
;
734 files_struct
*fsp
= NULL
;
736 if (!smb2req
->subreq
) {
739 state
= tevent_req_data(smb2req
->subreq
, struct smbd_smb2_lock_state
);
747 /* Try and finish off getting all the outstanding locks. */
749 for (; blr
->lock_num
< state
->lock_count
; blr
->lock_num
++) {
750 struct byte_range_lock
*br_lck
= NULL
;
751 struct smbd_lock_element
*e
= &state
->locks
[blr
->lock_num
];
753 br_lck
= do_lock(fsp
->conn
->sconn
->msg_ctx
,
762 &blr
->blocking_smblctx
,
767 if (NT_STATUS_IS_ERR(status
)) {
772 if(blr
->lock_num
== state
->lock_count
) {
774 * Success - we got all the locks.
777 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
778 "fnum=%d num_locks=%d\n",
781 (int)state
->lock_count
));
783 tevent_req_done(smb2req
->subreq
);
787 if (!NT_STATUS_EQUAL(status
,NT_STATUS_LOCK_NOT_GRANTED
) &&
788 !NT_STATUS_EQUAL(status
,NT_STATUS_FILE_LOCK_CONFLICT
)) {
790 * We have other than a "can't get lock"
791 * error. Return an error.
793 remove_pending_lock(state
, blr
);
794 tevent_req_nterror(smb2req
->subreq
, status
);
799 * We couldn't get the locks for this record on the list.
800 * If the time has expired, return a lock error.
803 if (!timeval_is_zero(&blr
->expire_time
) &&
804 timeval_compare(&blr
->expire_time
, &tv_curr
) <= 0) {
805 remove_pending_lock(state
, blr
);
806 tevent_req_nterror(smb2req
->subreq
, NT_STATUS_LOCK_NOT_GRANTED
);
811 * Still can't get all the locks - keep waiting.
814 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
815 "for file %s, fnum = %d. Still waiting....\n",
817 (int)state
->lock_count
,
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the blocking lock queue.
*****************************************************************/
830 void process_blocking_lock_queue_smb2(
831 struct smbd_server_connection
*sconn
, struct timeval tv_curr
)
833 struct smbd_smb2_request
*smb2req
, *nextreq
;
835 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= nextreq
) {
836 const uint8_t *inhdr
;
838 nextreq
= smb2req
->next
;
840 if (smb2req
->subreq
== NULL
) {
841 /* This message has been processed. */
844 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
845 /* This message has been processed. */
849 inhdr
= (const uint8_t *)smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
850 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) == SMB2_OP_LOCK
) {
851 reprocess_blocked_smb2_lock(smb2req
, tv_curr
);
855 recalc_smb2_brl_timeout(sconn
);
858 /****************************************************************************
859 Remove any locks on this fd. Called from file_close().
860 ****************************************************************************/
862 void cancel_pending_lock_requests_by_fid_smb2(files_struct
*fsp
,
863 struct byte_range_lock
*br_lck
,
864 enum file_close_type close_type
)
866 struct smbd_server_connection
*sconn
= fsp
->conn
->sconn
;
867 struct smbd_smb2_request
*smb2req
, *nextreq
;
869 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= nextreq
) {
870 struct smbd_smb2_lock_state
*state
= NULL
;
871 files_struct
*fsp_curr
= NULL
;
872 int i
= smb2req
->current_idx
;
873 uint64_t in_file_id_volatile
;
874 struct blocking_lock_record
*blr
= NULL
;
875 const uint8_t *inhdr
;
876 const uint8_t *inbody
;
878 nextreq
= smb2req
->next
;
880 if (smb2req
->subreq
== NULL
) {
881 /* This message has been processed. */
884 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
885 /* This message has been processed. */
889 inhdr
= (const uint8_t *)smb2req
->in
.vector
[i
].iov_base
;
890 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) != SMB2_OP_LOCK
) {
891 /* Not a lock call. */
895 inbody
= (const uint8_t *)smb2req
->in
.vector
[i
+1].iov_base
;
896 in_file_id_volatile
= BVAL(inbody
, 0x10);
898 state
= tevent_req_data(smb2req
->subreq
,
899 struct smbd_smb2_lock_state
);
901 /* Strange - is this even possible ? */
905 fsp_curr
= file_fsp(state
->smb1req
, (uint16_t)in_file_id_volatile
);
906 if (fsp_curr
== NULL
) {
907 /* Strange - is this even possible ? */
911 if (fsp_curr
!= fsp
) {
912 /* It's not our fid */
918 /* Remove the entries from the lock db. */
919 brl_lock_cancel(br_lck
,
921 sconn_server_id(sconn
),
927 /* Finally end the request. */
928 if (close_type
== SHUTDOWN_CLOSE
) {
929 tevent_req_done(smb2req
->subreq
);
931 tevent_req_nterror(smb2req
->subreq
,
932 NT_STATUS_RANGE_NOT_LOCKED
);