/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
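
/*
 * SMB2 LOCK/UNLOCK request handling, including blocking byte-range
 * locks that go async and are re-processed when a conflicting lock is
 * released or a timeout expires.
 */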
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};
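
/*
 * The element fields above mirror the SMB2_LOCK_ELEMENT wire format as
 * parsed in smbd_smb2_request_process_lock(): 8 bytes offset at 0x00,
 * 8 bytes length at 0x08, 4 bytes flags at 0x10 and 4 reserved bytes
 * at 0x14 (0x18 bytes per element).
 */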
struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
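
/*
 * Per-request lock state: smb2req/smb1req tie the tevent request back
 * to the SMB2 request and its faked SMB1 compatibility request, blr is
 * filled in once the request has gone async as a blocking lock, and
 * locks/lock_count keep the translated smbd_lock_element array so a
 * blocked request can later be re-processed or rolled back.
 */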
static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
						 struct tevent_context *ev,
						 struct smbd_smb2_request *smb2req,
						 uint32_t in_smbpid,
						 uint64_t in_file_id_volatile,
						 uint16_t in_lock_count,
						 struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);
static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
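
/****************************************************************
 Parse and validate an SMB2 LOCK request, then hand the parsed lock
 elements over to smbd_smb2_lock_send().
*****************************************************************/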
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	const uint8_t *inbody;
	const int i = req->current_idx;
	size_t expected_body_size = 0x30;
	size_t body_size;
	uint32_t in_smbpid;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;

	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	body_size = SVAL(inbody, 0x00);
	if (body_size != expected_body_size) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_smbpid		= IVAL(inhdr, SMB2_HDR_PID);

	in_lock_count		= CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent	= BVAL(inbody, 0x08);
	in_file_id_volatile	= BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	/* All lock elements after the first live in the dynamic buffer,
	   0x18 bytes each. */
	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	/* The first lock element is part of the fixed body. */
	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset	= BVAL(lock_buffer, 0x00);
	in_locks[l].length	= BVAL(lock_buffer, 0x08);
	in_locks[l].flags	= IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	/* Any further elements follow in the dynamic buffer. */
	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l = 1; l < in_lock_count; l++) {
		in_locks[l].offset	= BVAL(lock_buffer, 0x00);
		in_locks[l].length	= BVAL(lock_buffer, 0x08);
		in_locks[l].flags	= IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	subreq = smbd_smb2_lock_send(req,
				     req->sconn->smb2.event_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq);
}
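
/****************************************************************
 Completion callback for the LOCK subrequest: on success build the
 4-byte SMB2 LOCK response, on failure (including a cancelled
 blocking lock) map the status to an error response. A transport
 error tears the connection down.
*****************************************************************/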
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);

		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	status = smbd_smb2_lock_recv(subreq);

	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
						 nt_errstr(error));
		return;
	}
}
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
						 struct tevent_context *ev,
						 struct smbd_smb2_request *smb2req,
						 uint32_t in_smbpid,
						 uint64_t in_file_id_volatile,
						 uint16_t in_lock_count,
						 struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	connection_struct *conn = smb2req->tcon->compat_conn;
	files_struct *fsp;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
		  (unsigned long long)in_file_id_volatile));

	fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
	if (fsp == NULL) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (conn != fsp->conn) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (smb2req->session->vuid != fsp->vuid) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	/* The first element decides whether this is a lock or an unlock
	   batch and whether we may block. */
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;	/* may block */
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;	/* fail immediately */
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	for (i = 0; i < in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to process
				 * all unlock elements before.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = in_file_id_volatile;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}

	state->locks = locks;
	state->lock_count = in_lock_count;

	/*
	 * The full smbd_do_locking() argument lists are elided in this
	 * fragment; they are reconstructed here from its prototype
	 * (type, timeout, num_ulocks, ulocks, num_locks, locks, async).
	 */
	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,		/* type */
					 timeout,
					 in_lock_count,	/* num_ulocks */
					 locks,		/* ulocks */
					 0,		/* num_locks */
					 NULL,		/* locks */
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,		/* type */
					 timeout,
					 0,		/* num_ulocks */
					 NULL,		/* ulocks */
					 in_lock_count,	/* num_locks */
					 locks,		/* locks */
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	/*
	 * Assumed async handling: if the backend marked the request as
	 * blocked, keep it pending instead of completing it here.
	 */
	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);

	return NT_STATUS_OK;
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);

	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}
/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn;

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	sconn = msg_ctx_to_sconn(msg);
	if (sconn == NULL) {
		DEBUG(1, ("could not find sconn\n"));
		return;
	}

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}
/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}

	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}

	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}

	return state->blr;
}
/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (blr == NULL) {
			continue;
		}

		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on a blocking_smblctx of
			 * 0xFFFFFFFFFFFFFFFF this is a POSIX lock, so
			 * calculate a timeout of 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}
			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */
	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	/*
	 * The remaining event_add_timed() arguments are elided in this
	 * fragment; brl_timeout_fn_smb2 is an assumed handler name.
	 */
	sconn->smb2.locks.brl_timeout = event_add_timed(
				server_event_context(),
				NULL,
				next_timeout,
				brl_timeout_fn_smb2,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}

	return true;
}
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should be -1
 (never expire) or a lock wait time in milliseconds.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/*
	 * Add a pending lock record for this. The argument list below is
	 * partially reconstructed from the brl_lock() prototype, as the
	 * original lines are elided in this fragment.
	 */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			sconn_server_id(sconn),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			lock_flav,
			true,		/* blocking_lock */
			NULL,		/* psmblctx */
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, NULL,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}
/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				talloc_tos(), blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	/*
	 * Several argument lines here are elided in the fragment; the
	 * brl_lock_cancel() and do_unlock() calls below are reconstructed
	 * from their prototypes.
	 */
	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(blr->fsp->conn->sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for (i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}
/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		/*
		 * The do_lock() argument list is partially elided in this
		 * fragment and reconstructed from its prototype.
		 */
		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,		/* blocking_lock */
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if (blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"fnum=%d num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp->fnum,
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, fnum = %d. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		fsp->fnum));
}
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}
/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		uint64_t in_file_id_volatile;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;
		const uint8_t *inbody;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
		in_file_id_volatile = BVAL(inbody, 0x10);

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/*
		 * Remove the entries from the lock db. The brl_lock_cancel()
		 * argument list is partially elided in this fragment and
		 * reconstructed from its prototype.
		 */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
					   NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}