2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
6 Copyright (C) Jeremy Allison 2010
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "smbd/smbd.h"
24 #include "smbd/globals.h"
25 #include "../libcli/smb/smb_common.h"
26 #include "../lib/util/tevent_ntstatus.h"
/*
 * One lock element as parsed from an SMB2 LOCK request body.
 * NOTE(review): the member list is not visible in this chunk — the
 * extraction dropped the body lines (offset/length/flags per the
 * parsing code below); confirm against the full file.
 */
29 struct smbd_smb2_lock_element
{
/*
 * Per-request state for an in-flight (possibly async/blocking)
 * SMB2 LOCK operation. Attached to the tevent_req via
 * tevent_req_create() in smbd_smb2_lock_send().
 * NOTE(review): extraction dropped interior lines (e.g. a lock_count
 * member is assigned elsewhere in this file but not visible here).
 */
35 struct smbd_smb2_lock_state
{
	/* Owning SMB2 request; also used to find us again when going async. */
36 struct smbd_smb2_request
*smb2req
;
	/* Fake SMB1 request used to drive the common locking code paths. */
37 struct smb_request
*smb1req
;
	/* Pending blocking-lock record, non-NULL only while blocked. */
38 struct blocking_lock_record
*blr
;
	/* Array of translated lock elements handed to the brlock backend. */
40 struct smbd_lock_element
*locks
;
/* Forward declaration: cancel a pending brlock record and undo any
 * locks already obtained for this request (defined near end of file). */
43 static void remove_pending_lock(struct smbd_smb2_lock_state
*state
,
44 struct blocking_lock_record
*blr
);
/* Forward declaration: async worker that validates and applies the
 * parsed lock elements (defined below).
 * NOTE(review): the extraction dropped at least one parameter line
 * (original lines 49 is missing) — verify the full signature. */
46 static struct tevent_req
*smbd_smb2_lock_send(TALLOC_CTX
*mem_ctx
,
47 struct tevent_context
*ev
,
48 struct smbd_smb2_request
*smb2req
,
50 uint64_t in_file_id_volatile
,
51 uint16_t in_lock_count
,
52 struct smbd_smb2_lock_element
*in_locks
);
/* Forward declaration: collect the result of smbd_smb2_lock_send(). */
53 static NTSTATUS
smbd_smb2_lock_recv(struct tevent_req
*req
);
/* Forward declaration: tevent callback fired when the lock completes. */
55 static void smbd_smb2_request_lock_done(struct tevent_req
*subreq
);
/*
 * Entry point for an SMB2 LOCK request: verify the fixed body size,
 * parse the header/body fields and the variable lock-element array,
 * then hand off to smbd_smb2_lock_send() and queue the request as
 * potentially async (500ms grace before a pending response is sent).
 *
 * NOTE(review): the extraction dropped interior lines (declarations of
 * status/inhdr/in_smbpid/l, several closing braces, and the argument
 * list of the smbd_smb2_lock_send() call) — line numbers jump.
 * Verify against the full file before relying on exact control flow.
 */
56 NTSTATUS
smbd_smb2_request_process_lock(struct smbd_smb2_request
*req
)
59 const uint8_t *inbody
;
60 const int i
= req
->current_idx
;
62 uint16_t in_lock_count
;
63 uint64_t in_file_id_persistent
;
64 uint64_t in_file_id_volatile
;
65 struct smbd_smb2_lock_element
*in_locks
;
66 struct tevent_req
*subreq
;
67 const uint8_t *lock_buffer
;
/* The SMB2 LOCK request has a fixed body of 0x30 bytes. */
71 status
= smbd_smb2_request_verify_sizes(req
, 0x30);
72 if (!NT_STATUS_IS_OK(status
)) {
73 return smbd_smb2_request_error(req
, status
);
/* Header is iovec [i+0], fixed body is iovec [i+1]. */
75 inhdr
= (const uint8_t *)req
->in
.vector
[i
+0].iov_base
;
76 inbody
= (const uint8_t *)req
->in
.vector
[i
+1].iov_base
;
78 in_smbpid
= IVAL(inhdr
, SMB2_HDR_PID
);
80 in_lock_count
= CVAL(inbody
, 0x02);
81 /* 0x04 - 4 bytes reserved */
82 in_file_id_persistent
= BVAL(inbody
, 0x08);
83 in_file_id_volatile
= BVAL(inbody
, 0x10);
/* At least one lock element is required by the protocol. */
85 if (in_lock_count
< 1) {
86 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
/* Elements beyond the first (0x18 bytes each) must fit in the
 * dynamic buffer (iovec [i+2]); reject short requests. */
89 if (((in_lock_count
- 1) * 0x18) > req
->in
.vector
[i
+2].iov_len
) {
90 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
/* Sanity-check the file id; the persistent and volatile halves must
 * match here (durable handles not in play on this path). */
93 if (req
->compat_chain_fsp
) {
95 } else if (in_file_id_persistent
!= in_file_id_volatile
) {
96 return smbd_smb2_request_error(req
, NT_STATUS_FILE_CLOSED
);
99 in_locks
= talloc_array(req
, struct smbd_smb2_lock_element
,
101 if (in_locks
== NULL
) {
102 return smbd_smb2_request_error(req
, NT_STATUS_NO_MEMORY
);
/* First element lives inside the fixed body at offset 0x18. */
106 lock_buffer
= inbody
+ 0x18;
108 in_locks
[l
].offset
= BVAL(lock_buffer
, 0x00);
109 in_locks
[l
].length
= BVAL(lock_buffer
, 0x08);
110 in_locks
[l
].flags
= IVAL(lock_buffer
, 0x10);
111 /* 0x14 - 4 reserved bytes */
/* Remaining elements come from the dynamic buffer.
 * NOTE(review): the lock_buffer advance inside the loop is among the
 * dropped lines — confirm it steps by 0x18 per element. */
113 lock_buffer
= (const uint8_t *)req
->in
.vector
[i
+2].iov_base
;
115 for (l
=1; l
< in_lock_count
; l
++) {
116 in_locks
[l
].offset
= BVAL(lock_buffer
, 0x00);
117 in_locks
[l
].length
= BVAL(lock_buffer
, 0x08);
118 in_locks
[l
].flags
= IVAL(lock_buffer
, 0x10);
119 /* 0x14 - 4 reserved bytes */
/* Kick off the (possibly async) lock operation. */
124 subreq
= smbd_smb2_lock_send(req
,
131 if (subreq
== NULL
) {
132 return smbd_smb2_request_error(req
, NT_STATUS_NO_MEMORY
);
134 tevent_req_set_callback(subreq
, smbd_smb2_request_lock_done
, req
);
/* Allow 500ms before sending an interim "pending" response. */
136 return smbd_smb2_request_pending_queue(req
, subreq
, 500);
/*
 * Completion callback for the async lock subrequest. Handles three
 * outcomes: (1) the request was cancelled — tear down the pending
 * lock and reply NT_STATUS_CANCELLED; (2) the lock failed — forward
 * the error; (3) success — build and send the 4-byte LOCK response.
 * Any transport-level failure while replying terminates the whole
 * connection.
 *
 * NOTE(review): extraction dropped interior lines (closing braces,
 * `return` statements after the terminate calls, the nt_errstr()
 * arguments, and declarations of `status`/`outbody`) — line numbers
 * jump; verify exact flow against the full file.
 */
139 static void smbd_smb2_request_lock_done(struct tevent_req
*subreq
)
141 struct smbd_smb2_request
*smb2req
= tevent_req_callback_data(subreq
,
142 struct smbd_smb2_request
);
145 NTSTATUS error
; /* transport error */
/* Cancellation path: the cancel fn set smb2req->cancelled. */
147 if (smb2req
->cancelled
) {
148 const uint8_t *inhdr
= (const uint8_t *)
149 smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
150 uint64_t mid
= BVAL(inhdr
, SMB2_HDR_MESSAGE_ID
);
151 struct smbd_smb2_lock_state
*state
;
153 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
154 (unsigned long long)mid
));
156 state
= tevent_req_data(smb2req
->subreq
,
157 struct smbd_smb2_lock_state
);
/* A cancelled lock must have been blocked, i.e. have a blr. */
160 SMB_ASSERT(state
->blr
);
162 remove_pending_lock(state
, state
->blr
);
164 error
= smbd_smb2_request_error(smb2req
, NT_STATUS_CANCELLED
);
165 if (!NT_STATUS_IS_OK(error
)) {
166 smbd_server_connection_terminate(smb2req
->sconn
,
/* Normal completion: fetch the result of the lock operation. */
173 status
= smbd_smb2_lock_recv(subreq
);
175 if (!NT_STATUS_IS_OK(status
)) {
176 error
= smbd_smb2_request_error(smb2req
, status
);
177 if (!NT_STATUS_IS_OK(error
)) {
178 smbd_server_connection_terminate(smb2req
->sconn
,
/* Success: SMB2 LOCK response body is 4 bytes (size + reserved). */
185 outbody
= data_blob_talloc(smb2req
->out
.vector
, NULL
, 0x04);
186 if (outbody
.data
== NULL
) {
187 error
= smbd_smb2_request_error(smb2req
, NT_STATUS_NO_MEMORY
);
188 if (!NT_STATUS_IS_OK(error
)) {
189 smbd_server_connection_terminate(smb2req
->sconn
,
196 SSVAL(outbody
.data
, 0x00, 0x04); /* struct size */
197 SSVAL(outbody
.data
, 0x02, 0); /* reserved */
199 error
= smbd_smb2_request_done(smb2req
, outbody
, NULL
);
200 if (!NT_STATUS_IS_OK(error
)) {
201 smbd_server_connection_terminate(smb2req
->sconn
,
/*
 * Core lock worker. Resolves the file handle, validates the flag
 * combination of every lock element against SMB2 rules, translates
 * the elements into smbd_lock_element form, and calls the common
 * smbd_do_locking() backend (separate calls for the unlock and lock
 * cases). Returns a tevent_req; all early failures are posted via
 * tevent_req_post() so the caller always gets async semantics.
 *
 * Flag rules enforced here (per the visible code):
 *  - element 0 SHARED/EXCLUSIVE without FAIL_IMMEDIATELY means a
 *    blocking lock, and then only a single element is allowed;
 *  - only element 0 may carry the UNLOCK bit; an unlock batch makes
 *    the whole request an unlock (isunlock);
 *  - invalid UNLOCK elements after the first are deliberately mapped
 *    to READ_LOCK so the backend rejects them *after* processing the
 *    preceding unlock elements (see comment at original line 326).
 *
 * NOTE(review): extraction dropped many interior lines (the fsp NULL
 * check, `isunlock = true` assignments, loop-local flag handling,
 * most of both smbd_do_locking() argument lists, closing braces) —
 * line numbers jump. Verify against the full file.
 */
207 static struct tevent_req
*smbd_smb2_lock_send(TALLOC_CTX
*mem_ctx
,
208 struct tevent_context
*ev
,
209 struct smbd_smb2_request
*smb2req
,
211 uint64_t in_file_id_volatile
,
212 uint16_t in_lock_count
,
213 struct smbd_smb2_lock_element
*in_locks
)
215 struct tevent_req
*req
;
216 struct smbd_smb2_lock_state
*state
;
217 struct smb_request
*smb1req
;
218 connection_struct
*conn
= smb2req
->tcon
->compat_conn
;
/* -1 == blocking lock with no timeout (wait forever). */
220 int32_t timeout
= -1;
221 bool isunlock
= false;
223 struct smbd_lock_element
*locks
;
227 req
= tevent_req_create(mem_ctx
, &state
,
228 struct smbd_smb2_lock_state
);
232 state
->smb2req
= smb2req
;
233 smb2req
->subreq
= req
; /* So we can find this when going async. */
/* Build a fake SMB1 request so the shared locking code can be used. */
235 smb1req
= smbd_smb2_fake_smb_request(smb2req
);
236 if (tevent_req_nomem(smb1req
, req
)) {
237 return tevent_req_post(req
, ev
);
239 state
->smb1req
= smb1req
;
241 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
242 (unsigned long long)in_file_id_volatile
));
244 fsp
= file_fsp(smb1req
, (uint16_t)in_file_id_volatile
);
246 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
247 return tevent_req_post(req
, ev
);
/* The handle must belong to this tree connect... */
249 if (conn
!= fsp
->conn
) {
250 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
251 return tevent_req_post(req
, ev
);
/* ...and to this session. */
253 if (smb2req
->session
->vuid
!= fsp
->vuid
) {
254 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
255 return tevent_req_post(req
, ev
);
258 locks
= talloc_array(state
, struct smbd_lock_element
, in_lock_count
);
260 tevent_req_nterror(req
, NT_STATUS_NO_MEMORY
);
261 return tevent_req_post(req
, ev
);
/* The first element decides the mode of the whole request. */
264 switch (in_locks
[0].flags
) {
265 case SMB2_LOCK_FLAG_SHARED
:
266 case SMB2_LOCK_FLAG_EXCLUSIVE
:
/* Blocking (no FAIL_IMMEDIATELY): only one element allowed. */
267 if (in_lock_count
> 1) {
268 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
269 return tevent_req_post(req
, ev
);
274 case SMB2_LOCK_FLAG_SHARED
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
275 case SMB2_LOCK_FLAG_EXCLUSIVE
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
279 case SMB2_LOCK_FLAG_UNLOCK
:
280 /* only the first lock gives the UNLOCK bit - see
287 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
288 return tevent_req_post(req
, ev
);
/* Validate and translate every element. */
291 for (i
=0; i
<in_lock_count
; i
++) {
292 bool invalid
= false;
294 switch (in_locks
[i
].flags
) {
295 case SMB2_LOCK_FLAG_SHARED
:
296 case SMB2_LOCK_FLAG_EXCLUSIVE
:
302 tevent_req_nterror(req
,
303 NT_STATUS_INVALID_PARAMETER
);
304 return tevent_req_post(req
, ev
);
308 case SMB2_LOCK_FLAG_SHARED
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
309 case SMB2_LOCK_FLAG_EXCLUSIVE
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
315 case SMB2_LOCK_FLAG_UNLOCK
:
317 tevent_req_nterror(req
,
318 NT_STATUS_INVALID_PARAMETER
);
319 return tevent_req_post(req
, ev
);
326 * is the first element was a UNLOCK
327 * we need to deferr the error response
328 * to the backend, because we need to process
329 * all unlock elements before
334 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
335 return tevent_req_post(req
, ev
);
/* Translate into the common smbd_lock_element format.
 * The volatile file id doubles as the lock context (smblctx). */
338 locks
[i
].smblctx
= in_file_id_volatile
;
339 locks
[i
].offset
= in_locks
[i
].offset
;
340 locks
[i
].count
= in_locks
[i
].length
;
342 if (in_locks
[i
].flags
& SMB2_LOCK_FLAG_EXCLUSIVE
) {
343 locks
[i
].brltype
= WRITE_LOCK
;
344 } else if (in_locks
[i
].flags
& SMB2_LOCK_FLAG_SHARED
) {
345 locks
[i
].brltype
= READ_LOCK
;
346 } else if (invalid
) {
348 * this is an invalid UNLOCK element
349 * and the backend needs to test for
350 * brltype != UNLOCK_LOCK and return
351 * NT_STATUS_INVALID_PARAMER
353 locks
[i
].brltype
= READ_LOCK
;
355 locks
[i
].brltype
= UNLOCK_LOCK
;
358 DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
359 "smblctx = %llu type %d\n",
361 (unsigned long long)locks
[i
].offset
,
362 (unsigned long long)locks
[i
].count
,
363 (unsigned long long)locks
[i
].smblctx
,
364 (int)locks
[i
].brltype
));
367 state
->locks
= locks
;
368 state
->lock_count
= in_lock_count
;
/* Two smbd_do_locking() call sites: unlock batch vs. lock batch.
 * NOTE(review): argument lists dropped by the extraction. */
371 status
= smbd_do_locking(smb1req
, fsp
,
380 status
= smbd_do_locking(smb1req
, fsp
,
389 if (!NT_STATUS_IS_OK(status
)) {
/* Map the POSIX-style conflict to the SMB2-level status. */
390 if (NT_STATUS_EQUAL(status
, NT_STATUS_FILE_LOCK_CONFLICT
)) {
391 status
= NT_STATUS_LOCK_NOT_GRANTED
;
393 tevent_req_nterror(req
, status
);
394 return tevent_req_post(req
, ev
);
401 tevent_req_done(req
);
402 return tevent_req_post(req
, ev
);
/*
 * Standard tevent receive function: extract the NTSTATUS from the
 * completed request and release it.
 * NOTE(review): the `status` declaration and the return statements
 * were dropped by the extraction — presumably returns the error on
 * the nterror path and NT_STATUS_OK otherwise; confirm.
 */
405 static NTSTATUS
smbd_smb2_lock_recv(struct tevent_req
*req
)
409 if (tevent_req_is_nterror(req
, &status
)) {
410 tevent_req_received(req
);
414 tevent_req_received(req
);
418 /****************************************************************
419 Cancel an outstanding blocking lock request.
420 *****************************************************************/
/*
 * tevent cancel function (registered in push_blocking_lock_request_smb2).
 * It does not tear the lock down itself: it only flags the owning SMB2
 * request as cancelled and completes the req, so that
 * smbd_smb2_request_lock_done() performs the actual cleanup and sends
 * NT_STATUS_CANCELLED.
 * NOTE(review): closing braces / return statements dropped by extraction.
 */
422 static bool smbd_smb2_lock_cancel(struct tevent_req
*req
)
424 struct smbd_smb2_request
*smb2req
= NULL
;
425 struct smbd_smb2_lock_state
*state
= tevent_req_data(req
,
426 struct smbd_smb2_lock_state
);
431 if (!state
->smb2req
) {
435 smb2req
= state
->smb2req
;
436 smb2req
->cancelled
= true;
438 tevent_req_done(req
);
442 /****************************************************************
443 Got a message saying someone unlocked a file. Re-schedule all
444 blocking lock requests as we don't know if anything overlapped.
445 *****************************************************************/
/*
 * MSG_SMB_UNLOCK handler (registered in push_blocking_lock_request_smb2):
 * re-walk the whole pending SMB2 blocking-lock queue.
 * NOTE(review): several parameter lines of the messaging callback
 * signature were dropped by the extraction.
 */
447 static void received_unlock_msg(struct messaging_context
*msg
,
450 struct server_id server_id
,
453 struct smbd_server_connection
*sconn
=
454 talloc_get_type_abort(private_data
,
455 struct smbd_server_connection
);
457 DEBUG(10,("received_unlock_msg (SMB2)\n"));
459 process_blocking_lock_queue_smb2(sconn
, timeval_current());
462 /****************************************************************
463 Function to get the blr on a pending record.
464 *****************************************************************/
/*
 * Return the blocking_lock_record of a pending SMB2 LOCK request, or
 * (presumably) NULL when the request has no in-progress LOCK subreq.
 * Guards: subreq present, still in progress, and opcode is LOCK.
 * NOTE(review): the early `return NULL` lines and the final
 * `return state->blr` were dropped by the extraction — confirm.
 */
466 struct blocking_lock_record
*get_pending_smb2req_blr(struct smbd_smb2_request
*smb2req
)
468 struct smbd_smb2_lock_state
*state
= NULL
;
469 const uint8_t *inhdr
;
474 if (smb2req
->subreq
== NULL
) {
477 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
480 inhdr
= (const uint8_t *)smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
481 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) != SMB2_OP_LOCK
) {
484 state
= tevent_req_data(smb2req
->subreq
,
485 struct smbd_smb2_lock_state
);
491 /****************************************************************
492 Set up the next brl timeout.
493 *****************************************************************/
/*
 * Recompute the single timer used to re-check pending SMB2 blocking
 * locks: walk all pending requests, take the earliest expiry (POSIX
 * locks blocked on smblctx 0xFFFFFFFFFFFFFFFF get a synthetic 10s
 * expiry), clamp to at most brl:recalctime seconds (default 5) so
 * unclean client shutdowns are noticed, and (re)arm
 * sconn->smb2.locks.brl_timeout.
 * NOTE(review): extraction dropped interior lines — the blr NULL
 * check in the loop, the tevent_add_timer() argument list, the
 * timer callback name, and the return statements. Verify.
 */
495 static bool recalc_smb2_brl_timeout(struct smbd_server_connection
*sconn
)
497 struct smbd_smb2_request
*smb2req
;
498 struct timeval next_timeout
= timeval_zero();
499 int max_brl_timeout
= lp_parm_int(-1, "brl", "recalctime", 5);
/* Drop any previously-armed timer before recomputing. */
501 TALLOC_FREE(sconn
->smb2
.locks
.brl_timeout
);
503 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= smb2req
->next
) {
504 struct blocking_lock_record
*blr
=
505 get_pending_smb2req_blr(smb2req
);
509 if (timeval_is_zero(&blr
->expire_time
)) {
511 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
512 * a POSIX lock, so calculate a timeout of
513 * 10 seconds into the future.
515 if (blr
->blocking_smblctx
== 0xFFFFFFFFFFFFFFFFLL
) {
516 struct timeval psx_to
= timeval_current_ofs(10, 0);
517 next_timeout
= timeval_brl_min(&next_timeout
, &psx_to
);
523 next_timeout
= timeval_brl_min(&next_timeout
, &blr
->expire_time
);
/* Nothing pending at all: no timer needed. */
526 if (timeval_is_zero(&next_timeout
)) {
527 DEBUG(10, ("recalc_smb2_brl_timeout:Next "
528 "timeout = Infinite.\n"));
533 * To account for unclean shutdowns by clients we need a
534 * maximum timeout that we use for checking pending locks. If
535 * we have any pending locks at all, then check if the pending
536 * lock can continue at least every brl:recalctime seconds
537 * (default 5 seconds).
539 * This saves us needing to do a message_send_all() in the
540 * SIGCHLD handler in the parent daemon. That
541 * message_send_all() caused O(n^2) work to be done when IP
542 * failovers happened in clustered Samba, which could make the
543 * entire system unusable for many minutes.
546 if (max_brl_timeout
> 0) {
547 struct timeval min_to
= timeval_current_ofs(max_brl_timeout
, 0);
548 next_timeout
= timeval_brl_min(&next_timeout
, &min_to
);
552 struct timeval cur
, from_now
;
554 cur
= timeval_current();
555 from_now
= timeval_until(&cur
, &next_timeout
);
556 DEBUG(10, ("recalc_smb2_brl_timeout: Next "
557 "timeout = %d.%d seconds from now.\n",
558 (int)from_now
.tv_sec
, (int)from_now
.tv_usec
));
561 sconn
->smb2
.locks
.brl_timeout
= tevent_add_timer(
567 if (!sconn
->smb2
.locks
.brl_timeout
) {
573 /****************************************************************
574 Get an SMB2 lock reqeust to go async. lock_timeout should
576 *****************************************************************/
/*
 * Convert the current SMB2 LOCK request into a pending blocking lock:
 * allocate a blocking_lock_record on the request state, record which
 * lock element we are blocked on and why, insert a PENDING_* record
 * into the brlock database, recalc the retry timer, make sure we are
 * subscribed to MSG_SMB_UNLOCK, and register the cancel function.
 * lock_timeout == -1 means "never expire" (tv zeroed).
 *
 * NOTE(review): extraction dropped several parameter lines
 * (lock_num/smblctx/offset/count/lock_timeout etc.), most of the
 * brl_lock() argument list, the `blr->count = count` style line,
 * error-path returns, and the final `return true`. Verify.
 */
578 bool push_blocking_lock_request_smb2( struct byte_range_lock
*br_lck
,
579 struct smb_request
*smb1req
,
584 enum brl_type lock_type
,
585 enum brl_flavour lock_flav
,
588 uint64_t blocking_smblctx
)
590 struct smbd_server_connection
*sconn
= smb1req
->sconn
;
591 struct smbd_smb2_request
*smb2req
= smb1req
->smb2req
;
592 struct tevent_req
*req
= NULL
;
593 struct smbd_smb2_lock_state
*state
= NULL
;
594 struct blocking_lock_record
*blr
= NULL
;
595 NTSTATUS status
= NT_STATUS_OK
;
600 req
= smb2req
->subreq
;
604 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
607 state
= tevent_req_data(req
, struct smbd_smb2_lock_state
);
/* The blr lives on the request state, so it dies with the request. */
612 blr
= talloc_zero(state
, struct blocking_lock_record
);
618 if (lock_timeout
== -1) {
619 blr
->expire_time
.tv_sec
= 0;
620 blr
->expire_time
.tv_usec
= 0; /* Never expire. */
622 blr
->expire_time
= timeval_current_ofs_msec(lock_timeout
);
625 blr
->lock_num
= lock_num
;
626 blr
->smblctx
= smblctx
;
627 blr
->blocking_smblctx
= blocking_smblctx
;
628 blr
->lock_flav
= lock_flav
;
629 blr
->lock_type
= lock_type
;
630 blr
->offset
= offset
;
633 /* Specific brl_lock() implementations can fill this in. */
634 blr
->blr_private
= NULL
;
636 /* Add a pending lock record for this. */
637 status
= brl_lock(sconn
->msg_ctx
,
640 messaging_server_id(sconn
->msg_ctx
),
643 lock_type
== READ_LOCK
? PENDING_READ_LOCK
: PENDING_WRITE_LOCK
,
649 if (!NT_STATUS_IS_OK(status
)) {
650 DEBUG(0,("push_blocking_lock_request_smb2: "
651 "failed to add PENDING_LOCK record.\n"));
657 DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
661 recalc_smb2_brl_timeout(sconn
);
663 /* Ensure we'll receive messages when this is unlocked. */
664 if (!sconn
->smb2
.locks
.blocking_lock_unlock_state
) {
665 messaging_register(sconn
->msg_ctx
, sconn
,
666 MSG_SMB_UNLOCK
, received_unlock_msg
);
667 sconn
->smb2
.locks
.blocking_lock_unlock_state
= true;
670 /* allow this request to be canceled */
671 tevent_req_set_cancel_fn(req
, smbd_smb2_lock_cancel
);
676 /****************************************************************
677 Remove a pending lock record under lock.
678 *****************************************************************/
/*
 * Undo a blocked lock request: cancel the PENDING_* record in the
 * brlock database, then unlock (in reverse order) every element that
 * had already been granted before we blocked (indices
 * [0, blr->lock_num)).
 * NOTE(review): extraction dropped the brl_get_locks()/brl_lock_cancel()/
 * do_unlock() argument lists, the br_lck NULL check/TALLOC_FREE, the
 * `int i` declaration and closing braces. Verify.
 */
680 static void remove_pending_lock(struct smbd_smb2_lock_state
*state
,
681 struct blocking_lock_record
*blr
)
684 struct byte_range_lock
*br_lck
= brl_get_locks(
687 DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr
));
690 brl_lock_cancel(br_lck
,
692 messaging_server_id(blr
->fsp
->conn
->sconn
->msg_ctx
),
700 /* Remove the locks we already got. */
/* Walk backwards so later locks are released before earlier ones. */
702 for(i
= blr
->lock_num
- 1; i
>= 0; i
--) {
703 struct smbd_lock_element
*e
= &state
->locks
[i
];
705 do_unlock(blr
->fsp
->conn
->sconn
->msg_ctx
,
714 /****************************************************************
715 Re-proccess a blocking lock request.
716 This is equivalent to process_lockingX() inside smbd/blocking.c
717 *****************************************************************/
/*
 * Retry a blocked SMB2 LOCK request: attempt the remaining lock
 * elements starting at blr->lock_num. Outcomes:
 *  - all granted -> complete the subreq successfully;
 *  - a hard (non-conflict) error -> tear down and fail the subreq;
 *  - still conflicting and expire_time passed -> tear down and fail
 *    with NT_STATUS_LOCK_NOT_GRANTED;
 *  - still conflicting, not expired -> keep waiting.
 * NOTE(review): extraction dropped interior lines — the blr/fsp
 * initialisation from state, most do_lock() arguments, TALLOC_FREE
 * of br_lck, loop `break`, returns, and closing braces. Verify.
 */
719 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request
*smb2req
,
720 struct timeval tv_curr
)
722 NTSTATUS status
= NT_STATUS_UNSUCCESSFUL
;
723 struct blocking_lock_record
*blr
= NULL
;
724 struct smbd_smb2_lock_state
*state
= NULL
;
725 files_struct
*fsp
= NULL
;
727 if (!smb2req
->subreq
) {
730 state
= tevent_req_data(smb2req
->subreq
, struct smbd_smb2_lock_state
);
738 /* Try and finish off getting all the outstanding locks. */
740 for (; blr
->lock_num
< state
->lock_count
; blr
->lock_num
++) {
741 struct byte_range_lock
*br_lck
= NULL
;
742 struct smbd_lock_element
*e
= &state
->locks
[blr
->lock_num
];
744 br_lck
= do_lock(fsp
->conn
->sconn
->msg_ctx
,
753 &blr
->blocking_smblctx
,
758 if (NT_STATUS_IS_ERR(status
)) {
763 if(blr
->lock_num
== state
->lock_count
) {
765 * Success - we got all the locks.
768 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
769 "fnum=%d num_locks=%d\n",
772 (int)state
->lock_count
));
774 tevent_req_done(smb2req
->subreq
);
778 if (!NT_STATUS_EQUAL(status
,NT_STATUS_LOCK_NOT_GRANTED
) &&
779 !NT_STATUS_EQUAL(status
,NT_STATUS_FILE_LOCK_CONFLICT
)) {
781 * We have other than a "can't get lock"
782 * error. Return an error.
784 remove_pending_lock(state
, blr
);
785 tevent_req_nterror(smb2req
->subreq
, status
);
790 * We couldn't get the locks for this record on the list.
791 * If the time has expired, return a lock error.
794 if (!timeval_is_zero(&blr
->expire_time
) &&
795 timeval_compare(&blr
->expire_time
, &tv_curr
) <= 0) {
796 remove_pending_lock(state
, blr
);
797 tevent_req_nterror(smb2req
->subreq
, NT_STATUS_LOCK_NOT_GRANTED
);
802 * Still can't get all the locks - keep waiting.
805 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
806 "for file %s, fnum = %d. Still waiting....\n",
808 (int)state
->lock_count
,
816 /****************************************************************
817 Attempt to proccess all outstanding blocking locks pending on
819 *****************************************************************/
/*
 * Walk every queued SMB2 request and retry those that are in-progress
 * LOCK requests via reprocess_blocked_smb2_lock(); finally recompute
 * the retry timer. nextreq is saved first because reprocessing may
 * complete (and unlink) the current request.
 * NOTE(review): `continue` statements and closing braces were dropped
 * by the extraction.
 */
821 void process_blocking_lock_queue_smb2(
822 struct smbd_server_connection
*sconn
, struct timeval tv_curr
)
824 struct smbd_smb2_request
*smb2req
, *nextreq
;
826 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= nextreq
) {
827 const uint8_t *inhdr
;
829 nextreq
= smb2req
->next
;
831 if (smb2req
->subreq
== NULL
) {
832 /* This message has been processed. */
835 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
836 /* This message has been processed. */
840 inhdr
= (const uint8_t *)smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
841 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) == SMB2_OP_LOCK
) {
842 reprocess_blocked_smb2_lock(smb2req
, tv_curr
);
846 recalc_smb2_brl_timeout(sconn
);
849 /****************************************************************************
850 Remove any locks on this fd. Called from file_close().
851 ****************************************************************************/
853 void cancel_pending_lock_requests_by_fid_smb2(files_struct
*fsp
,
854 struct byte_range_lock
*br_lck
,
855 enum file_close_type close_type
)
857 struct smbd_server_connection
*sconn
= fsp
->conn
->sconn
;
858 struct smbd_smb2_request
*smb2req
, *nextreq
;
860 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= nextreq
) {
861 struct smbd_smb2_lock_state
*state
= NULL
;
862 files_struct
*fsp_curr
= NULL
;
863 int i
= smb2req
->current_idx
;
864 uint64_t in_file_id_volatile
;
865 struct blocking_lock_record
*blr
= NULL
;
866 const uint8_t *inhdr
;
867 const uint8_t *inbody
;
869 nextreq
= smb2req
->next
;
871 if (smb2req
->subreq
== NULL
) {
872 /* This message has been processed. */
875 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
876 /* This message has been processed. */
880 inhdr
= (const uint8_t *)smb2req
->in
.vector
[i
].iov_base
;
881 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) != SMB2_OP_LOCK
) {
882 /* Not a lock call. */
886 inbody
= (const uint8_t *)smb2req
->in
.vector
[i
+1].iov_base
;
887 in_file_id_volatile
= BVAL(inbody
, 0x10);
889 state
= tevent_req_data(smb2req
->subreq
,
890 struct smbd_smb2_lock_state
);
892 /* Strange - is this even possible ? */
896 fsp_curr
= file_fsp(state
->smb1req
, (uint16_t)in_file_id_volatile
);
897 if (fsp_curr
== NULL
) {
898 /* Strange - is this even possible ? */
902 if (fsp_curr
!= fsp
) {
903 /* It's not our fid */
909 /* Remove the entries from the lock db. */
910 brl_lock_cancel(br_lck
,
912 messaging_server_id(sconn
->msg_ctx
),
918 /* Finally end the request. */
919 if (close_type
== SHUTDOWN_CLOSE
) {
920 tevent_req_done(smb2req
->subreq
);
922 tevent_req_nterror(smb2req
->subreq
,
923 NT_STATUS_RANGE_NOT_LOCKED
);