/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
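/*
 * SMB2 LOCK/UNLOCK handling: parsing and validation of SMB2 LOCK requests,
 * dispatch to the byte-range locking backend, and management of the queue
 * of blocking (asynchronous) lock requests, including their timeouts and
 * cancellation.
 */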
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	const uint8_t *inbody;
	const int i = req->current_idx;
	uint32_t in_smbpid;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	in_smbpid = IVAL(inhdr, SMB2_HDR_PID);

	in_lock_count = CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);
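	/*
	 * Fixed body layout parsed above (0x30 bytes):
	 *   0x00  uint16 structure size (0x30)
	 *   0x02  lock count
	 *   0x04  4 bytes reserved
	 *   0x08  uint64 file id (persistent)
	 *   0x10  uint64 file id (volatile)
	 *   0x18  first lock element
	 */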
	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset	= BVAL(lock_buffer, 0x00);
	in_locks[l].length	= BVAL(lock_buffer, 0x08);
	in_locks[l].flags	= IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */
	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset	= BVAL(lock_buffer, 0x00);
		in_locks[l].length	= BVAL(lock_buffer, 0x08);
		in_locks[l].flags	= IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}
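	/*
	 * Each lock element is 0x18 bytes: 8 bytes offset, 8 bytes length,
	 * 4 bytes flags and 4 reserved bytes.  The first element travels in
	 * the fixed body; any further elements follow in the dynamic buffer
	 * walked above.
	 */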
	subreq = smbd_smb2_lock_send(req,
				     req->sconn->smb2.event_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq);
}
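/*
 * smbd_smb2_request_pending_queue() lets the request go asynchronous if the
 * lock subrequest does not complete immediately; either way the reply is
 * built in smbd_smb2_request_lock_done() once the subrequest finishes.
 */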
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid ));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		SMB_ASSERT(state);
		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
		}
		return;
	}

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
			nt_errstr(error));
	}
}
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	connection_struct *conn = smb2req->tcon->compat_conn;
	files_struct *fsp;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
			struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
		  (unsigned long long)in_file_id_volatile));

	fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
	if (fsp == NULL) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (conn != fsp->conn) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (smb2req->session->vuid != fsp->vuid) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}
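	/*
	 * The flags of the first element decide whether this request is a
	 * lock or an unlock batch; the per-element checks below then verify
	 * that every element is consistent with that choice.
	 */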
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to process
				 * all unlock elements before.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		locks[i].smblctx = in_file_id_volatile;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}
	state->locks = locks;
	state->lock_count = in_lock_count;
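	/*
	 * Hand the whole batch to the common locking backend: for an unlock
	 * request the elements are passed as the unlock set, otherwise as
	 * the (possibly blocking) lock set.
	 */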
	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 0,
					 NULL,
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 0,
					 NULL,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);

	return NT_STATUS_OK;
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}
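/*
 * Note: cancellation only marks the request as cancelled and completes the
 * tevent_req; the pending lock record is torn down and NT_STATUS_CANCELLED
 * is sent from smbd_smb2_request_lock_done(), which checks smb2req->cancelled.
 */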
/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn;

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	sconn = msg_ctx_to_sconn(msg);
	if (sconn == NULL) {
		DEBUG(1, ("could not find sconn\n"));
		return;
	}

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}
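/*
 * This handler is registered for MSG_SMB_UNLOCK the first time a blocking
 * lock is queued (see push_blocking_lock_request_smb2() below), so every
 * unlock notification re-runs the pending SMB2 lock queue.
 */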
/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}
/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */
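	/*
	 * The cap comes from the parametric option "brl:recalctime",
	 * which can be tuned in smb.conf, for example:
	 *
	 *   [global]
	 *       brl:recalctime = 5
	 *
	 * Setting it to 0 or a negative value skips the cap below.
	 */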
	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}
	sconn->smb2.locks.brl_timeout = event_add_timed(
				smbd_event_context(),
				NULL,
				next_timeout,
				brl_timeout_fn_smb2,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;
	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs(
			lock_timeout/1000,
			(lock_timeout % 1000) * 1000);
	}
	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			sconn_server_id(sconn),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);
	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, NULL,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}
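/*
 * Lifecycle of a blocking SMB2 lock: the blocking_lock_record created above
 * is re-tried by reprocess_blocked_smb2_lock() whenever an unlock message or
 * brl timeout fires; it is torn down via remove_pending_lock() on
 * cancellation or expiry, and via cancel_pending_lock_requests_by_fid_smb2()
 * when the file is closed.
 */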
/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				talloc_tos(), blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(blr->fsp->conn->sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for(i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}
/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];
		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}
	if (blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"fnum=%d num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp->fnum,
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, fnum = %d. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		(int)fsp->fnum));
}
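/*
 * Note that blr->lock_num records how many elements have already been
 * granted, so each re-try above resumes where the previous attempt stopped
 * instead of starting again from the first element.
 */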
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}
/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		uint64_t in_file_id_volatile;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;
		const uint8_t *inbody;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
		in_file_id_volatile = BVAL(inbody, 0x10);

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;
		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}