2 Unix SMB/CIFS implementation.
5 Copyright (C) Stefan Metzmacher 2009
6 Copyright (C) Jeremy Allison 2010
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include "smbd/smbd.h"
24 #include "smbd/globals.h"
25 #include "../libcli/smb/smb_common.h"
26 #include "../lib/util/tevent_ntstatus.h"
/*
 * One lock element from an SMB2 LOCK request.
 * NOTE(review): this chunk is line-exploded and the member list was
 * dropped by extraction; the process_lock code below fills .offset,
 * .length and .flags from the wire — presumably those are the members.
 */
29 struct smbd_smb2_lock_element
{
/*
 * Per-request state for an async SMB2 LOCK operation.
 * NOTE(review): extraction dropped lines here — later code also reads a
 * ->lock_count member that is not visible in this chunk; verify against
 * the original file.
 */
35 struct smbd_smb2_lock_state
{
/* The SMB2-level request this lock state belongs to. */
36 struct smbd_smb2_request
*smb2req
;
/* Fake SMB1 request used to drive the shared smbd locking code. */
37 struct smb_request
*smb1req
;
/* Non-NULL while the request is blocked waiting for a lock. */
38 struct blocking_lock_record
*blr
;
/* Array of translated lock elements handed to the lock backend. */
40 struct smbd_lock_element
*locks
;
/* Forward declarations for the async SMB2 LOCK implementation below. */
43 static void remove_pending_lock(struct smbd_smb2_lock_state
*state
,
44 struct blocking_lock_record
*blr
);
46 static struct tevent_req
*smbd_smb2_lock_send(TALLOC_CTX
*mem_ctx
,
47 struct tevent_context
*ev
,
48 struct smbd_smb2_request
*smb2req
,
50 uint64_t in_file_id_volatile
,
51 uint16_t in_lock_count
,
52 struct smbd_smb2_lock_element
*in_locks
);
53 static NTSTATUS
smbd_smb2_lock_recv(struct tevent_req
*req
);
55 static void smbd_smb2_request_lock_done(struct tevent_req
*subreq
);
/*
 * Parse and dispatch an SMB2 LOCK request.
 * Validates the fixed 0x30-byte request body, pulls the lock count and
 * file ids out of the body, unmarshalls the array of 0x18-byte lock
 * elements (first element inline in the body, the rest from the third
 * iovec), then kicks off the async smbd_smb2_lock_send() and queues the
 * request as pending.
 * NOTE(review): this chunk is line-exploded and several original lines
 * (declarations of status/inhdr/in_smbpid, closing braces, the
 * talloc_array count argument, trailing smbd_smb2_lock_send arguments)
 * are missing from view — code left byte-identical.
 */
56 NTSTATUS
smbd_smb2_request_process_lock(struct smbd_smb2_request
*req
)
59 const uint8_t *inbody
;
60 const int i
= req
->current_idx
;
62 uint16_t in_lock_count
;
63 uint64_t in_file_id_persistent
;
64 uint64_t in_file_id_volatile
;
65 struct smbd_smb2_lock_element
*in_locks
;
66 struct tevent_req
*subreq
;
67 const uint8_t *lock_buffer
;
/* Reject requests whose fixed body is not the expected 0x30 bytes. */
71 status
= smbd_smb2_request_verify_sizes(req
, 0x30);
72 if (!NT_STATUS_IS_OK(status
)) {
73 return smbd_smb2_request_error(req
, status
);
75 inhdr
= (const uint8_t *)req
->in
.vector
[i
+0].iov_base
;
76 inbody
= (const uint8_t *)req
->in
.vector
[i
+1].iov_base
;
78 in_smbpid
= IVAL(inhdr
, SMB2_HDR_PID
);
80 in_lock_count
= CVAL(inbody
, 0x02);
81 /* 0x04 - 4 bytes reserved */
82 in_file_id_persistent
= BVAL(inbody
, 0x08);
83 in_file_id_volatile
= BVAL(inbody
, 0x10);
/* At least one lock element is required by the protocol. */
85 if (in_lock_count
< 1) {
86 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
/* All elements past the first (0x18 bytes each) must fit in iovec 2. */
89 if (((in_lock_count
- 1) * 0x18) > req
->in
.vector
[i
+2].iov_len
) {
90 return smbd_smb2_request_error(req
, NT_STATUS_INVALID_PARAMETER
);
/* Without a chained fsp, the two file ids must match (compat rule). */
93 if (req
->compat_chain_fsp
) {
95 } else if (in_file_id_persistent
!= in_file_id_volatile
) {
96 return smbd_smb2_request_error(req
, NT_STATUS_FILE_CLOSED
);
99 in_locks
= talloc_array(req
, struct smbd_smb2_lock_element
,
101 if (in_locks
== NULL
) {
102 return smbd_smb2_request_error(req
, NT_STATUS_NO_MEMORY
);
/* First lock element sits inline in the body at offset 0x18. */
106 lock_buffer
= inbody
+ 0x18;
108 in_locks
[l
].offset
= BVAL(lock_buffer
, 0x00);
109 in_locks
[l
].length
= BVAL(lock_buffer
, 0x08);
110 in_locks
[l
].flags
= IVAL(lock_buffer
, 0x10);
111 /* 0x14 - 4 reserved bytes */
/* Remaining elements come from the dynamic part (iovec i+2). */
113 lock_buffer
= (const uint8_t *)req
->in
.vector
[i
+2].iov_base
;
115 for (l
=1; l
< in_lock_count
; l
++) {
116 in_locks
[l
].offset
= BVAL(lock_buffer
, 0x00);
117 in_locks
[l
].length
= BVAL(lock_buffer
, 0x08);
118 in_locks
[l
].flags
= IVAL(lock_buffer
, 0x10);
119 /* 0x14 - 4 reserved bytes */
/* Hand off to the async lock engine; completion via callback below. */
124 subreq
= smbd_smb2_lock_send(req
,
125 req
->sconn
->smb2
.event_ctx
,
131 if (subreq
== NULL
) {
132 return smbd_smb2_request_error(req
, NT_STATUS_NO_MEMORY
);
134 tevent_req_set_callback(subreq
, smbd_smb2_request_lock_done
, req
);
/* Park the request; the reply is sent when the subreq completes. */
136 return smbd_smb2_request_pending_queue(req
, subreq
);
/*
 * Completion callback for the async SMB2 LOCK subrequest.
 * If the request was cancelled, tears down the pending blocking-lock
 * record and replies NT_STATUS_CANCELLED; otherwise collects the result
 * via smbd_smb2_lock_recv() and sends either an error or the 4-byte
 * success body.  Any transport-level send failure terminates the whole
 * connection.
 * NOTE(review): line-exploded chunk; some original lines (returns,
 * closing braces, nt_errstr arguments, outbody declaration) are not
 * visible — code left byte-identical.
 */
139 static void smbd_smb2_request_lock_done(struct tevent_req
*subreq
)
141 struct smbd_smb2_request
*smb2req
= tevent_req_callback_data(subreq
,
142 struct smbd_smb2_request
);
145 NTSTATUS error
; /* transport error */
/* Cancellation path: the cancel fn set smb2req->cancelled. */
147 if (smb2req
->cancelled
) {
148 const uint8_t *inhdr
= (const uint8_t *)
149 smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
150 uint64_t mid
= BVAL(inhdr
, SMB2_HDR_MESSAGE_ID
);
151 struct smbd_smb2_lock_state
*state
;
153 DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
154 (unsigned long long)mid
));
156 state
= tevent_req_data(smb2req
->subreq
,
157 struct smbd_smb2_lock_state
);
/* A cancelled lock request must have a pending blocking record. */
160 SMB_ASSERT(state
->blr
);
162 remove_pending_lock(state
, state
->blr
);
164 error
= smbd_smb2_request_error(smb2req
, NT_STATUS_CANCELLED
);
165 if (!NT_STATUS_IS_OK(error
)) {
166 smbd_server_connection_terminate(smb2req
->sconn
,
/* Normal completion: fetch the lock result. */
173 status
= smbd_smb2_lock_recv(subreq
);
175 if (!NT_STATUS_IS_OK(status
)) {
176 error
= smbd_smb2_request_error(smb2req
, status
);
177 if (!NT_STATUS_IS_OK(error
)) {
178 smbd_server_connection_terminate(smb2req
->sconn
,
/* Success: build the fixed 4-byte SMB2 LOCK response body. */
185 outbody
= data_blob_talloc(smb2req
->out
.vector
, NULL
, 0x04);
186 if (outbody
.data
== NULL
) {
187 error
= smbd_smb2_request_error(smb2req
, NT_STATUS_NO_MEMORY
);
188 if (!NT_STATUS_IS_OK(error
)) {
189 smbd_server_connection_terminate(smb2req
->sconn
,
196 SSVAL(outbody
.data
, 0x00, 0x04); /* struct size */
197 SSVAL(outbody
.data
, 0x02, 0); /* reserved */
199 error
= smbd_smb2_request_done(smb2req
, outbody
, NULL
);
200 if (!NT_STATUS_IS_OK(error
)) {
201 smbd_server_connection_terminate(smb2req
->sconn
,
/*
 * Async worker for SMB2 LOCK.
 * Resolves and validates the target fsp (connection and session must
 * match or the request fails with NT_STATUS_FILE_CLOSED), validates the
 * lock-element flag combinations per [MS-SMB2], translates the SMB2 lock
 * elements into smbd_lock_element form, and calls smbd_do_locking().
 * A FILE_LOCK_CONFLICT result is mapped to LOCK_NOT_GRANTED.
 * NOTE(review): line-exploded chunk; several original lines (extra
 * parameters, tevent_req_create NULL-check, some case bodies, the
 * smbd_do_locking argument lists, async-pending branch, closing braces)
 * are missing from view — code left byte-identical.
 */
207 static struct tevent_req
*smbd_smb2_lock_send(TALLOC_CTX
*mem_ctx
,
208 struct tevent_context
*ev
,
209 struct smbd_smb2_request
*smb2req
,
211 uint64_t in_file_id_volatile
,
212 uint16_t in_lock_count
,
213 struct smbd_smb2_lock_element
*in_locks
)
215 struct tevent_req
*req
;
216 struct smbd_smb2_lock_state
*state
;
217 struct smb_request
*smb1req
;
218 connection_struct
*conn
= smb2req
->tcon
->compat_conn
;
/* -1 means "wait forever" in the locking backend. */
220 int32_t timeout
= -1;
221 bool isunlock
= false;
223 struct smbd_lock_element
*locks
;
227 req
= tevent_req_create(mem_ctx
, &state
,
228 struct smbd_smb2_lock_state
);
232 state
->smb2req
= smb2req
;
233 smb2req
->subreq
= req
; /* So we can find this when going async. */
/* Build a fake SMB1 request so shared smbd locking code can be used. */
235 smb1req
= smbd_smb2_fake_smb_request(smb2req
);
236 if (tevent_req_nomem(smb1req
, req
)) {
237 return tevent_req_post(req
, ev
);
239 state
->smb1req
= smb1req
;
241 DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
242 (unsigned long long)in_file_id_volatile
));
244 fsp
= file_fsp(smb1req
, (uint16_t)in_file_id_volatile
);
246 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
247 return tevent_req_post(req
, ev
);
/* The fsp must belong to the tcon this request arrived on. */
249 if (conn
!= fsp
->conn
) {
250 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
251 return tevent_req_post(req
, ev
);
/* ...and to the same session (vuid check). */
253 if (smb2req
->session
->vuid
!= fsp
->vuid
) {
254 tevent_req_nterror(req
, NT_STATUS_FILE_CLOSED
);
255 return tevent_req_post(req
, ev
);
258 locks
= talloc_array(state
, struct smbd_lock_element
, in_lock_count
);
260 tevent_req_nterror(req
, NT_STATUS_NO_MEMORY
);
261 return tevent_req_post(req
, ev
);
/* The first element's flags determine the mode for the whole batch. */
264 switch (in_locks
[0].flags
) {
265 case SMB2_LOCK_FLAG_SHARED
:
266 case SMB2_LOCK_FLAG_EXCLUSIVE
:
/* Blocking (non-FAIL_IMMEDIATELY) requests allow only one element. */
267 if (in_lock_count
> 1) {
268 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
269 return tevent_req_post(req
, ev
);
274 case SMB2_LOCK_FLAG_SHARED
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
275 case SMB2_LOCK_FLAG_EXCLUSIVE
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
279 case SMB2_LOCK_FLAG_UNLOCK
:
280 /* only the first lock gives the UNLOCK bit - see
287 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
288 return tevent_req_post(req
, ev
);
/* Validate every element's flags and translate to backend form. */
291 for (i
=0; i
<in_lock_count
; i
++) {
292 bool invalid
= false;
294 switch (in_locks
[i
].flags
) {
295 case SMB2_LOCK_FLAG_SHARED
:
296 case SMB2_LOCK_FLAG_EXCLUSIVE
:
302 tevent_req_nterror(req
,
303 NT_STATUS_INVALID_PARAMETER
);
304 return tevent_req_post(req
, ev
);
308 case SMB2_LOCK_FLAG_SHARED
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
309 case SMB2_LOCK_FLAG_EXCLUSIVE
|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY
:
315 case SMB2_LOCK_FLAG_UNLOCK
:
317 tevent_req_nterror(req
,
318 NT_STATUS_INVALID_PARAMETER
);
319 return tevent_req_post(req
, ev
);
326 * is the first element was a UNLOCK
327 * we need to deferr the error response
328 * to the backend, because we need to process
329 * all unlock elements before
334 tevent_req_nterror(req
, NT_STATUS_INVALID_PARAMETER
);
335 return tevent_req_post(req
, ev
);
/* The volatile file id doubles as the smblctx lock owner key. */
338 locks
[i
].smblctx
= in_file_id_volatile
;
339 locks
[i
].offset
= in_locks
[i
].offset
;
340 locks
[i
].count
= in_locks
[i
].length
;
342 if (in_locks
[i
].flags
& SMB2_LOCK_FLAG_EXCLUSIVE
) {
343 locks
[i
].brltype
= WRITE_LOCK
;
344 } else if (in_locks
[i
].flags
& SMB2_LOCK_FLAG_SHARED
) {
345 locks
[i
].brltype
= READ_LOCK
;
346 } else if (invalid
) {
348 * this is an invalid UNLOCK element
349 * and the backend needs to test for
350 * brltype != UNLOCK_LOCK and return
351 * NT_STATUS_INVALID_PARAMER
353 locks
[i
].brltype
= READ_LOCK
;
355 locks
[i
].brltype
= UNLOCK_LOCK
;
358 DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
359 "smblctx = %llu type %d\n",
361 (unsigned long long)locks
[i
].offset
,
362 (unsigned long long)locks
[i
].count
,
363 (unsigned long long)locks
[i
].smblctx
,
364 (int)locks
[i
].brltype
));
367 state
->locks
= locks
;
368 state
->lock_count
= in_lock_count
;
/* Two smbd_do_locking() call sites — presumably the unlock vs. lock
 * paths; the argument lists are not visible in this chunk. */
371 status
= smbd_do_locking(smb1req
, fsp
,
380 status
= smbd_do_locking(smb1req
, fsp
,
389 if (!NT_STATUS_IS_OK(status
)) {
390 if (NT_STATUS_EQUAL(status
, NT_STATUS_FILE_LOCK_CONFLICT
)) {
/* Windows maps a conflict to LOCK_NOT_GRANTED for SMB2. */
391 status
= NT_STATUS_LOCK_NOT_GRANTED
;
393 tevent_req_nterror(req
, status
);
394 return tevent_req_post(req
, ev
);
401 tevent_req_done(req
);
402 return tevent_req_post(req
, ev
);
/*
 * Collect the result of smbd_smb2_lock_send().
 * Returns the stored NT error (if any) and releases the tevent_req.
 * NOTE(review): the status declaration, the error-path return and the
 * final return are missing from this line-exploded view.
 */
405 static NTSTATUS
smbd_smb2_lock_recv(struct tevent_req
*req
)
409 if (tevent_req_is_nterror(req
, &status
)) {
410 tevent_req_received(req
);
414 tevent_req_received(req
);
418 /****************************************************************
419 Cancel an outstanding blocking lock request.
420 *****************************************************************/
/*
 * tevent cancel function for a pending SMB2 LOCK request.
 * Marks the owning smb2req as cancelled and completes the request; the
 * done-callback then sends NT_STATUS_CANCELLED and removes the pending
 * blocking-lock record.
 */
422 static bool smbd_smb2_lock_cancel(struct tevent_req
*req
)
424 struct smbd_smb2_request
*smb2req
= NULL
;
425 struct smbd_smb2_lock_state
*state
= tevent_req_data(req
,
426 struct smbd_smb2_lock_state
);
/* Nothing to cancel if the state was never linked to an smb2req. */
431 if (!state
->smb2req
) {
435 smb2req
= state
->smb2req
;
436 smb2req
->cancelled
= true;
/* Completing the req triggers smbd_smb2_request_lock_done(). */
438 tevent_req_done(req
);
442 /****************************************************************
443 Got a message saying someone unlocked a file. Re-schedule all
444 blocking lock requests as we don't know if anything overlapped.
445 *****************************************************************/
/*
 * MSG_SMB_UNLOCK handler: another process released a byte-range lock,
 * so re-scan the whole SMB2 blocking-lock queue (we don't know which
 * pending requests the unlock may have unblocked).
 * NOTE(review): some parameter lines and the sconn NULL-check branch are
 * missing from this line-exploded view.
 */
447 static void received_unlock_msg(struct messaging_context
*msg
,
450 struct server_id server_id
,
453 struct smbd_server_connection
*sconn
;
455 DEBUG(10,("received_unlock_msg (SMB2)\n"));
457 sconn
= msg_ctx_to_sconn(msg
);
459 DEBUG(1, ("could not find sconn\n"));
462 process_blocking_lock_queue_smb2(sconn
, timeval_current());
465 /****************************************************************
466 Function to get the blr on a pending record.
467 *****************************************************************/
/*
 * Return the blocking_lock_record of a pending SMB2 LOCK request, or
 * (presumably) NULL when the request is not an in-progress LOCK —
 * the early-return statements are missing from this line-exploded view.
 */
469 struct blocking_lock_record
*get_pending_smb2req_blr(struct smbd_smb2_request
*smb2req
)
471 struct smbd_smb2_lock_state
*state
= NULL
;
472 const uint8_t *inhdr
;
/* No subrequest => nothing pending on this smb2req. */
477 if (smb2req
->subreq
== NULL
) {
/* Already completed subrequests carry no pending lock. */
480 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
/* Only SMB2 LOCK opcodes can own a blocking lock record. */
483 inhdr
= (const uint8_t *)smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
484 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) != SMB2_OP_LOCK
) {
487 state
= tevent_req_data(smb2req
->subreq
,
488 struct smbd_smb2_lock_state
);
494 /****************************************************************
495 Set up the next brl timeout.
496 *****************************************************************/
/*
 * Recompute and (re)install the single timer used to expire pending
 * SMB2 blocking locks: walk all in-flight requests, take the minimum
 * expiry time, clamp it by brl:recalctime, and arm a timed event.
 * NOTE(review): line-exploded chunk; several lines (blr NULL check,
 * continue statements, the event_add_timed argument list, the return
 * statements) are missing from view — code left byte-identical.
 */
498 static bool recalc_smb2_brl_timeout(struct smbd_server_connection
*sconn
)
500 struct smbd_smb2_request
*smb2req
;
501 struct timeval next_timeout
= timeval_zero();
502 int max_brl_timeout
= lp_parm_int(-1, "brl", "recalctime", 5);
/* Drop any previously armed timer before recalculating. */
504 TALLOC_FREE(sconn
->smb2
.locks
.brl_timeout
);
506 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= smb2req
->next
) {
507 struct blocking_lock_record
*blr
=
508 get_pending_smb2req_blr(smb2req
);
512 if (timeval_is_zero(&blr
->expire_time
)) {
514 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
515 * a POSIX lock, so calculate a timeout of
516 * 10 seconds into the future.
518 if (blr
->blocking_smblctx
== 0xFFFFFFFFFFFFFFFFLL
) {
519 struct timeval psx_to
= timeval_current_ofs(10, 0);
520 next_timeout
= timeval_brl_min(&next_timeout
, &psx_to
);
526 next_timeout
= timeval_brl_min(&next_timeout
, &blr
->expire_time
);
/* No pending lock has a finite expiry: nothing to arm. */
529 if (timeval_is_zero(&next_timeout
)) {
530 DEBUG(10, ("recalc_smb2_brl_timeout:Next "
531 "timeout = Infinite.\n"));
536 * To account for unclean shutdowns by clients we need a
537 * maximum timeout that we use for checking pending locks. If
538 * we have any pending locks at all, then check if the pending
539 * lock can continue at least every brl:recalctime seconds
540 * (default 5 seconds).
542 * This saves us needing to do a message_send_all() in the
543 * SIGCHLD handler in the parent daemon. That
544 * message_send_all() caused O(n^2) work to be done when IP
545 * failovers happened in clustered Samba, which could make the
546 * entire system unusable for many minutes.
549 if (max_brl_timeout
> 0) {
550 struct timeval min_to
= timeval_current_ofs(max_brl_timeout
, 0);
551 next_timeout
= timeval_brl_min(&next_timeout
, &min_to
);
555 struct timeval cur
, from_now
;
557 cur
= timeval_current();
558 from_now
= timeval_until(&cur
, &next_timeout
);
559 DEBUG(10, ("recalc_smb2_brl_timeout: Next "
560 "timeout = %d.%d seconds from now.\n",
561 (int)from_now
.tv_sec
, (int)from_now
.tv_usec
));
/* Arm the timer; failure path (returns false) follows. */
564 sconn
->smb2
.locks
.brl_timeout
= event_add_timed(
565 server_event_context(),
570 if (!sconn
->smb2
.locks
.brl_timeout
) {
576 /****************************************************************
577 Get an SMB2 lock reqeust to go async. lock_timeout should
579 *****************************************************************/
/*
 * Turn an SMB2 LOCK request that hit a conflict into a pending blocking
 * lock: allocate a blocking_lock_record, compute its expiry from
 * lock_timeout, register a PENDING_{READ,WRITE}_LOCK in the brl db,
 * recalc the timeout timer, subscribe to MSG_SMB_UNLOCK once per
 * connection, and install the cancel function.
 * NOTE(review): line-exploded chunk; several parameters (lock_timeout,
 * lock_num, smblctx, offset, count), NULL checks, some argument lists
 * and the return statements are missing from view — code left
 * byte-identical.
 */
581 bool push_blocking_lock_request_smb2( struct byte_range_lock
*br_lck
,
582 struct smb_request
*smb1req
,
587 enum brl_type lock_type
,
588 enum brl_flavour lock_flav
,
591 uint64_t blocking_smblctx
)
593 struct smbd_server_connection
*sconn
= smb1req
->sconn
;
594 struct smbd_smb2_request
*smb2req
= smb1req
->smb2req
;
595 struct tevent_req
*req
= NULL
;
596 struct smbd_smb2_lock_state
*state
= NULL
;
597 struct blocking_lock_record
*blr
= NULL
;
598 NTSTATUS status
= NT_STATUS_OK
;
603 req
= smb2req
->subreq
;
607 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
610 state
= tevent_req_data(req
, struct smbd_smb2_lock_state
);
/* The record is parented on state so it dies with the request. */
615 blr
= talloc_zero(state
, struct blocking_lock_record
);
621 if (lock_timeout
== -1) {
622 blr
->expire_time
.tv_sec
= 0;
623 blr
->expire_time
.tv_usec
= 0; /* Never expire. */
625 blr
->expire_time
= timeval_current_ofs_msec(lock_timeout
);
628 blr
->lock_num
= lock_num
;
629 blr
->smblctx
= smblctx
;
630 blr
->blocking_smblctx
= blocking_smblctx
;
631 blr
->lock_flav
= lock_flav
;
632 blr
->lock_type
= lock_type
;
633 blr
->offset
= offset
;
636 /* Specific brl_lock() implementations can fill this in. */
637 blr
->blr_private
= NULL
;
639 /* Add a pending lock record for this. */
640 status
= brl_lock(sconn
->msg_ctx
,
643 sconn_server_id(sconn
),
646 lock_type
== READ_LOCK
? PENDING_READ_LOCK
: PENDING_WRITE_LOCK
,
652 if (!NT_STATUS_IS_OK(status
)) {
653 DEBUG(0,("push_blocking_lock_request_smb2: "
654 "failed to add PENDING_LOCK record.\n"));
660 DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
/* Re-arm the expiry timer now that a new pending lock exists. */
664 recalc_smb2_brl_timeout(sconn
);
666 /* Ensure we'll receive messages when this is unlocked. */
667 if (!sconn
->smb2
.locks
.blocking_lock_unlock_state
) {
668 messaging_register(sconn
->msg_ctx
, NULL
,
669 MSG_SMB_UNLOCK
, received_unlock_msg
);
670 sconn
->smb2
.locks
.blocking_lock_unlock_state
= true;
673 /* allow this request to be canceled */
674 tevent_req_set_cancel_fn(req
, smbd_smb2_lock_cancel
);
679 /****************************************************************
680 Remove a pending lock record under lock.
681 *****************************************************************/
/*
 * Undo a pending blocking lock: cancel the PENDING_LOCK entry in the
 * brl database, then release (in reverse order) every lock element the
 * request had already acquired before it blocked.
 * NOTE(review): line-exploded chunk; the brl_get_locks/brl_lock_cancel
 * and do_unlock argument lists, the br_lck NULL check, the loop
 * variable declaration and TALLOC_FREE are missing from view.
 */
683 static void remove_pending_lock(struct smbd_smb2_lock_state
*state
,
684 struct blocking_lock_record
*blr
)
687 struct byte_range_lock
*br_lck
= brl_get_locks(
690 DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr
));
693 brl_lock_cancel(br_lck
,
695 sconn_server_id(blr
->fsp
->conn
->sconn
),
703 /* Remove the locks we already got. */
705 for(i
= blr
->lock_num
- 1; i
>= 0; i
--) {
706 struct smbd_lock_element
*e
= &state
->locks
[i
];
708 do_unlock(blr
->fsp
->conn
->sconn
->msg_ctx
,
717 /****************************************************************
718 Re-proccess a blocking lock request.
719 This is equivalent to process_lockingX() inside smbd/blocking.c
720 *****************************************************************/
/*
 * Retry a blocked SMB2 LOCK request (SMB2 analogue of SMB1
 * process_lockingX() in smbd/blocking.c).  Attempts each remaining lock
 * element via do_lock(); on full success completes the subreq, on a
 * hard error or expired timeout removes the pending record and fails
 * the subreq, otherwise leaves the request waiting.
 * NOTE(review): line-exploded chunk; the state/blr/fsp setup between
 * the declarations and the loop, the do_lock argument list, break
 * statements, returns and closing braces are missing from view.
 */
722 static void reprocess_blocked_smb2_lock(struct smbd_smb2_request
*smb2req
,
723 struct timeval tv_curr
)
725 NTSTATUS status
= NT_STATUS_UNSUCCESSFUL
;
726 struct blocking_lock_record
*blr
= NULL
;
727 struct smbd_smb2_lock_state
*state
= NULL
;
728 files_struct
*fsp
= NULL
;
730 if (!smb2req
->subreq
) {
733 state
= tevent_req_data(smb2req
->subreq
, struct smbd_smb2_lock_state
);
741 /* Try and finish off getting all the outstanding locks. */
743 for (; blr
->lock_num
< state
->lock_count
; blr
->lock_num
++) {
744 struct byte_range_lock
*br_lck
= NULL
;
745 struct smbd_lock_element
*e
= &state
->locks
[blr
->lock_num
];
747 br_lck
= do_lock(fsp
->conn
->sconn
->msg_ctx
,
756 &blr
->blocking_smblctx
,
/* Stop at the first element that still cannot be granted. */
761 if (NT_STATUS_IS_ERR(status
)) {
766 if(blr
->lock_num
== state
->lock_count
) {
768 * Success - we got all the locks.
771 DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
772 "fnum=%d num_locks=%d\n",
775 (int)state
->lock_count
));
777 tevent_req_done(smb2req
->subreq
);
781 if (!NT_STATUS_EQUAL(status
,NT_STATUS_LOCK_NOT_GRANTED
) &&
782 !NT_STATUS_EQUAL(status
,NT_STATUS_FILE_LOCK_CONFLICT
)) {
784 * We have other than a "can't get lock"
785 * error. Return an error.
787 remove_pending_lock(state
, blr
);
788 tevent_req_nterror(smb2req
->subreq
, status
);
793 * We couldn't get the locks for this record on the list.
794 * If the time has expired, return a lock error.
797 if (!timeval_is_zero(&blr
->expire_time
) &&
798 timeval_compare(&blr
->expire_time
, &tv_curr
) <= 0) {
799 remove_pending_lock(state
, blr
);
800 tevent_req_nterror(smb2req
->subreq
, NT_STATUS_LOCK_NOT_GRANTED
);
805 * Still can't get all the locks - keep waiting.
808 DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
809 "for file %s, fnum = %d. Still waiting....\n",
811 (int)state
->lock_count
,
819 /****************************************************************
820 Attempt to proccess all outstanding blocking locks pending on
822 *****************************************************************/
/*
 * Walk every in-flight SMB2 request on the connection and retry those
 * that are pending LOCK operations, then re-arm the brl timeout timer.
 * nextreq is captured before each retry because reprocessing may
 * complete (and free) the current request.
 * NOTE(review): continue statements and closing braces are missing from
 * this line-exploded view — code left byte-identical.
 */
824 void process_blocking_lock_queue_smb2(
825 struct smbd_server_connection
*sconn
, struct timeval tv_curr
)
827 struct smbd_smb2_request
*smb2req
, *nextreq
;
829 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= nextreq
) {
830 const uint8_t *inhdr
;
832 nextreq
= smb2req
->next
;
834 if (smb2req
->subreq
== NULL
) {
835 /* This message has been processed. */
838 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
839 /* This message has been processed. */
/* Only LOCK requests can be blocked on a byte-range lock. */
843 inhdr
= (const uint8_t *)smb2req
->in
.vector
[smb2req
->current_idx
].iov_base
;
844 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) == SMB2_OP_LOCK
) {
845 reprocess_blocked_smb2_lock(smb2req
, tv_curr
);
849 recalc_smb2_brl_timeout(sconn
);
852 /****************************************************************************
853 Remove any locks on this fd. Called from file_close().
854 ****************************************************************************/
856 void cancel_pending_lock_requests_by_fid_smb2(files_struct
*fsp
,
857 struct byte_range_lock
*br_lck
,
858 enum file_close_type close_type
)
860 struct smbd_server_connection
*sconn
= fsp
->conn
->sconn
;
861 struct smbd_smb2_request
*smb2req
, *nextreq
;
863 for (smb2req
= sconn
->smb2
.requests
; smb2req
; smb2req
= nextreq
) {
864 struct smbd_smb2_lock_state
*state
= NULL
;
865 files_struct
*fsp_curr
= NULL
;
866 int i
= smb2req
->current_idx
;
867 uint64_t in_file_id_volatile
;
868 struct blocking_lock_record
*blr
= NULL
;
869 const uint8_t *inhdr
;
870 const uint8_t *inbody
;
872 nextreq
= smb2req
->next
;
874 if (smb2req
->subreq
== NULL
) {
875 /* This message has been processed. */
878 if (!tevent_req_is_in_progress(smb2req
->subreq
)) {
879 /* This message has been processed. */
883 inhdr
= (const uint8_t *)smb2req
->in
.vector
[i
].iov_base
;
884 if (SVAL(inhdr
, SMB2_HDR_OPCODE
) != SMB2_OP_LOCK
) {
885 /* Not a lock call. */
889 inbody
= (const uint8_t *)smb2req
->in
.vector
[i
+1].iov_base
;
890 in_file_id_volatile
= BVAL(inbody
, 0x10);
892 state
= tevent_req_data(smb2req
->subreq
,
893 struct smbd_smb2_lock_state
);
895 /* Strange - is this even possible ? */
899 fsp_curr
= file_fsp(state
->smb1req
, (uint16_t)in_file_id_volatile
);
900 if (fsp_curr
== NULL
) {
901 /* Strange - is this even possible ? */
905 if (fsp_curr
!= fsp
) {
906 /* It's not our fid */
912 /* Remove the entries from the lock db. */
913 brl_lock_cancel(br_lck
,
915 sconn_server_id(sconn
),
921 /* Finally end the request. */
922 if (close_type
== SHUTDOWN_CLOSE
) {
923 tevent_req_done(smb2req
->subreq
);
925 tevent_req_nterror(smb2req
->subreq
,
926 NT_STATUS_RANGE_NOT_LOCKED
);