/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
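/*
 * In-memory copy of one SMB2 lock element. On the wire each element is
 * 0x18 bytes: an 8-byte offset, an 8-byte length, 4 bytes of flags and
 * 4 reserved bytes (see the parsing code below).
 */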
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};
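/*
 * Per-request state for an SMB2 LOCK call. The SMB2 lock elements are
 * translated into the generic struct smbd_lock_element array below so
 * the common byte-range locking code can service them; blr is only set
 * once the request has been pushed onto the blocking lock queue.
 */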
struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
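/*
 * Parse an SMB2 LOCK request. The fixed body is 0x30 bytes:
 *   0x00 StructureSize (0x30)
 *   0x02 LockCount
 *   0x04 4 reserved bytes
 *   0x08 persistent file id
 *   0x10 volatile file id
 *   0x18 first 24-byte lock element (offset, length, flags, reserved)
 * Any additional lock elements follow in the dynamic buffer, 0x18 bytes
 * each.
 */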
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	const uint8_t *inbody;
	const int i = req->current_idx;
	uint32_t in_smbpid;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	in_smbpid = IVAL(inhdr, SMB2_HDR_PID);

	in_lock_count = CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	subreq = smbd_smb2_lock_send(req,
				     req->sconn->ev_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
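/*
 * Completion handler: either report cancellation, propagate the lock
 * status as an SMB2 error, or marshal the 4-byte LOCK success response
 * (2 bytes StructureSize, 2 bytes reserved).
 */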
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid ));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);

		SMB_ASSERT(state);
		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
			return;
		}
		return;
	}

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
						 nt_errstr(error));
		return;
	}
}
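/*
 * Validate the lock elements and translate them into the generic
 * struct smbd_lock_element form, then hand them to smbd_do_locking().
 * The flags of the first element decide whether this is a lock or an
 * unlock run and whether the request may block (see MS-SMB2 3.3.5.14).
 */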
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	connection_struct *conn = smb2req->tcon->compat_conn;
	files_struct *fsp;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
		  (unsigned long long)in_file_id_volatile));

	fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
	if (fsp == NULL) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (conn != fsp->conn) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (smb2req->session->vuid != fsp->vuid) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements before
				 * returning the error.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = in_file_id_volatile;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}
	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 0,
					 NULL,
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 0,
					 NULL,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}
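/* Collect the final NTSTATUS of the lock subrequest. */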
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
			struct smbd_smb2_lock_state);

	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}
/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn =
		talloc_get_type_abort(private_data,
		struct smbd_server_connection);

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}
/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}
/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */
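	/*
	 * For example, setting "brl:recalctime = 2" in smb.conf (a
	 * hypothetical tuning; 5 is the default read above via
	 * lp_parm_int()) would cap the wait below at 2 seconds between
	 * rechecks of pending locks.
	 */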
	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = tevent_add_timer(
				sconn->ev_ctx,
				NULL,
				next_timeout,
				brl_timeout_fn,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			messaging_server_id(sconn->msg_ctx),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, sconn,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}
/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(blr->fsp->conn->sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for (i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}
/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if (blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"fnum=%d num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp->fnum,
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, fnum = %d. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		(int)fsp->fnum));

	return;
}
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}
/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		uint64_t in_file_id_volatile;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;
		const uint8_t *inbody;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
		in_file_id_volatile = BVAL(inbody, 0x10);

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}