/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
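
/*
 * This file implements the SMB2 LOCK command: parsing the request,
 * translating SMB2 lock elements into byte-range lock (brl) requests,
 * and managing the queue of blocking lock requests that could not be
 * granted immediately.
 */
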
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);

NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);
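
	/*
	 * Fixed part of the SMB2 LOCK request body (48 bytes, see
	 * MS-SMB2 2.2.26):
	 *   0x00  StructureSize (2 bytes, must be 0x30)
	 *   0x02  LockCount     (2 bytes)
	 *   0x04  LockSequence  (4 bytes, not used here)
	 *   0x08  FileId        (16 bytes: persistent + volatile)
	 *   0x18  first lock element (0x18 bytes per element)
	 */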
	in_lock_count		= CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent	= BVAL(inbody, 0x08);
	in_file_id_volatile	= BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset	= BVAL(lock_buffer, 0x00);
	in_locks[l].length	= BVAL(lock_buffer, 0x08);
	in_locks[l].flags	= IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */
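
	/*
	 * Only the first lock element fits into the fixed body; any
	 * further elements are carried in the dynamic part of the
	 * request, 0x18 bytes per element.
	 */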
	lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset	= BVAL(lock_buffer, 0x00);
		in_locks[l].length	= BVAL(lock_buffer, 0x08);
		in_locks[l].flags	= IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
				     req, in_fsp,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
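
	/*
	 * If the lock cannot be granted immediately the subrequest
	 * stays in progress; queueing it as pending lets the server
	 * send an interim response and complete the lock later from
	 * the blocking lock queue.
	 */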
	return smbd_smb2_request_pending_queue(req, subreq, 500);
}

static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->xconn,
						 nt_errstr(error));
		return;
	}
}

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
			struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
		  fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}
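
	/*
	 * Summary of the mapping above: a plain SHARED/EXCLUSIVE lock
	 * may block indefinitely (timeout = -1) and only a single
	 * element is allowed; with FAIL_IMMEDIATELY set the attempt
	 * must not block (timeout = 0); UNLOCK switches the whole
	 * request onto the unlock path.
	 */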
	if (!isunlock && (in_lock_count > 1)) {

		/*
		 * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
		 * have more than one lock and one of those is blocking.
		 */

		for (i=0; i<in_lock_count; i++) {
			uint32_t flags = in_locks[i].flags;

			if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
				tevent_req_nterror(
					req, NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
		}
	}

	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements before
				 * returning the error.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count  = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}

	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_unlocking(smb1req, fsp,
					   in_lock_count, locks);
		async = false;
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
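
	/*
	 * If the backend went async a blocking_lock_record has been
	 * queued via push_blocking_lock_request_smb2(); the request
	 * is completed later by reprocess_blocked_smb2_lock() or
	 * cancelled via smbd_smb2_lock_cancel().
	 */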
	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}

static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}

/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;

	remove_pending_lock(state, state->blr);
	tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);

	/*
	 * If the request is canceled because of logoff, tdis or close
	 * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
	 * NT_STATUS_CANCELLED.
	 *
	 * Note that the close case is handled in
	 * cancel_pending_lock_requests_by_fid_smb2(SHUTDOWN_CLOSE)
	 * for now.
	 */

	if (!NT_STATUS_IS_OK(smb2req->session->status)) {
		tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
		return true;
	}

	if (!NT_STATUS_IS_OK(smb2req->tcon->status)) {
		tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
		return true;
	}

	tevent_req_nterror(req, NT_STATUS_CANCELLED);
	return true;
}

/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn =
		talloc_get_type_abort(private_data,
		struct smbd_server_connection);

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}

/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}

/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbXsrv_connection *xconn = sconn->conn;
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = xconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */
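
	/*
	 * The cap can be tuned with the "brl:recalctime" parametric
	 * option in smb.conf, e.g. "brl:recalctime = 2"; a value of 0
	 * or less disables the cap and leaves only the per-lock
	 * expiry times.
	 */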
	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = tevent_add_timer(
		sconn->ev_ctx,
		NULL,
		next_timeout,
		brl_timeout_fn,
		sconn);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}

/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;
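
	/*
	 * The PENDING_READ_LOCK/PENDING_WRITE_LOCK entry added below
	 * makes this waiter visible in the byte-range lock database,
	 * so that an unlock elsewhere can notify us (MSG_SMB_UNLOCK)
	 * and trigger a retry via received_unlock_msg().
	 */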
	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			messaging_server_id(sconn->msg_ctx),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, sconn,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}

/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	struct byte_range_lock *br_lck = brl_get_locks(
		state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(blr->fsp->conn->sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav);
		TALLOC_FREE(br_lck);
	}
}

/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct byte_range_lock *br_lck = NULL;
	struct smbd_lock_element *e = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* We can only have one blocked lock in SMB2. */
	SMB_ASSERT(state->lock_count == 1);
	SMB_ASSERT(blr->lock_num == 0);
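
	/*
	 * Only a single element can ever block here:
	 * smbd_smb2_lock_send() rejects multi-element requests unless
	 * every element carries SMB2_LOCK_FLAG_FAIL_IMMEDIATELY, and
	 * those never go async.
	 */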
	/* Try and get the outstanding lock. */
	e = &state->locks[blr->lock_num];
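
	/*
	 * do_lock() reports who currently holds the conflicting lock
	 * in blr->blocking_smblctx; recalc_smb2_brl_timeout() uses
	 * that to spot POSIX blockers (0xFFFFFFFFFFFFFFFF) and keep
	 * polling them.
	 */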
	br_lck = do_lock(fsp->conn->sconn->msg_ctx,
			fsp,
			e->smblctx,
			e->count,
			e->offset,
			e->brltype,
			WINDOWS_LOCK,
			true,
			&status,
			&blr->blocking_smblctx);

	TALLOC_FREE(br_lck);

	if (NT_STATUS_IS_OK(status)) {
		/*
		 * Success - we got the lock.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"%s, num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp_fnum_dbg(fsp),
			(int)state->lock_count));

		remove_pending_lock(state, blr);
		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the lock for this record.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get the lock - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: failed to get lock "
		"for file %s, %s. Still waiting....\n",
		fsp_str_dbg(fsp),
		fsp_fnum_dbg(fsp)));

	return;
}

/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbXsrv_connection *xconn = sconn->conn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}

/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbXsrv_connection *xconn = sconn->conn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = xconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = SMBD_SMB2_IN_HDR_PTR(smb2req);
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = smb2req->compat_chain_fsp;
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav);
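
		/*
		 * On SHUTDOWN_CLOSE the request is completed as
		 * successful rather than with NT_STATUS_RANGE_NOT_LOCKED;
		 * see also the note in smbd_smb2_lock_cancel().
		 */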
		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}