s3:smb2_lock: remove unused in_smbpid
[Samba/vl.git] / source3 / smbd / smb2_lock.c
blob aacf18965a455c659cc2e814baca3dbf8e702459
/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
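/*
 * Request layout (SMB2 LOCK request, see MS-SMB2): the fixed body is 0x30
 * bytes - StructureSize (0x00), LockCount (0x02), a 4-byte reserved/
 * LockSequence field (0x04) and the FileId (persistent at 0x08, volatile at
 * 0x10) - followed by the lock elements. The first 0x18-byte element sits in
 * the fixed body at offset 0x18; any further elements are read from the
 * dynamic buffer below.
 */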
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	const int i = req->current_idx;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	in_lock_count		= CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent	= BVAL(inbody, 0x08);
	in_file_id_volatile	= BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset	= BVAL(lock_buffer, 0x00);
	in_locks[l].length	= BVAL(lock_buffer, 0x08);
	in_locks[l].flags	= IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset	= BVAL(lock_buffer, 0x00);
		in_locks[l].length	= BVAL(lock_buffer, 0x08);
		in_locks[l].flags	= IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
				     req, in_fsp,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
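/*
 * Completion callback for the lock subrequest. If the request was cancelled
 * while waiting (smb2req->cancelled), the pending blocking lock record is
 * torn down and NT_STATUS_CANCELLED is returned to the client; otherwise the
 * fixed 4-byte SMB2 LOCK response body is built here.
 */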
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid ));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);

		SMB_ASSERT(state);
		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
			return;
		}
		return;
	}

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
						 nt_errstr(error));
		return;
	}
}
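/*
 * Translate the SMB2 lock elements into the smbd_lock_element array used by
 * the common locking backend. The flags of the first element select the mode
 * for the whole batch: plain SHARED/EXCLUSIVE means a blocking lock
 * (timeout -1, only one element allowed), FAIL_IMMEDIATELY means try once,
 * and UNLOCK turns the batch into an unlock run.
 */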
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
		  fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}
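	/*
	 * Check the remaining elements against the mode chosen above:
	 * lock and unlock elements must not be mixed within one request
	 * (MS-SMB2 3.3.5.14).
	 */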
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements before.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * This is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER.
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}

	state->locks = locks;
	state->lock_count = in_lock_count;
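	/*
	 * Hand the translated array to the common SMB1/SMB2 locking code:
	 * as unlocks if the batch is an unlock run, otherwise as locks. A
	 * blocking lock may go async; in that case the backend sets *async
	 * and the request is queued as a pending blocking lock (see
	 * push_blocking_lock_request_smb2() below).
	 */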
	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 0,
					 NULL,
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 0,
					 NULL,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}
/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn =
		talloc_get_type_abort(private_data,
		struct smbd_server_connection);

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}
/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}
/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on smblctx 0xFFFFFFFFFFFFFFFF
			 * this is a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */

	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = tevent_add_timer(
				sconn->ev_ctx,
				NULL,
				next_timeout,
				brl_timeout_fn,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/
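/*
 * Overview: allocate a blocking_lock_record on the lock state, record a
 * PENDING_READ_LOCK/PENDING_WRITE_LOCK entry in the byte-range lock
 * database, recalculate the brl timeout timer, make sure MSG_SMB_UNLOCK
 * messages are delivered to this connection, and allow the request to be
 * cancelled via smbd_smb2_lock_cancel().
 */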
bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			messaging_server_id(sconn->msg_ctx),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, sconn,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}
/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(blr->fsp->conn->sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for(i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}
/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/
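/*
 * Overview: continue acquiring the outstanding locks starting at
 * blr->lock_num. If all locks are obtained the request is completed; a hard
 * error or an expired timeout removes the pending lock and fails the
 * request; otherwise the request stays queued and is retried later.
 */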
static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if(blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"%s, num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp_fnum_dbg(fsp),
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, %s. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		fsp_fnum_dbg(fsp)));

	return;
}
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/
void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}
/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = smb2req->compat_chain_fsp;
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				messaging_server_id(sconn->msg_ctx),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}