/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"

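/*
 * One parsed lock element from an SMB2 LOCK request: a 64-bit
 * offset/length pair plus the SMB2_LOCK_FLAG_* bits. This mirrors the
 * on-the-wire SMB2_LOCK_ELEMENT structure; its reserved field is
 * ignored.
 */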
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

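/*
 * Per-request state hung off the tevent_req: the owning SMB2 request,
 * the faked SMB1 request used by the shared locking code, the blocking
 * lock record once the request has gone async, and the translated
 * array of byte-range lock elements.
 */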
struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
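
/*
 * Parse an SMB2 LOCK request and kick off the lock processing.
 *
 * The fixed body is 0x30 bytes: LockCount at offset 0x02, the
 * persistent/volatile file ids at 0x08/0x10, and the first 0x18-byte
 * lock element embedded at offset 0x18. Any further elements arrive in
 * the dynamic buffer, 0x18 bytes each, which is why the buffer length
 * check below uses (in_lock_count - 1) * 0x18.
 */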
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	const uint8_t *inbody;
	const int i = req->current_idx;
	uint32_t in_smbpid;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	in_smbpid = IVAL(inhdr, SMB2_HDR_PID);

	in_lock_count = CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags  = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags  = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	subreq = smbd_smb2_lock_send(req,
				     req->sconn->smb2.event_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq);
}

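/*
 * Completion callback: if the request was cancelled while blocked we
 * tear down the pending lock record and return NT_STATUS_CANCELLED,
 * otherwise we build the 4-byte SMB2 LOCK response. Any error on the
 * send path is treated as fatal for the connection.
 */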
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid ));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);

		SMB_ASSERT(state);
		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
			return;
		}
		return;
	}

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
						 nt_errstr(error));
		return;
	}
}

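/*
 * Validate the lock elements and hand them to the common locking code.
 *
 * The flags of the first element decide whether this is a lock or an
 * unlock batch and whether the request may block (no FAIL_IMMEDIATELY
 * and no UNLOCK bit means a blocking lock, i.e. timeout = -1). Each
 * element is then translated into a struct smbd_lock_element keyed on
 * the volatile file id and passed to smbd_do_locking(); if *async
 * comes back true, the request has gone blocking and the tevent_req
 * stays in flight until the blocking-lock machinery below finishes it.
 */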
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	connection_struct *conn = smb2req->tcon->compat_conn;
	files_struct *fsp;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
		  (unsigned long long)in_file_id_volatile));

	fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
	if (fsp == NULL) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (conn != fsp->conn) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (smb2req->session->vuid != fsp->vuid) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

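	/*
	 * Per-element checks, mirroring the first-element decision above:
	 * in an unlock batch every element must carry only the UNLOCK
	 * flag (anything else is marked invalid and rejected by the
	 * backend), while in a lock batch only the first element may
	 * omit FAIL_IMMEDIATELY and an UNLOCK flag is always an error.
	 */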
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements before.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = in_file_id_volatile;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * This is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER.
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}

	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,		/* type */
					 timeout,
					 in_lock_count,	/* num_ulocks */
					 locks,		/* ulocks */
					 0,		/* num_locks */
					 NULL,		/* locks */
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,		/* type */
					 timeout,
					 0,		/* num_ulocks */
					 NULL,		/* ulocks */
					 in_lock_count,	/* num_locks */
					 locks,		/* locks */
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}

static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}

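/*
 * Everything below implements blocking ("pending") SMB2 locks: when a
 * lock cannot be granted immediately, a blocking_lock_record is hung
 * off the request state, a pending entry is added to the byte-range
 * lock database, and the request is retried either when a timeout
 * fires or when an MSG_SMB_UNLOCK message tells us another process
 * released a lock.
 */
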
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}

/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn;

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	sconn = msg_ctx_to_sconn(msg);
	if (sconn == NULL) {
		DEBUG(1, ("could not find sconn\n"));
		return;
	}

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}

/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}

/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */

	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = event_add_timed(
				smbd_event_context(),
				NULL,
				next_timeout,
				brl_timeout_fn,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}

/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2(struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs(
			lock_timeout/1000,
			(lock_timeout % 1000) * 1000);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			sconn_server_id(sconn),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, NULL,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}

/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

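/*
 * This cancels the pending entry in the byte-range lock database and
 * then rolls back any ranges that had already been granted for this
 * request (indices below blr->lock_num), so a cancelled or failed
 * blocking lock leaves no partial state behind.
 */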
static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
			state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(blr->fsp->conn->sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for (i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			  blr->fsp,
			  e->smblctx,
			  e->count,
			  e->offset,
			  WINDOWS_LOCK);
	}
}

/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				 fsp,
				 e->smblctx,
				 e->count,
				 e->offset,
				 e->brltype,
				 WINDOWS_LOCK,
				 true,
				 &status,
				 &blr->blocking_smblctx,
				 blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if (blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"fnum=%d num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp->fnum,
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
	    timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, fnum = %d. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		(int)fsp->fnum));

	return;
}

/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}

/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		uint64_t in_file_id_volatile;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;
		const uint8_t *inbody;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
		in_file_id_volatile = BVAL(inbody, 0x10);

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
					NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}