/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"

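/*
 * One parsed SMB2_LOCK_ELEMENT from the request. Each element on the
 * wire is 0x18 bytes: a 64-bit offset, a 64-bit length, 32 bits of
 * flags and 4 trailing reserved bytes.
 */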
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};

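/*
 * Per-request state for an SMB2 LOCK call: the SMB2 request, the fake
 * SMB1 request used to drive the common locking code, the blocking
 * lock record (set only once the request goes async) and the lock
 * elements translated into smbd_lock_element form.
 */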
struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);

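/****************************************************************
 Parse an incoming SMB2 LOCK request and hand it to the async
 smbd_smb2_lock_send()/smbd_smb2_request_lock_done() pair.

 Offsets read from the fixed 0x30-byte request body:
   0x02  lock count
   0x04  4 bytes reserved
   0x08  file id (persistent)
   0x10  file id (volatile)
   0x18  first lock element (0x18 bytes)
 Any further lock elements are read from the dynamic buffer.
*****************************************************************/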
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	const uint8_t *inbody;
	const int i = req->current_idx;
	uint32_t in_smbpid;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;

	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	in_smbpid = IVAL(inhdr, SMB2_HDR_PID);

	in_lock_count = CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags  = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags  = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	subreq = smbd_smb2_lock_send(req,
				     req->sconn->smb2.event_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}

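/****************************************************************
 Completion handler for the lock subrequest. Handles the cancelled
 case (tears down the pending lock record and replies with
 NT_STATUS_CANCELLED), otherwise maps the result onto either an
 error response or the fixed 4-byte SMB2 LOCK reply body.
*****************************************************************/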
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid ));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);

		SMB_ASSERT(state);
		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
			return;
		}
		return;
	}

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
						 nt_errstr(error));
		return;
	}
}

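/****************************************************************
 Do the actual work: look up and validate the fsp, translate the
 SMB2 lock flags into byte-range lock types and a timeout, and
 call the common smbd_do_locking() code. The request only stays
 async if a blocking lock could not be granted immediately.
*****************************************************************/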
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	connection_struct *conn = smb2req->tcon->compat_conn;
	files_struct *fsp;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
			struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
		  (unsigned long long)in_file_id_volatile));

	fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
	if (fsp == NULL) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (conn != fsp->conn) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (smb2req->session->vuid != fsp->vuid) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}

	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to process
				 * all unlock elements before
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = in_file_id_volatile;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}

	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 0,
					 NULL,
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 0,
					 NULL,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}

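/****************************************************************
 Collect the result of the lock subrequest.
*****************************************************************/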
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}

/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}

/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn;

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	sconn = msg_ctx_to_sconn(msg);
	if (sconn == NULL) {
		DEBUG(1, ("could not find sconn\n"));
		return;
	}

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}

/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}

/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */

	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = event_add_timed(
				server_event_context(),
				NULL,
				next_timeout,
				brl_timeout_fn,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}

/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			sconn_server_id(sconn),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, NULL,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}

/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(blr->fsp->conn->sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for(i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}

/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if(blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"fnum=%d num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp->fnum,
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, fnum = %d. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		(int)fsp->fnum));

	return;
}

/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}

/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		uint64_t in_file_id_volatile;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;
		const uint8_t *inbody;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
		in_file_id_volatile = BVAL(inbody, 0x10);

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}