source3/smbd/smb2_lock.c
/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "messages.h"
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};
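
/*
 * Per-request state kept while an SMB2 LOCK request is processed,
 * possibly asynchronously: the owning SMB2/SMB1 request structures,
 * the blocking lock record (if the request went async) and the
 * translated smbd_lock_element array.
 */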
struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
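
/*
 * Parse and validate the SMB2 LOCK request body, pull out the lock
 * elements and hand them to smbd_smb2_lock_send(). The reply is sent
 * from smbd_smb2_request_lock_done() once the subrequest completes.
 */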
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	const uint8_t *inbody;
	const int i = req->current_idx;
	size_t expected_body_size = 0x30;
	size_t body_size;
	uint32_t in_smbpid;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;

	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	body_size = SVAL(inbody, 0x00);
	if (body_size != expected_body_size) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_smbpid = IVAL(inhdr, SMB2_HDR_PID);

	in_lock_count = CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
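
	/*
	 * The first lock element lives in the fixed part of the body at
	 * offset 0x18; any additional elements follow in the dynamic
	 * buffer, 0x18 bytes each.
	 */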
	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	subreq = smbd_smb2_lock_send(req,
				     req->sconn->smb2.event_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq);
}
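
/*
 * Completion callback: either the request was cancelled (tear down the
 * pending lock and answer NT_STATUS_CANCELLED) or we build the 4-byte
 * SMB2 LOCK response body.
 */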
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					    struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid ));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);

		SMB_ASSERT(state);
		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
			return;
		}
		return;
	}

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
			return;
		}
		return;
	}

	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
				nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
			nt_errstr(error));
		return;
	}
}
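
/*
 * Do the actual work: look up the fsp, validate the lock element flags,
 * translate them into struct smbd_lock_element and call the common
 * smbd_do_locking() backend. May go async for blocking locks.
 */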
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	connection_struct *conn = smb2req->tcon->compat_conn;
	files_struct *fsp;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
		  (unsigned long long)in_file_id_volatile));

	fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
	if (fsp == NULL) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (conn != fsp->conn) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (smb2req->session->vuid != fsp->vuid) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}

	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}
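
	/*
	 * The flags of the first element decide how the whole batch is
	 * treated (see MS-SMB2 3.3.5.14): a single blocking lock that can
	 * wait forever, a fail-immediately batch, or an unlock batch.
	 */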
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements first.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].smblctx = in_file_id_volatile;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}

	state->locks = locks;
	state->lock_count = in_lock_count;
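
	/*
	 * Hand off to the common locking backend: an unlock batch is passed
	 * as the unlock array, a lock batch as the lock array.
	 */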
	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 0,
					 NULL,
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 0,
					 NULL,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}

	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}
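
/*
 * Collect the result of the lock subrequest.
 */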
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}
/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	struct smbd_server_connection *sconn;

	DEBUG(10,("received_unlock_msg (SMB2)\n"));

	sconn = msg_ctx_to_sconn(msg);
	if (sconn == NULL) {
		DEBUG(1, ("could not find sconn\n"));
		return;
	}

	process_blocking_lock_queue_smb2(sconn, timeval_current());
}
/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}
/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout:Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */

	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = event_add_timed(
				server_event_context(),
				NULL,
				next_timeout,
				brl_timeout_fn,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/

bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}
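
	/*
	 * Create the blocking_lock_record that tracks this pending
	 * byte-range lock while the SMB2 request stays outstanding.
	 */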
	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;

	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs_msec(lock_timeout);
	}

	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			sconn_server_id(sconn),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;

	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, NULL,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}

	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}
/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/

static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(blr->fsp->conn->sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for (i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}
/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/

static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;

	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if (blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"fnum=%d num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp->fnum,
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED) &&
	    !NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
	    timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, fnum = %d. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		(int)fsp->fnum));

	return;
}
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/

void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}
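
		/* Only pending SMB2 LOCK requests get retried here. */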
		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}

	recalc_smb2_brl_timeout(sconn);
}
/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/

void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		uint64_t in_file_id_volatile;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;
		const uint8_t *inbody;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}

		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
		in_file_id_volatile = BVAL(inbody, 0x10);

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}