source3/smbd/smb2_lock.c
/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "librpc/gen_ndr/messaging.h"
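/*
 * In-memory copy of a single lock element from an SMB2 LOCK request.
 * The on-wire element (SMB2_LOCK_ELEMENT in MS-SMB2) is 0x18 bytes:
 * an 8-byte offset, an 8-byte length, 4 bytes of flags and 4 reserved
 * bytes, which is exactly what the parsing code below reads.
 */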
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};
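/*
 * Per-request state attached to the lock tevent_req. "blr" is only set
 * once the request has gone async as a blocking lock (see
 * push_blocking_lock_request_smb2() below); "locks" and "lock_count" are
 * kept so a blocked request can later be re-processed or rolled back.
 */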
struct smbd_smb2_lock_state {
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct blocking_lock_record *blr;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
};
static void remove_pending_lock(struct smbd_smb2_lock_state *state,
				struct blocking_lock_record *blr);

static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	const uint8_t *inbody;
	const int i = req->current_idx;
	size_t expected_body_size = 0x30;
	size_t body_size;
	uint32_t in_smbpid;
	uint16_t in_lock_count;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;

	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;
	if (req->in.vector[i+1].iov_len != (expected_body_size & 0xFFFFFFFE)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	inbody = (const uint8_t *)req->in.vector[i+1].iov_base;

	body_size = SVAL(inbody, 0x00);
	if (body_size != expected_body_size) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_smbpid = IVAL(inhdr, SMB2_HDR_PID);

	in_lock_count = CVAL(inbody, 0x02);
	/* 0x04 - 4 bytes reserved */
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}
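	/*
	 * The first lock element travels in the fixed part of the request
	 * body (at offset 0x18); any further elements follow in the dynamic
	 * buffer at 0x18 bytes each, which is what the length check below
	 * validates.
	 */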
	if (((in_lock_count - 1) * 0x18) > req->in.vector[i+2].iov_len) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (req->compat_chain_fsp) {
		/* skip check */
	} else if (in_file_id_persistent != in_file_id_volatile) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}

	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags  = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	lock_buffer = (const uint8_t *)req->in.vector[i+2].iov_base;

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags  = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	subreq = smbd_smb2_lock_send(req,
				     req->sconn->smb2.event_ctx,
				     req,
				     in_smbpid,
				     in_file_id_volatile,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);
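	/*
	 * A blocking lock may not complete immediately;
	 * smbd_smb2_request_pending_queue() presumably arranges for the
	 * SMB2 async/interim handling while the subrequest stays pending.
	 */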
	return smbd_smb2_request_pending_queue(req, subreq);
}
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */
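	/*
	 * smb2req->cancelled is set by smbd_smb2_lock_cancel() (registered
	 * as the tevent cancel function below, presumably triggered by an
	 * SMB2 CANCEL from the client). In that case the pending lock
	 * record is torn down and the request answered with
	 * NT_STATUS_CANCELLED.
	 */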
	if (smb2req->cancelled) {
		const uint8_t *inhdr = (const uint8_t *)
			smb2req->in.vector[smb2req->current_idx].iov_base;
		uint64_t mid = BVAL(inhdr, SMB2_HDR_MESSAGE_ID);
		struct smbd_smb2_lock_state *state;

		DEBUG(10,("smbd_smb2_request_lock_done: cancelled mid %llu\n",
			(unsigned long long)mid ));

		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);

		SMB_ASSERT(state);
		SMB_ASSERT(state->blr);

		remove_pending_lock(state, state->blr);

		error = smbd_smb2_request_error(smb2req, NT_STATUS_CANCELLED);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}
	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}
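	/*
	 * Success: marshal the fixed SMB2 LOCK response body, which is just
	 * a 2-byte StructureSize of 4 plus 2 reserved bytes.
	 */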
	outbody = data_blob_talloc(smb2req->out.vector, NULL, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->sconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->sconn,
						 nt_errstr(error));
		return;
	}
}
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      uint32_t in_smbpid,
					      uint64_t in_file_id_volatile,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	struct smb_request *smb1req;
	connection_struct *conn = smb2req->tcon->compat_conn;
	files_struct *fsp;
	int32_t timeout = -1;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool async = false;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(smb1req, req)) {
		return tevent_req_post(req, ev);
	}
	state->smb1req = smb1req;

	DEBUG(10,("smbd_smb2_lock_send: file_id[0x%016llX]\n",
		  (unsigned long long)in_file_id_volatile));

	fsp = file_fsp(smb1req, (uint16_t)in_file_id_volatile);
	if (fsp == NULL) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (conn != fsp->conn) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
	if (smb2req->session->vuid != fsp->vuid) {
		tevent_req_nterror(req, NT_STATUS_FILE_CLOSED);
		return tevent_req_post(req, ev);
	}
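	/*
	 * Above, the low 16 bits of the volatile file id are used as an
	 * SMB1-style fnum to look up the fsp, which is then validated
	 * against the tree connect and the session vuid; any mismatch is
	 * treated as NT_STATUS_FILE_CLOSED.
	 */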
	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}

	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		timeout = -1;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		timeout = 0;
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   MS-SMB2 3.3.5.14 */
		isunlock = true;
		timeout = 0;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}
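	/*
	 * Summary of the timeout chosen above: -1 means block until the
	 * byte range becomes available (a blocking lock), 0 means fail
	 * immediately (the FAIL_IMMEDIATELY variants and unlocks).
	 */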
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			if (i > 0) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK we need
				 * to defer the error response to the backend,
				 * because we need to process all unlock
				 * elements before returning the error.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		locks[i].smblctx = in_file_id_volatile;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DEBUG(10,("smbd_smb2_lock_send: index %d offset=%llu, count=%llu, "
			  "smblctx = %llu type %d\n",
			  (int)i,
			  (unsigned long long)locks[i].offset,
			  (unsigned long long)locks[i].count,
			  (unsigned long long)locks[i].smblctx,
			  (int)locks[i].brltype ));
	}
	state->locks = locks;
	state->lock_count = in_lock_count;
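	/*
	 * Hand the converted elements to the common locking code. The calls
	 * below assume the smbd_do_locking() signature used in this tree:
	 * (req, fsp, type, timeout, num_ulocks, ulocks, num_locks, locks,
	 * async) - an unlock request passes the elements as ulocks, a lock
	 * request passes them as locks.
	 */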
	if (isunlock) {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 in_lock_count,
					 locks,
					 0,
					 NULL,
					 &async);
	} else {
		status = smbd_do_locking(smb1req, fsp,
					 0,
					 timeout,
					 0,
					 NULL,
					 in_lock_count,
					 locks,
					 &async);
	}
	if (!NT_STATUS_IS_OK(status)) {
		if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
			status = NT_STATUS_LOCK_NOT_GRANTED;
		}
		tevent_req_nterror(req, status);
		return tevent_req_post(req, ev);
	}
	if (async) {
		return req;
	}

	tevent_req_done(req);
	return tevent_req_post(req, ev);
}
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	NTSTATUS status;

	if (tevent_req_is_nterror(req, &status)) {
		tevent_req_received(req);
		return status;
	}

	tevent_req_received(req);
	return NT_STATUS_OK;
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/

static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;
	smb2req->cancelled = true;

	tevent_req_done(req);
	return true;
}
/****************************************************************
 Got a message saying someone unlocked a file. Re-schedule all
 blocking lock requests as we don't know if anything overlapped.
*****************************************************************/

static void received_unlock_msg(struct messaging_context *msg,
				void *private_data,
				uint32_t msg_type,
				struct server_id server_id,
				DATA_BLOB *data)
{
	DEBUG(10,("received_unlock_msg (SMB2)\n"));
	process_blocking_lock_queue_smb2(smbd_server_conn, timeval_current());
}
/****************************************************************
 Function to get the blr on a pending record.
*****************************************************************/

struct blocking_lock_record *get_pending_smb2req_blr(struct smbd_smb2_request *smb2req)
{
	struct smbd_smb2_lock_state *state = NULL;
	const uint8_t *inhdr;

	if (!smb2req) {
		return NULL;
	}
	if (smb2req->subreq == NULL) {
		return NULL;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return NULL;
	}
	inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
	if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
		return NULL;
	}
	state = tevent_req_data(smb2req->subreq,
			struct smbd_smb2_lock_state);
	if (!state) {
		return NULL;
	}
	return state->blr;
}
/****************************************************************
 Set up the next brl timeout.
*****************************************************************/

static bool recalc_smb2_brl_timeout(struct smbd_server_connection *sconn)
{
	struct smbd_smb2_request *smb2req;
	struct timeval next_timeout = timeval_zero();
	int max_brl_timeout = lp_parm_int(-1, "brl", "recalctime", 5);

	TALLOC_FREE(sconn->smb2.locks.brl_timeout);

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = smb2req->next) {
		struct blocking_lock_record *blr =
			get_pending_smb2req_blr(smb2req);
		if (!blr) {
			continue;
		}
		if (timeval_is_zero(&blr->expire_time)) {
			/*
			 * If we're blocked on pid 0xFFFFFFFFFFFFFFFFLL this is
			 * a POSIX lock, so calculate a timeout of
			 * 10 seconds into the future.
			 */
			if (blr->blocking_smblctx == 0xFFFFFFFFFFFFFFFFLL) {
				struct timeval psx_to = timeval_current_ofs(10, 0);
				next_timeout = timeval_brl_min(&next_timeout, &psx_to);
			}

			continue;
		}

		next_timeout = timeval_brl_min(&next_timeout, &blr->expire_time);
	}

	if (timeval_is_zero(&next_timeout)) {
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = Infinite.\n"));
		return true;
	}

	/*
	 * To account for unclean shutdowns by clients we need a
	 * maximum timeout that we use for checking pending locks. If
	 * we have any pending locks at all, then check if the pending
	 * lock can continue at least every brl:recalctime seconds
	 * (default 5 seconds).
	 *
	 * This saves us needing to do a message_send_all() in the
	 * SIGCHLD handler in the parent daemon. That
	 * message_send_all() caused O(n^2) work to be done when IP
	 * failovers happened in clustered Samba, which could make the
	 * entire system unusable for many minutes.
	 */

	if (max_brl_timeout > 0) {
		struct timeval min_to = timeval_current_ofs(max_brl_timeout, 0);
		next_timeout = timeval_brl_min(&next_timeout, &min_to);
	}

	if (DEBUGLVL(10)) {
		struct timeval cur, from_now;

		cur = timeval_current();
		from_now = timeval_until(&cur, &next_timeout);
		DEBUG(10, ("recalc_smb2_brl_timeout: Next "
			"timeout = %d.%d seconds from now.\n",
			(int)from_now.tv_sec, (int)from_now.tv_usec));
	}

	sconn->smb2.locks.brl_timeout = event_add_timed(
				smbd_event_context(),
				NULL,
				next_timeout,
				brl_timeout_fn,
				NULL);
	if (!sconn->smb2.locks.brl_timeout) {
		return false;
	}
	return true;
}
/****************************************************************
 Get an SMB2 lock request to go async. lock_timeout should
 always be -1 here.
*****************************************************************/
bool push_blocking_lock_request_smb2( struct byte_range_lock *br_lck,
				struct smb_request *smb1req,
				files_struct *fsp,
				int lock_timeout,
				int lock_num,
				uint64_t smblctx,
				enum brl_type lock_type,
				enum brl_flavour lock_flav,
				uint64_t offset,
				uint64_t count,
				uint64_t blocking_smblctx)
{
	struct smbd_server_connection *sconn = smb1req->sconn;
	struct smbd_smb2_request *smb2req = smb1req->smb2req;
	struct tevent_req *req = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	struct blocking_lock_record *blr = NULL;
	NTSTATUS status = NT_STATUS_OK;

	if (!smb2req) {
		return false;
	}
	req = smb2req->subreq;
	if (!req) {
		return false;
	}
	if (!tevent_req_is_in_progress(smb2req->subreq)) {
		return false;
	}
	state = tevent_req_data(req, struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	blr = talloc_zero(state, struct blocking_lock_record);
	if (!blr) {
		return false;
	}
	blr->fsp = fsp;
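	/*
	 * lock_timeout is in milliseconds; -1 (the expected value here, see
	 * the comment above) means the pending lock never expires.
	 */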
	if (lock_timeout == -1) {
		blr->expire_time.tv_sec = 0;
		blr->expire_time.tv_usec = 0; /* Never expire. */
	} else {
		blr->expire_time = timeval_current_ofs(
			lock_timeout/1000,
			(lock_timeout % 1000) * 1000);
	}
	blr->lock_num = lock_num;
	blr->smblctx = smblctx;
	blr->blocking_smblctx = blocking_smblctx;
	blr->lock_flav = lock_flav;
	blr->lock_type = lock_type;
	blr->offset = offset;
	blr->count = count;

	/* Specific brl_lock() implementations can fill this in. */
	blr->blr_private = NULL;

	/* Add a pending lock record for this. */
	status = brl_lock(sconn->msg_ctx,
			br_lck,
			smblctx,
			sconn_server_id(sconn),
			offset,
			count,
			lock_type == READ_LOCK ? PENDING_READ_LOCK : PENDING_WRITE_LOCK,
			blr->lock_flav,
			true,
			NULL,
			blr);

	if (!NT_STATUS_IS_OK(status)) {
		DEBUG(0,("push_blocking_lock_request_smb2: "
			"failed to add PENDING_LOCK record.\n"));
		TALLOC_FREE(blr);
		return false;
	}
	state->blr = blr;
	DEBUG(10,("push_blocking_lock_request_smb2: file %s timeout %d\n",
		fsp_str_dbg(fsp),
		lock_timeout ));

	recalc_smb2_brl_timeout(sconn);

	/* Ensure we'll receive messages when this is unlocked. */
	if (!sconn->smb2.locks.blocking_lock_unlock_state) {
		messaging_register(sconn->msg_ctx, NULL,
				MSG_SMB_UNLOCK, received_unlock_msg);
		sconn->smb2.locks.blocking_lock_unlock_state = true;
	}
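	/*
	 * From now on any MSG_SMB_UNLOCK message will invoke
	 * received_unlock_msg() and re-scan the pending SMB2 lock queue;
	 * the registration is only done once per connection.
	 */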
	/* allow this request to be canceled */
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return true;
}
/****************************************************************
 Remove a pending lock record under lock.
*****************************************************************/
static void remove_pending_lock(struct smbd_smb2_lock_state *state,
			struct blocking_lock_record *blr)
{
	int i;
	struct byte_range_lock *br_lck = brl_get_locks(
				state, blr->fsp);

	DEBUG(10, ("remove_pending_lock: BLR = %p\n", blr));

	if (br_lck) {
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(blr->fsp->conn->sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);
		TALLOC_FREE(br_lck);
	}

	/* Remove the locks we already got. */

	for(i = blr->lock_num - 1; i >= 0; i--) {
		struct smbd_lock_element *e = &state->locks[i];

		do_unlock(blr->fsp->conn->sconn->msg_ctx,
			blr->fsp,
			e->smblctx,
			e->count,
			e->offset,
			WINDOWS_LOCK);
	}
}
/****************************************************************
 Re-process a blocking lock request.
 This is equivalent to process_lockingX() inside smbd/blocking.c
*****************************************************************/
static void reprocess_blocked_smb2_lock(struct smbd_smb2_request *smb2req,
				struct timeval tv_curr)
{
	NTSTATUS status = NT_STATUS_UNSUCCESSFUL;
	struct blocking_lock_record *blr = NULL;
	struct smbd_smb2_lock_state *state = NULL;
	files_struct *fsp = NULL;

	if (!smb2req->subreq) {
		return;
	}
	state = tevent_req_data(smb2req->subreq, struct smbd_smb2_lock_state);
	if (!state) {
		return;
	}

	blr = state->blr;
	fsp = blr->fsp;
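	/*
	 * blr->lock_num records how many elements have already been
	 * granted, so re-processing resumes with the first element that is
	 * still outstanding.
	 */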
	/* Try and finish off getting all the outstanding locks. */

	for (; blr->lock_num < state->lock_count; blr->lock_num++) {
		struct byte_range_lock *br_lck = NULL;
		struct smbd_lock_element *e = &state->locks[blr->lock_num];

		br_lck = do_lock(fsp->conn->sconn->msg_ctx,
				fsp,
				e->smblctx,
				e->count,
				e->offset,
				e->brltype,
				WINDOWS_LOCK,
				true,
				&status,
				&blr->blocking_smblctx,
				blr);

		TALLOC_FREE(br_lck);

		if (NT_STATUS_IS_ERR(status)) {
			break;
		}
	}

	if(blr->lock_num == state->lock_count) {
		/*
		 * Success - we got all the locks.
		 */

		DEBUG(3,("reprocess_blocked_smb2_lock SUCCESS file = %s, "
			"fnum=%d num_locks=%d\n",
			fsp_str_dbg(fsp),
			fsp->fnum,
			(int)state->lock_count));

		tevent_req_done(smb2req->subreq);
		return;
	}

	if (!NT_STATUS_EQUAL(status,NT_STATUS_LOCK_NOT_GRANTED) &&
			!NT_STATUS_EQUAL(status,NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * We have other than a "can't get lock"
		 * error. Return an error.
		 */
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, status);
		return;
	}

	/*
	 * We couldn't get the locks for this record on the list.
	 * If the time has expired, return a lock error.
	 */

	if (!timeval_is_zero(&blr->expire_time) &&
			timeval_compare(&blr->expire_time, &tv_curr) <= 0) {
		remove_pending_lock(state, blr);
		tevent_req_nterror(smb2req->subreq, NT_STATUS_LOCK_NOT_GRANTED);
		return;
	}

	/*
	 * Still can't get all the locks - keep waiting.
	 */

	DEBUG(10,("reprocess_blocked_smb2_lock: only got %d locks of %d needed "
		"for file %s, fnum = %d. Still waiting....\n",
		(int)blr->lock_num,
		(int)state->lock_count,
		fsp_str_dbg(fsp),
		(int)fsp->fnum));

	return;
}
/****************************************************************
 Attempt to process all outstanding blocking locks pending on
 the request queue.
*****************************************************************/
void process_blocking_lock_queue_smb2(
	struct smbd_server_connection *sconn, struct timeval tv_curr)
{
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		const uint8_t *inhdr;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[smb2req->current_idx].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) == SMB2_OP_LOCK) {
			reprocess_blocked_smb2_lock(smb2req, tv_curr);
		}
	}
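	/* Re-arm the brl timeout now that the pending queue may have changed. */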
	recalc_smb2_brl_timeout(sconn);
}
/****************************************************************************
 Remove any locks on this fd. Called from file_close().
****************************************************************************/
void cancel_pending_lock_requests_by_fid_smb2(files_struct *fsp,
			struct byte_range_lock *br_lck,
			enum file_close_type close_type)
{
	struct smbd_server_connection *sconn = fsp->conn->sconn;
	struct smbd_smb2_request *smb2req, *nextreq;

	for (smb2req = sconn->smb2.requests; smb2req; smb2req = nextreq) {
		struct smbd_smb2_lock_state *state = NULL;
		files_struct *fsp_curr = NULL;
		int i = smb2req->current_idx;
		uint64_t in_file_id_volatile;
		struct blocking_lock_record *blr = NULL;
		const uint8_t *inhdr;
		const uint8_t *inbody;

		nextreq = smb2req->next;

		if (smb2req->subreq == NULL) {
			/* This message has been processed. */
			continue;
		}
		if (!tevent_req_is_in_progress(smb2req->subreq)) {
			/* This message has been processed. */
			continue;
		}

		inhdr = (const uint8_t *)smb2req->in.vector[i].iov_base;
		if (SVAL(inhdr, SMB2_HDR_OPCODE) != SMB2_OP_LOCK) {
			/* Not a lock call. */
			continue;
		}

		inbody = (const uint8_t *)smb2req->in.vector[i+1].iov_base;
		in_file_id_volatile = BVAL(inbody, 0x10);
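		/*
		 * Re-read the volatile file id from the stored request body
		 * so this pending lock can be matched against the fsp being
		 * closed.
		 */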
		state = tevent_req_data(smb2req->subreq,
				struct smbd_smb2_lock_state);
		if (!state) {
			/* Strange - is this even possible ? */
			continue;
		}

		fsp_curr = file_fsp(state->smb1req, (uint16_t)in_file_id_volatile);
		if (fsp_curr == NULL) {
			/* Strange - is this even possible ? */
			continue;
		}

		if (fsp_curr != fsp) {
			/* It's not our fid */
			continue;
		}

		blr = state->blr;

		/* Remove the entries from the lock db. */
		brl_lock_cancel(br_lck,
				blr->smblctx,
				sconn_server_id(sconn),
				blr->offset,
				blr->count,
				blr->lock_flav,
				blr);

		/* Finally end the request. */
		if (close_type == SHUTDOWN_CLOSE) {
			tevent_req_done(smb2req->subreq);
		} else {
			tevent_req_nterror(smb2req->subreq,
				NT_STATUS_RANGE_NOT_LOCKED);
		}
	}
}