/*
   Unix SMB/CIFS implementation.
   Core SMB2 server

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "locking/share_mode_lock.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "lib/dbwrap/dbwrap_watch.h"
#include "librpc/gen_ndr/open_files.h"
#include "messages.h"
#undef DBGC_CLASS
#define DBGC_CLASS DBGC_SMB2
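/*
 * One parsed SMB2_LOCK_ELEMENT from the request. On the wire
 * ([MS-SMB2] 2.2.26.1) each element is 0x18 bytes: 8 bytes offset,
 * 8 bytes length, 4 bytes flags and 4 reserved bytes.
 */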
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};
struct smbd_smb2_lock_state {
	struct tevent_context *ev;
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct files_struct *fsp;
	bool blocking;
	uint32_t polling_msecs;
	uint32_t retry_msecs;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
	uint8_t lock_sequence_value;
	uint8_t *lock_sequence_element;
};
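/*
 * Note on the two backoff counters above: retry_msecs paces retries
 * while the locking backend keeps answering NT_STATUS_RETRY, while
 * polling_msecs paces polling for contended POSIX locks, where no
 * wakeup message will ever arrive (blocking_smblctx == UINT64_MAX).
 */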
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint32_t in_lock_sequence,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
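/*
 * Parse the fixed part of the LOCK request, pull the lock elements
 * out of the body and the dynamic buffer, then hand off to the async
 * smbd_smb2_lock_send()/smbd_smb2_request_lock_done() pair.
 * smbd_smb2_request_pending_queue() sends an interim response if the
 * subrequest does not complete within the given defer time.
 */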
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_lock_count;
	uint32_t in_lock_sequence;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;
	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);
	in_lock_count = CVAL(inbody, 0x02);
	if (req->xconn->protocol >= PROTOCOL_SMB2_10) {
		in_lock_sequence = IVAL(inbody, 0x04);
	} else {
		/* 0x04 - 4 bytes reserved */
		in_lock_sequence = 0;
	}
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);
	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}
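	/*
	 * Only the first lock element travels in the fixed 0x30-byte
	 * body (at offset 0x18); elements 2..n follow in the dynamic
	 * buffer at 0x18 bytes each, so validate that buffer's length.
	 */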
	if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}
	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */
	status = req->session->status;
	if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
		/*
		 * We need to catch NT_STATUS_NETWORK_SESSION_EXPIRED
		 * for lock requests only.
		 *
		 * Unlock requests still need to be processed!
		 *
		 * This means smbd_smb2_request_check_session()
		 * can't handle the difference and always
		 * allows SMB2_OP_LOCK.
		 */
		if (in_locks[0].flags != SMB2_LOCK_FLAG_UNLOCK) {
			return smbd_smb2_request_error(req, status);
		}
	}
	lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}
	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}
	subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
				     req, in_fsp,
				     in_lock_sequence,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
			return;
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->xconn,
						 nt_errstr(error));
		return;
	}
}
static void smbd_smb2_lock_cleanup(struct tevent_req *req,
				   enum tevent_req_state req_state);
static void smbd_smb2_lock_try(struct tevent_req *req);
static void smbd_smb2_lock_retry(struct tevent_req *subreq);
static bool smbd_smb2_lock_cancel(struct tevent_req *req);
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint32_t in_lock_sequence,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool check_lock_sequence = false;
	uint32_t lock_sequence_bucket = 0;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->fsp = fsp;
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	tevent_req_set_cleanup_fn(req, smbd_smb2_lock_cleanup);

	state->smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(state->smb1req, req)) {
		return tevent_req_post(req, ev);
	}

	DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
		  fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));
	/*
	 * Windows sets check_lock_sequence = true
	 * only for resilient and persistent handles.
	 *
	 * [MS-SMB2] 3.3.5.14 Receiving an SMB2 LOCK Request
	 *
	 * ... if Open.IsResilient or Open.IsDurable or Open.IsPersistent is
	 * TRUE or if Connection.Dialect belongs to the SMB 3.x dialect family
	 * and Connection.ServerCapabilities includes
	 * SMB2_GLOBAL_CAP_MULTI_CHANNEL bit, the server SHOULD<314>
	 * perform lock sequence verification ...
	 *
	 * <314> Section 3.3.5.14: Windows 7 and Windows Server 2008 R2 perform
	 * lock sequence verification only when Open.IsResilient is TRUE.
	 * Windows 8 through Windows 10 v1909 and Windows Server 2012 through
	 * Windows Server v1909 perform lock sequence verification only when
	 * Open.IsResilient or Open.IsPersistent is TRUE.
	 *
	 * Note <314> also applies to all versions (at least) up to
	 * Windows Server v2004.
	 *
	 * Hopefully this will be fixed in future Windows versions and they
	 * will avoid Note <314>.
	 *
	 * We implement what the specification says by default, but
	 * allow "smb2 disable lock sequence checking = yes" to
	 * behave like Windows again.
	 *
	 * Note that we already check the dialect before setting
	 * SMB2_CAP_MULTI_CHANNEL in smb2_negprot.c.
	 */
	if (smb2req->xconn->smb2.server.capabilities & SMB2_CAP_MULTI_CHANNEL) {
		check_lock_sequence = true;
	}
	if (fsp->op->global->durable) {
		check_lock_sequence = true;
	}
	if (check_lock_sequence) {
		bool disable_lock_sequence_checking =
			lp_smb2_disable_lock_sequence_checking();

		if (disable_lock_sequence_checking) {
			check_lock_sequence = false;
		}
	}
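	/*
	 * The LockSequence field splits into a 4-bit sequence number
	 * (the low nibble) and a 1-based bucket index (the remaining
	 * bits). Bucket 0, or a bucket beyond the size of
	 * lock_sequence_array, leaves the check disabled.
	 */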
	if (check_lock_sequence) {
		state->lock_sequence_value = in_lock_sequence & 0xF;
		lock_sequence_bucket = in_lock_sequence >> 4;
	}
	if ((lock_sequence_bucket > 0) &&
	    (lock_sequence_bucket <= sizeof(fsp->op->global->lock_sequence_array)))
	{
		uint32_t idx = lock_sequence_bucket - 1;
		uint8_t *array = fsp->op->global->lock_sequence_array;

		state->lock_sequence_element = &array[idx];
	}
	if (state->lock_sequence_element != NULL) {
		/*
		 * The incoming 'state->lock_sequence_value' is masked with 0xF.
		 *
		 * Note that per default '*state->lock_sequence_element'
		 * is invalid: it holds 0xFF, a value that can never match
		 * an incoming value.
		 */
		if (*state->lock_sequence_element == state->lock_sequence_value)
		{
			DBG_INFO("replayed smb2 lock request detected: "
				 "file %s, value %u, bucket %u\n",
				 fsp_str_dbg(fsp),
				 (unsigned)state->lock_sequence_value,
				 (unsigned)lock_sequence_bucket);
			tevent_req_done(req);
			return tevent_req_post(req, ev);
		}
		/*
		 * If it's not a replay, mark the element as
		 * invalid again.
		 */
		*state->lock_sequence_element = 0xFF;
	}
	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}
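	/*
	 * [MS-SMB2] 2.2.26.1: the only valid flag combinations are
	 * SHARED, EXCLUSIVE, either of those with FAIL_IMMEDIATELY,
	 * and UNLOCK. Plain SHARED/EXCLUSIVE (without FAIL_IMMEDIATELY)
	 * makes this a blocking lock request.
	 */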
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		state->blocking = true;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock element carries the UNLOCK bit -
		   see [MS-SMB2] 3.3.5.14 */
		isunlock = true;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}
	if (!isunlock && (in_lock_count > 1)) {
		/*
		 * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
		 * have more than one lock and one of those is blocking.
		 */
		for (i=0; i<in_lock_count; i++) {
			uint32_t flags = in_locks[i].flags;

			if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
				tevent_req_nterror(
					req, NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
		}
	}
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;
		bool posix_handle = (fsp->posix_flags & FSP_POSIX_FLAGS_OPEN);

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
				break;
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements before.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (posix_handle) {
			locks[i].lock_flav = POSIX_LOCK;
		} else {
			locks[i].lock_flav = WINDOWS_LOCK;
		}
		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			if (posix_handle && fsp->fsp_flags.can_write == false) {
				/*
				 * Can't get a write lock on a posix
				 * read-only handle.
				 */
				DBG_INFO("POSIX write lock requested "
					 "on read-only handle for file %s\n",
					 fsp_str_dbg(fsp));
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_HANDLE);
				return tevent_req_post(req, ev);
			}
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}
		DBG_DEBUG("index %"PRIu16" offset=%"PRIu64", count=%"PRIu64", "
			  "smblctx = %"PRIu64" type %d\n",
			  i,
			  locks[i].offset,
			  locks[i].count,
			  locks[i].smblctx,
			  (int)locks[i].brltype);
	}
	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_unlocking(
			state->smb1req, fsp, in_lock_count, locks);

		if (tevent_req_nterror(req, status)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
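	/*
	 * Try to grab all locks synchronously first. If that already
	 * succeeded or failed we are done; otherwise the request goes
	 * async and is attached to the fsp's aio list, so that a close
	 * on the handle can find and cancel it.
	 */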
	smbd_smb2_lock_try(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
	aio_add_req_to_fsp(state->fsp, req);
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return req;
}
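/*
 * The tevent_req cleanup hook runs on every request state transition;
 * only a successful completion (TEVENT_REQ_DONE) records the lock
 * sequence value for later replay detection.
 */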
static void smbd_smb2_lock_cleanup(struct tevent_req *req,
				   enum tevent_req_state req_state)
{
	struct smbd_smb2_lock_state *state = tevent_req_data(
		req, struct smbd_smb2_lock_state);

	if (req_state != TEVENT_REQ_DONE) {
		return;
	}

	if (state->lock_sequence_element != NULL) {
		/*
		 * On success we remember the given/incoming
		 * value (which was masked with 0xF).
		 */
		*state->lock_sequence_element = state->lock_sequence_value;
	}
}
static void smbd_smb2_lock_update_retry_msecs(
	struct smbd_smb2_lock_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms,
	 * we just use half of it to trigger the first retry.
	 *
	 * v_min is in the range of 0.001 to 10 secs
	 * (0.1 secs by default)
	 *
	 * v_max is in the range of 0.01 to 100 secs
	 * (1.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.1, 0.2, 0.3, 0.4, ... 1.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
	uint32_t v_max = 10 * v_min;

	if (state->retry_msecs >= v_max) {
		state->retry_msecs = v_max;
		return;
	}

	state->retry_msecs += v_min;
}
static void smbd_smb2_lock_update_polling_msecs(
	struct smbd_smb2_lock_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms.
	 *
	 * v_min is in the range of 0.002 to 20 secs
	 * (0.2 secs by default)
	 *
	 * v_max is in the range of 0.02 to 200 secs
	 * (2.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.2, 0.4, 0.6, 0.8, ... 2.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
	uint32_t v_max = 10 * v_min;

	if (state->polling_msecs >= v_max) {
		state->polling_msecs = v_max;
		return;
	}

	state->polling_msecs += v_min;
}
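/*
 * Take the share mode lock and attempt all lock elements via
 * smbd_do_locks_try(). On contention, set up a dbwrap watch on the
 * share mode record (plus an endtime when timed retries or polling
 * are needed) and come back through smbd_smb2_lock_retry().
 */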
static void smbd_smb2_lock_try(struct tevent_req *req)
{
	struct smbd_smb2_lock_state *state = tevent_req_data(
		req, struct smbd_smb2_lock_state);
	struct share_mode_lock *lck = NULL;
	uint16_t blocker_idx;
	struct server_id blocking_pid = { 0 };
	uint64_t blocking_smblctx;
	NTSTATUS status;
	struct tevent_req *subreq = NULL;
	struct timeval endtime = { 0 };

	lck = get_existing_share_mode_lock(
		talloc_tos(), state->fsp->file_id);
	if (tevent_req_nomem(lck, req)) {
		return;
	}
	status = smbd_do_locks_try(
		state->fsp,
		state->lock_count,
		state->locks,
		&blocker_idx,
		&blocking_pid,
		&blocking_smblctx);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(lck);
		tevent_req_done(req);
		return;
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got NT_STATUS_RETRY,
		 * so we reset polling_msecs so that
		 * the retries based on LOCK_NOT_GRANTED
		 * will later start with small intervals again.
		 */
		state->polling_msecs = 0;

		/*
		 * The backend wasn't able to decide yet.
		 * We need to wait even for non-blocking
		 * locks.
		 *
		 * The backend uses blocking_smblctx == UINT64_MAX
		 * to indicate that we should use retry timers.
		 *
		 * It uses blocking_smblctx == 0 to indicate
		 * it will use share_mode_wakeup_waiters()
		 * to wake us. Note that unrelated changes in
		 * locking.tdb may cause retries.
		 */

		if (blocking_smblctx != UINT64_MAX) {
			SMB_ASSERT(blocking_smblctx == 0);
			goto setup_retry;
		}

		smbd_smb2_lock_update_retry_msecs(state);

		DBG_DEBUG("Waiting for a backend decision. "
			  "Retry in %"PRIu32" msecs\n",
			  state->retry_msecs);

		/*
		 * We completely ignore state->endtime here,
		 * as we'll wait for a backend decision forever.
		 * If the backend is smart enough to implement
		 * some NT_STATUS_RETRY logic, it has to
		 * switch to any other status afterwards in order
		 * to avoid waiting forever.
		 */
		endtime = timeval_current_ofs_msec(state->retry_msecs);
		goto setup_retry;
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * This is a bug and will be changed into an assert
		 * in a future version. We should only
		 * ever get NT_STATUS_LOCK_NOT_GRANTED here!
		 */
		static uint64_t _bug_count;
		int _level = (_bug_count++ == 0) ? DBGLVL_ERR : DBGLVL_DEBUG;
		DBG_PREFIX(_level, ("BUG: Got %s mapping to "
				    "NT_STATUS_LOCK_NOT_GRANTED\n",
				    nt_errstr(status)));
		status = NT_STATUS_LOCK_NOT_GRANTED;
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(lck);
		tevent_req_nterror(req, status);
		return;
	}

	/*
	 * We got LOCK_NOT_GRANTED, make sure
	 * a following STATUS_RETRY will start
	 * with short intervals again.
	 */
	state->retry_msecs = 0;
	if (!state->blocking) {
		TALLOC_FREE(lck);
		tevent_req_nterror(req, status);
		return;
	}

	if (blocking_smblctx == UINT64_MAX) {
		smbd_smb2_lock_update_polling_msecs(state);

		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
			  state->polling_msecs);

		endtime = timeval_current_ofs_msec(state->polling_msecs);
	}
setup_retry:
	DBG_DEBUG("Watching share mode lock\n");

	subreq = share_mode_watch_send(
		state, state->ev, lck, blocking_pid);
	TALLOC_FREE(lck);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, smbd_smb2_lock_retry, req);

	if (!timeval_is_zero(&endtime)) {
		bool ok;

		ok = tevent_req_set_endtime(subreq,
					    state->ev,
					    endtime);
		if (!ok) {
			tevent_req_oom(req);
			return;
		}
	}
}
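/*
 * Called when the share mode record changed or the retry/poll timer
 * fired; a timer expiry arrives as NT_STATUS_IO_TIMEOUT and simply
 * triggers another smbd_smb2_lock_try() pass.
 */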
static void smbd_smb2_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct smbd_smb2_lock_state *state = tevent_req_data(
		req, struct smbd_smb2_lock_state);
	NTSTATUS status;
	bool ok;

	/*
	 * Make sure we run as the user again
	 */
	ok = change_to_user_and_service_by_fsp(state->fsp);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
		return;
	}

	status = share_mode_watch_recv(subreq, NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		/*
		 * This is just a trigger for a timed retry.
		 */
		status = NT_STATUS_OK;
	}
	if (tevent_req_nterror(req, status)) {
		return;
	}

	smbd_smb2_lock_try(req);
}
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}
/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/
static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
		struct smbd_smb2_lock_state);
	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;

	/*
	 * If the request is canceled because of close, logoff or tdis
	 * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
	 * NT_STATUS_CANCELLED.
	 */
	if (state->fsp->fsp_flags.closing ||
	    !NT_STATUS_IS_OK(smb2req->session->status) ||
	    !NT_STATUS_IS_OK(smb2req->tcon->status)) {
		tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
		return true;
	}

	tevent_req_nterror(req, NT_STATUS_CANCELLED);
	return true;
}