/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2009
   Copyright (C) Jeremy Allison 2010

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "locking/share_mode_lock.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "../libcli/smb/smb_common.h"
#include "../lib/util/tevent_ntstatus.h"
#include "lib/dbwrap/dbwrap_watch.h"
#include "librpc/gen_ndr/open_files.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_SMB2
struct smbd_smb2_lock_element {
	uint64_t offset;
	uint64_t length;
	uint32_t flags;
};
struct smbd_smb2_lock_state {
	struct tevent_context *ev;
	struct smbd_smb2_request *smb2req;
	struct smb_request *smb1req;
	struct files_struct *fsp;
	bool blocking;
	uint32_t polling_msecs;
	uint32_t retry_msecs;
	uint16_t lock_count;
	struct smbd_lock_element *locks;
	uint8_t lock_sequence_value;
	uint8_t *lock_sequence_element;
};
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *in_fsp,
					      uint32_t in_lock_sequence,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks);
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req);

static void smbd_smb2_request_lock_done(struct tevent_req *subreq);
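
/*
 * Parse and validate the body of an incoming SMB2 LOCK request and
 * hand the lock elements over to the async engine started by
 * smbd_smb2_lock_send().
 */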
NTSTATUS smbd_smb2_request_process_lock(struct smbd_smb2_request *req)
{
	const uint8_t *inbody;
	uint16_t in_lock_count;
	uint32_t in_lock_sequence;
	uint64_t in_file_id_persistent;
	uint64_t in_file_id_volatile;
	struct files_struct *in_fsp;
	struct smbd_smb2_lock_element *in_locks;
	struct tevent_req *subreq;
	const uint8_t *lock_buffer;
	uint16_t l;
	NTSTATUS status;
	status = smbd_smb2_request_verify_sizes(req, 0x30);
	if (!NT_STATUS_IS_OK(status)) {
		return smbd_smb2_request_error(req, status);
	}
	inbody = SMBD_SMB2_IN_BODY_PTR(req);

	in_lock_count = CVAL(inbody, 0x02);
	if (req->xconn->protocol >= PROTOCOL_SMB2_10) {
		in_lock_sequence = IVAL(inbody, 0x04);
	} else {
		/* 0x04 - 4 bytes reserved */
		in_lock_sequence = 0;
	}
	in_file_id_persistent = BVAL(inbody, 0x08);
	in_file_id_volatile = BVAL(inbody, 0x10);

	if (in_lock_count < 1) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	if (((in_lock_count - 1) * 0x18) > SMBD_SMB2_IN_DYN_LEN(req)) {
		return smbd_smb2_request_error(req, NT_STATUS_INVALID_PARAMETER);
	}

	in_locks = talloc_array(req, struct smbd_smb2_lock_element,
				in_lock_count);
	if (in_locks == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
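
	/*
	 * The first lock element lives in the fixed part of the request
	 * body at offset 0x18; any further elements follow in the
	 * dynamic buffer, 0x18 bytes each.
	 */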
	l = 0;
	lock_buffer = inbody + 0x18;

	in_locks[l].offset = BVAL(lock_buffer, 0x00);
	in_locks[l].length = BVAL(lock_buffer, 0x08);
	in_locks[l].flags = IVAL(lock_buffer, 0x10);
	/* 0x14 - 4 reserved bytes */

	status = req->session->status;
	if (NT_STATUS_EQUAL(status, NT_STATUS_NETWORK_SESSION_EXPIRED)) {
		/*
		 * We need to catch NT_STATUS_NETWORK_SESSION_EXPIRED
		 * for lock requests only.
		 *
		 * Unlock requests still need to be processed!
		 *
		 * This means smbd_smb2_request_check_session()
		 * can't handle the difference and always
		 * allows SMB2_OP_LOCK.
		 */
		if (in_locks[0].flags != SMB2_LOCK_FLAG_UNLOCK) {
			return smbd_smb2_request_error(req, status);
		}
	}

	lock_buffer = SMBD_SMB2_IN_DYN_PTR(req);

	for (l=1; l < in_lock_count; l++) {
		in_locks[l].offset = BVAL(lock_buffer, 0x00);
		in_locks[l].length = BVAL(lock_buffer, 0x08);
		in_locks[l].flags = IVAL(lock_buffer, 0x10);
		/* 0x14 - 4 reserved bytes */

		lock_buffer += 0x18;
	}

	in_fsp = file_fsp_smb2(req, in_file_id_persistent, in_file_id_volatile);
	if (in_fsp == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_FILE_CLOSED);
	}

	subreq = smbd_smb2_lock_send(req, req->sconn->ev_ctx,
				     req, in_fsp,
				     in_lock_sequence,
				     in_lock_count,
				     in_locks);
	if (subreq == NULL) {
		return smbd_smb2_request_error(req, NT_STATUS_NO_MEMORY);
	}
	tevent_req_set_callback(subreq, smbd_smb2_request_lock_done, req);

	return smbd_smb2_request_pending_queue(req, subreq, 500);
}
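
/*
 * Completion callback: map the result of the lock operation to either
 * an SMB2 error response or the fixed 4-byte LOCK success body.
 */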
static void smbd_smb2_request_lock_done(struct tevent_req *subreq)
{
	struct smbd_smb2_request *smb2req = tevent_req_callback_data(subreq,
					struct smbd_smb2_request);
	DATA_BLOB outbody;
	NTSTATUS status;
	NTSTATUS error; /* transport error */

	status = smbd_smb2_lock_recv(subreq);
	TALLOC_FREE(subreq);
	if (!NT_STATUS_IS_OK(status)) {
		error = smbd_smb2_request_error(smb2req, status);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
		}
		return;
	}

	outbody = smbd_smb2_generate_outbody(smb2req, 0x04);
	if (outbody.data == NULL) {
		error = smbd_smb2_request_error(smb2req, NT_STATUS_NO_MEMORY);
		if (!NT_STATUS_IS_OK(error)) {
			smbd_server_connection_terminate(smb2req->xconn,
							 nt_errstr(error));
		}
		return;
	}

	SSVAL(outbody.data, 0x00, 0x04);	/* struct size */
	SSVAL(outbody.data, 0x02, 0);		/* reserved */

	error = smbd_smb2_request_done(smb2req, outbody, NULL);
	if (!NT_STATUS_IS_OK(error)) {
		smbd_server_connection_terminate(smb2req->xconn,
						 nt_errstr(error));
	}
}
static void smbd_smb2_lock_cleanup(struct tevent_req *req,
				   enum tevent_req_state req_state);
static void smbd_smb2_lock_try(struct tevent_req *req);
static void smbd_smb2_lock_retry(struct tevent_req *subreq);
static bool smbd_smb2_lock_cancel(struct tevent_req *req);
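
/*
 * Start the actual lock/unlock work: check the lock sequence for
 * replay detection, validate the lock element flags, translate the
 * SMB2 elements into smbd_lock_element structures, and either perform
 * the unlock synchronously or try (and possibly keep retrying) the
 * lock via smbd_smb2_lock_try().
 */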
static struct tevent_req *smbd_smb2_lock_send(TALLOC_CTX *mem_ctx,
					      struct tevent_context *ev,
					      struct smbd_smb2_request *smb2req,
					      struct files_struct *fsp,
					      uint32_t in_lock_sequence,
					      uint16_t in_lock_count,
					      struct smbd_smb2_lock_element *in_locks)
{
	struct tevent_req *req;
	struct smbd_smb2_lock_state *state;
	bool isunlock = false;
	uint16_t i;
	struct smbd_lock_element *locks;
	NTSTATUS status;
	bool check_lock_sequence = false;
	uint32_t lock_sequence_bucket = 0;

	req = tevent_req_create(mem_ctx, &state,
				struct smbd_smb2_lock_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->fsp = fsp;
	state->smb2req = smb2req;
	smb2req->subreq = req; /* So we can find this when going async. */

	tevent_req_set_cleanup_fn(req, smbd_smb2_lock_cleanup);

	state->smb1req = smbd_smb2_fake_smb_request(smb2req);
	if (tevent_req_nomem(state->smb1req, req)) {
		return tevent_req_post(req, ev);
	}

	DEBUG(10,("smbd_smb2_lock_send: %s - %s\n",
		  fsp_str_dbg(fsp), fsp_fnum_dbg(fsp)));
	/*
	 * Windows sets check_lock_sequence = true
	 * only for resilient and persistent handles.
	 *
	 * [MS-SMB2] 3.3.5.14 Receiving an SMB2 LOCK Request
	 *
	 *  ... if Open.IsResilient or Open.IsDurable or Open.IsPersistent is
	 *  TRUE or if Connection.Dialect belongs to the SMB 3.x dialect family
	 *  and Connection.ServerCapabilities includes
	 *  SMB2_GLOBAL_CAP_MULTI_CHANNEL bit, the server SHOULD<314>
	 *  perform lock sequence verification ...
	 *
	 * <314> Section 3.3.5.14: Windows 7 and Windows Server 2008 R2 perform
	 *  lock sequence verification only when Open.IsResilient is TRUE.
	 *  Windows 8 through Windows 10 v1909 and Windows Server 2012 through
	 *  Windows Server v1909 perform lock sequence verification only when
	 *  Open.IsResilient or Open.IsPersistent is TRUE.
	 *
	 * Note <314> also applies to all versions (at least) up to
	 * Windows Server v2004.
	 *
	 * Hopefully this will be fixed in future Windows versions and they
	 * will avoid Note <314>.
	 *
	 * We implement what the specification says by default, but
	 * allow "smb2 disable lock sequence checking = yes" to
	 * behave like Windows again.
	 *
	 * Note that we already check the dialect before setting
	 * SMB2_CAP_MULTI_CHANNEL in smb2_negprot.c.
	 */
	if (smb2req->xconn->smb2.server.capabilities & SMB2_CAP_MULTI_CHANNEL) {
		check_lock_sequence = true;
	}
	if (fsp->op->global->durable) {
		check_lock_sequence = true;
	}

	if (check_lock_sequence) {
		bool disable_lock_sequence_checking =
			lp_smb2_disable_lock_sequence_checking();

		if (disable_lock_sequence_checking) {
			check_lock_sequence = false;
		}
	}
	if (check_lock_sequence) {
		state->lock_sequence_value = in_lock_sequence & 0xF;
		lock_sequence_bucket = in_lock_sequence >> 4;
	}
	if ((lock_sequence_bucket > 0) &&
	    (lock_sequence_bucket <= sizeof(fsp->op->global->lock_sequence_array)))
	{
		uint32_t idx = lock_sequence_bucket - 1;
		uint8_t *array = fsp->op->global->lock_sequence_array;

		state->lock_sequence_element = &array[idx];
	}
	if (state->lock_sequence_element != NULL) {
		/*
		 * The incoming 'state->lock_sequence_value' is masked with 0xF.
		 *
		 * Note that per default '*state->lock_sequence_element'
		 * is invalid, a value of 0xFF that can never match an
		 * incoming value.
		 */
		if (*state->lock_sequence_element == state->lock_sequence_value)
		{
			DBG_INFO("replayed smb2 lock request detected: "
				 "file %s, value %u, bucket %u\n",
				 fsp_str_dbg(fsp),
				 (unsigned)state->lock_sequence_value,
				 (unsigned)lock_sequence_bucket);
			tevent_req_done(req);
			return tevent_req_post(req, ev);
		}
		/*
		 * If it's not a replay, mark the element as
		 * invalid again.
		 */
		*state->lock_sequence_element = 0xFF;
	}
	locks = talloc_array(state, struct smbd_lock_element, in_lock_count);
	if (locks == NULL) {
		tevent_req_nterror(req, NT_STATUS_NO_MEMORY);
		return tevent_req_post(req, ev);
	}
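
	/*
	 * The flags of the first lock element determine the mode of the
	 * whole request: a blocking lock, a non-blocking lock
	 * (FAIL_IMMEDIATELY) or an unlock batch ([MS-SMB2] 3.3.5.14).
	 */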
	switch (in_locks[0].flags) {
	case SMB2_LOCK_FLAG_SHARED:
	case SMB2_LOCK_FLAG_EXCLUSIVE:
		if (in_lock_count > 1) {
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}
		state->blocking = true;
		break;

	case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
	case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		break;

	case SMB2_LOCK_FLAG_UNLOCK:
		/* only the first lock gives the UNLOCK bit - see
		   [MS-SMB2] 3.3.5.14 */
		isunlock = true;
		break;

	default:
		tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
		return tevent_req_post(req, ev);
	}
	if (!isunlock && (in_lock_count > 1)) {
		/*
		 * 3.3.5.14.2 says we SHOULD fail with INVALID_PARAMETER if we
		 * have more than one lock and one of those is blocking.
		 */
		for (i=0; i<in_lock_count; i++) {
			uint32_t flags = in_locks[i].flags;

			if ((flags & SMB2_LOCK_FLAG_FAIL_IMMEDIATELY) == 0) {
				tevent_req_nterror(
					req, NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
		}
	}
	for (i=0; i<in_lock_count; i++) {
		bool invalid = false;

		switch (in_locks[i].flags) {
		case SMB2_LOCK_FLAG_SHARED:
		case SMB2_LOCK_FLAG_EXCLUSIVE:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_SHARED|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
		case SMB2_LOCK_FLAG_EXCLUSIVE|SMB2_LOCK_FLAG_FAIL_IMMEDIATELY:
			if (isunlock) {
				invalid = true;
			}
			break;

		case SMB2_LOCK_FLAG_UNLOCK:
			if (!isunlock) {
				tevent_req_nterror(req,
						   NT_STATUS_INVALID_PARAMETER);
				return tevent_req_post(req, ev);
			}
			break;

		default:
			if (isunlock) {
				/*
				 * If the first element was an UNLOCK
				 * we need to defer the error response
				 * to the backend, because we need to
				 * process all unlock elements first.
				 */
				invalid = true;
				break;
			}
			tevent_req_nterror(req, NT_STATUS_INVALID_PARAMETER);
			return tevent_req_post(req, ev);
		}

		locks[i].req_guid = smbd_request_guid(smb2req->smb1req, i);
		locks[i].smblctx = fsp->op->global->open_persistent_id;
		locks[i].offset = in_locks[i].offset;
		locks[i].count = in_locks[i].length;

		if (in_locks[i].flags & SMB2_LOCK_FLAG_EXCLUSIVE) {
			locks[i].brltype = WRITE_LOCK;
		} else if (in_locks[i].flags & SMB2_LOCK_FLAG_SHARED) {
			locks[i].brltype = READ_LOCK;
		} else if (invalid) {
			/*
			 * this is an invalid UNLOCK element
			 * and the backend needs to test for
			 * brltype != UNLOCK_LOCK and return
			 * NT_STATUS_INVALID_PARAMETER
			 */
			locks[i].brltype = READ_LOCK;
		} else {
			locks[i].brltype = UNLOCK_LOCK;
		}

		DBG_DEBUG("index %"PRIu16" offset=%"PRIu64", count=%"PRIu64", "
			  "smblctx = %"PRIu64" type %d\n",
			  i,
			  locks[i].offset,
			  locks[i].count,
			  locks[i].smblctx,
			  (int)locks[i].brltype);
	}
	state->locks = locks;
	state->lock_count = in_lock_count;

	if (isunlock) {
		status = smbd_do_unlocking(
			state->smb1req, fsp, in_lock_count, locks, WINDOWS_LOCK);

		if (tevent_req_nterror(req, status)) {
			return tevent_req_post(req, ev);
		}
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}
	smbd_smb2_lock_try(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	tevent_req_defer_callback(req, smb2req->sconn->ev_ctx);
	aio_add_req_to_fsp(state->fsp, req);
	tevent_req_set_cancel_fn(req, smbd_smb2_lock_cancel);

	return req;
}
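
/*
 * Cleanup hook: on success remember the lock sequence value in the
 * per-open bucket so that a retransmitted (replayed) lock request can
 * be detected and acknowledged without redoing the locking.
 */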
static void smbd_smb2_lock_cleanup(struct tevent_req *req,
				   enum tevent_req_state req_state)
{
	struct smbd_smb2_lock_state *state = tevent_req_data(
		req, struct smbd_smb2_lock_state);

	if (req_state != TEVENT_REQ_DONE) {
		return;
	}

	if (state->lock_sequence_element != NULL) {
		/*
		 * On success we remember the given/incoming
		 * value (which was masked with 0xF).
		 */
		*state->lock_sequence_element = state->lock_sequence_value;
	}
}
static void smbd_smb2_lock_update_retry_msecs(
	struct smbd_smb2_lock_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms,
	 * we just use half of it to trigger the first retry.
	 *
	 * v_min is in the range of 0.001 to 10 secs
	 * (0.1 secs by default)
	 *
	 * v_max is in the range of 0.01 to 100 secs
	 * (1.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.1, 0.2, 0.3, 0.4, ... 1.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
	uint32_t v_max = 10 * v_min;

	if (state->retry_msecs >= v_max) {
		state->retry_msecs = v_max;
		return;
	}

	state->retry_msecs += v_min;
}
static void smbd_smb2_lock_update_polling_msecs(
	struct smbd_smb2_lock_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms.
	 *
	 * v_min is in the range of 0.002 to 20 secs
	 * (0.2 secs by default)
	 *
	 * v_max is in the range of 0.02 to 200 secs
	 * (2.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.2, 0.4, 0.6, 0.8, ... 2.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
	uint32_t v_max = 10 * v_min;

	if (state->polling_msecs >= v_max) {
		state->polling_msecs = v_max;
		return;
	}

	state->polling_msecs += v_min;
}
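
/*
 * Try to grab the byte-range locks under the share mode lock. If the
 * attempt cannot succeed (yet), arrange to be woken up via
 * share_mode_watch_send() and/or a timer and retry from
 * smbd_smb2_lock_retry().
 */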
static void smbd_smb2_lock_try(struct tevent_req *req)
{
	struct smbd_smb2_lock_state *state = tevent_req_data(
		req, struct smbd_smb2_lock_state);
	struct share_mode_lock *lck = NULL;
	uint16_t blocker_idx;
	struct server_id blocking_pid = { 0 };
	uint64_t blocking_smblctx;
	NTSTATUS status;
	struct tevent_req *subreq = NULL;
	struct timeval endtime = { 0 };

	lck = get_existing_share_mode_lock(
		talloc_tos(), state->fsp->file_id);
	if (tevent_req_nomem(lck, req)) {
		return;
	}

	status = smbd_do_locks_try(
		state->fsp,
		WINDOWS_LOCK,
		state->lock_count,
		state->locks,
		&blocker_idx,
		&blocking_pid,
		&blocking_smblctx);
	if (NT_STATUS_IS_OK(status)) {
		TALLOC_FREE(lck);
		tevent_req_done(req);
		return;
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got NT_STATUS_RETRY,
		 * we reset polling_msecs so that
		 * the retries based on LOCK_NOT_GRANTED
		 * will later start with small intervals again.
		 */
		state->polling_msecs = 0;

		/*
		 * The backend wasn't able to decide yet.
		 * We need to wait even for non-blocking
		 * locks.
		 *
		 * The backend uses blocking_smblctx == UINT64_MAX
		 * to indicate that we should use retry timers.
		 *
		 * It uses blocking_smblctx == 0 to indicate
		 * it will use share_mode_wakeup_waiters()
		 * to wake us. Note that unrelated changes in
		 * locking.tdb may cause retries.
		 */

		if (blocking_smblctx != UINT64_MAX) {
			SMB_ASSERT(blocking_smblctx == 0);
			goto setup_retry;
		}

		smbd_smb2_lock_update_retry_msecs(state);

		DBG_DEBUG("Waiting for a backend decision. "
			  "Retry in %"PRIu32" msecs\n",
			  state->retry_msecs);

		/*
		 * We completely ignore state->endtime here,
		 * as we'll wait for a backend decision forever.
		 * If the backend is smart enough to implement
		 * some NT_STATUS_RETRY logic, it has to
		 * switch to another status afterwards in order
		 * to avoid waiting forever.
		 */
		endtime = timeval_current_ofs_msec(state->retry_msecs);
		goto setup_retry;
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_FILE_LOCK_CONFLICT)) {
		/*
		 * This is a bug and will be changed into an assert
		 * in a future version. We should only
		 * ever get NT_STATUS_LOCK_NOT_GRANTED here!
		 */
		static uint64_t _bug_count;
		int _level = (_bug_count++ == 0) ? DBGLVL_ERR : DBGLVL_DEBUG;
		DBG_PREFIX(_level, ("BUG: Got %s mapping to "
			   "NT_STATUS_LOCK_NOT_GRANTED\n",
			   nt_errstr(status)));
		status = NT_STATUS_LOCK_NOT_GRANTED;
	}
	if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
		TALLOC_FREE(lck);
		tevent_req_nterror(req, status);
		return;
	}

	/*
	 * We got LOCK_NOT_GRANTED, make sure
	 * a following STATUS_RETRY will start
	 * with short intervals again.
	 */
	state->retry_msecs = 0;

	if (!state->blocking) {
		TALLOC_FREE(lck);
		tevent_req_nterror(req, status);
		return;
	}

	if (blocking_smblctx == UINT64_MAX) {
		smbd_smb2_lock_update_polling_msecs(state);

		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
			  state->polling_msecs);

		endtime = timeval_current_ofs_msec(state->polling_msecs);
	}

setup_retry:
	DBG_DEBUG("Watching share mode lock\n");

	subreq = share_mode_watch_send(
		state, state->ev, lck, blocking_pid);
	TALLOC_FREE(lck);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, smbd_smb2_lock_retry, req);

	if (!timeval_is_zero(&endtime)) {
		bool ok;

		ok = tevent_req_set_endtime(subreq,
					    state->ev,
					    endtime);
		if (!ok) {
			tevent_req_oom(req);
			return;
		}
	}
}
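
/*
 * Wakeup callback: re-impersonate the user who issued the request and
 * retry the lock attempt.
 */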
static void smbd_smb2_lock_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct smbd_smb2_lock_state *state = tevent_req_data(
		req, struct smbd_smb2_lock_state);
	NTSTATUS status;
	bool ok;

	/*
	 * Make sure we run as the user again
	 */
	ok = change_to_user_and_service_by_fsp(state->fsp);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
		return;
	}

	status = share_mode_watch_recv(subreq, NULL, NULL);
	TALLOC_FREE(subreq);
	if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
		/*
		 * This is just a trigger for a timed retry.
		 */
		status = NT_STATUS_OK;
	}
	if (tevent_req_nterror(req, status)) {
		return;
	}

	smbd_smb2_lock_try(req);
}
static NTSTATUS smbd_smb2_lock_recv(struct tevent_req *req)
{
	return tevent_req_simple_recv_ntstatus(req);
}

/****************************************************************
 Cancel an outstanding blocking lock request.
*****************************************************************/
static bool smbd_smb2_lock_cancel(struct tevent_req *req)
{
	struct smbd_smb2_request *smb2req = NULL;
	struct smbd_smb2_lock_state *state = tevent_req_data(req,
				struct smbd_smb2_lock_state);

	if (!state) {
		return false;
	}

	if (!state->smb2req) {
		return false;
	}

	smb2req = state->smb2req;

	/*
	 * If the request is canceled because of close, logoff or tdis
	 * the status is NT_STATUS_RANGE_NOT_LOCKED instead of
	 * NT_STATUS_CANCELLED.
	 */
	if (state->fsp->fsp_flags.closing ||
	    !NT_STATUS_IS_OK(smb2req->session->status) ||
	    !NT_STATUS_IS_OK(smb2req->tcon->status)) {
		tevent_req_nterror(req, NT_STATUS_RANGE_NOT_LOCKED);
		return true;
	}

	tevent_req_nterror(req, NT_STATUS_CANCELLED);
	return true;
}