/*
   Unix SMB/CIFS implementation.
   Blocking Locking functions
   Copyright (C) Jeremy Allison 1998-2003

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "locking/share_mode_lock.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "messages.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/dbwrap/dbwrap_watch.h"
#include "librpc/gen_ndr/ndr_open_files.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
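
/*
 * Async (tevent-based) blocking byte-range locks for SMB1.  A lock
 * request that cannot be granted immediately is parked on the fsp
 * (fsp->blocked_smb1_lock_reqs) and retried when locking.tdb changes
 * or a retry/poll timer fires.
 */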
NTSTATUS smbd_do_locks_try(
	struct files_struct *fsp,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	struct server_id *blocking_pid,
	uint64_t *blocking_smblctx)
{
	NTSTATUS status = NT_STATUS_OK;
	uint16_t i;

	for (i=0; i<num_locks; i++) {
		struct smbd_lock_element *e = &locks[i];

		status = do_lock(
			fsp,
			locks, /* req_mem_ctx */
			e->req_guid,
			e->smblctx,
			e->count,
			e->offset,
			e->brltype,
			e->lock_flav,
			blocking_pid,
			blocking_smblctx);
		if (!NT_STATUS_IS_OK(status)) {
			break;
		}
	}

	if (NT_STATUS_IS_OK(status)) {
		return NT_STATUS_OK;
	}

	*blocker_idx = i;

	/*
	 * Undo the locks we successfully got
	 */
	for (i = i-1; i != UINT16_MAX; i--) {
		struct smbd_lock_element *e = &locks[i];
		do_unlock(fsp,
			  e->smblctx,
			  e->count,
			  e->offset,
			  e->lock_flav);
	}

	return status;
}
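
/*
 * Note the all-or-nothing semantics above: if, say, the second of
 * three locks conflicts, the loop breaks with i == 1, *blocker_idx is
 * set to 1 and the already-granted lock 0 is unwound, so callers never
 * observe a partially applied batch.
 */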
static bool smbd_smb1_fsp_add_blocked_lock_req(
	struct files_struct *fsp, struct tevent_req *req)
{
	size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
	struct tevent_req **tmp = NULL;

	tmp = talloc_realloc(
		fsp,
		fsp->blocked_smb1_lock_reqs,
		struct tevent_req *,
		num_reqs+1);
	if (tmp == NULL) {
		return false;
	}
	fsp->blocked_smb1_lock_reqs = tmp;
	fsp->blocked_smb1_lock_reqs[num_reqs] = req;
	return true;
}
struct smbd_smb1_do_locks_state {
	struct tevent_context *ev;
	struct smb_request *smbreq;
	struct files_struct *fsp;
	uint32_t timeout;
	uint32_t polling_msecs;
	uint32_t retry_msecs;
	struct timeval endtime;
	bool large_offset;	/* required for correct cancel */
	uint16_t num_locks;
	struct smbd_lock_element *locks;
	uint16_t blocker;
	NTSTATUS deny_status;
};
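
/*
 * Two separate backoff counters: retry_msecs paces retries while the
 * backend keeps answering NT_STATUS_RETRY (no decision yet), whereas
 * polling_msecs paces polling for posix locks, where no wakeup message
 * will ever arrive.  "blocker" indexes the element in "locks" that
 * could not be granted.
 */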
static void smbd_smb1_do_locks_try(struct tevent_req *req);
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
static void smbd_smb1_blocked_locks_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state);
static NTSTATUS smbd_smb1_do_locks_check(
	struct files_struct *fsp,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	struct server_id *blocking_pid,
	uint64_t *blocking_smblctx);
static void smbd_smb1_do_locks_setup_timeout(
	struct smbd_smb1_do_locks_state *state,
	const struct smbd_lock_element *blocker)
{
	struct files_struct *fsp = state->fsp;

	if (!timeval_is_zero(&state->endtime)) {
		/*
		 * already done
		 */
		return;
	}

	if ((state->timeout != 0) && (state->timeout != UINT32_MAX)) {
		/*
		 * Windows internal resolution for blocking locks
		 * seems to be about 200ms... Don't wait for less than
		 * that. JRA.
		 */
		state->timeout = MAX(state->timeout, lp_lock_spin_time());
	}

	if (state->timeout != 0) {
		goto set_endtime;
	}

	if (blocker == NULL) {
		goto set_endtime;
	}

	if ((blocker->offset >= 0xEF000000) &&
	    ((blocker->offset >> 63) == 0)) {
		/*
		 * This must be an optimization of an ancient
		 * application bug...
		 */
		state->timeout = lp_lock_spin_time();
	}

	if (fsp->fsp_flags.lock_failure_seen &&
	    (blocker->offset == fsp->lock_failure_offset)) {
		/*
		 * Delay repeated lock attempts on the same
		 * lock. Maybe a more advanced version of the
		 * above check?
		 */
		DBG_DEBUG("Delaying lock request due to previous "
			  "failure\n");
		state->timeout = lp_lock_spin_time();
	}

set_endtime:
	/*
	 * Note state->timeout might still be 0,
	 * but that's ok, as we don't want to retry
	 * in that case.
	 */
	state->endtime = timeval_add(&state->smbreq->request_time,
				     state->timeout / 1000,
				     (state->timeout % 1000) * 1000);
}
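
/*
 * Worked example: a client asking for a 50 msec blocking-lock timeout
 * is rounded up to lp_lock_spin_time() (200 msecs by default), and
 * endtime is computed relative to the original request_time rather
 * than to "now", so time already spent queued counts against the
 * timeout.
 */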
static void smbd_smb1_do_locks_update_retry_msecs(
	struct smbd_smb1_do_locks_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms,
	 * we just use half of it to trigger the first retry.
	 *
	 * v_min is in the range of 0.001 to 10 secs
	 * (0.1 secs by default)
	 *
	 * v_max is in the range of 0.01 to 100 secs
	 * (1.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.1, 0.2, 0.3, 0.4, ... 1.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
	uint32_t v_max = 10 * v_min;

	if (state->retry_msecs >= v_max) {
		state->retry_msecs = v_max;
		return;
	}

	state->retry_msecs += v_min;
}
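
/*
 * With the default 200ms spin time: v_min = 100, v_max = 1000, so
 * successive calls yield 100, 200, ..., 1000 msecs and then stay
 * capped at 1000.
 */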
static void smbd_smb1_do_locks_update_polling_msecs(
	struct smbd_smb1_do_locks_state *state)
{
	/*
	 * The default lp_lock_spin_time() is 200ms.
	 *
	 * v_min is in the range of 0.002 to 20 secs
	 * (0.2 secs by default)
	 *
	 * v_max is in the range of 0.02 to 200 secs
	 * (2.0 secs by default)
	 *
	 * The typical steps are:
	 * 0.2, 0.4, 0.6, 0.8, ... 2.0
	 */
	uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
	uint32_t v_max = 10 * v_min;

	if (state->polling_msecs >= v_max) {
		state->polling_msecs = v_max;
		return;
	}

	state->polling_msecs += v_min;
}
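
/*
 * Polling is only needed when the contending lock is a posix lock
 * held by a process outside smbd (blocking_smblctx == UINT64_MAX in
 * smbd_smb1_do_locks_try() below): such a holder cannot post a wakeup
 * through locking.tdb, so we poll, backing off from 0.2 to 2 secs.
 */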
struct tevent_req *smbd_smb1_do_locks_send(
	TALLOC_CTX *mem_ctx,
	struct tevent_context *ev,
	struct smb_request **smbreq, /* talloc_move()d into our state */
	struct files_struct *fsp,
	uint32_t lock_timeout,
	bool large_offset,
	uint16_t num_locks,
	struct smbd_lock_element *locks)
{
	struct tevent_req *req = NULL;
	struct smbd_smb1_do_locks_state *state = NULL;
	bool ok;

	req = tevent_req_create(
		mem_ctx, &state, struct smbd_smb1_do_locks_state);
	if (req == NULL) {
		return NULL;
	}
	state->ev = ev;
	state->smbreq = talloc_move(state, smbreq);
	state->fsp = fsp;
	state->timeout = lock_timeout;
	state->large_offset = large_offset;
	state->num_locks = num_locks;
	state->locks = locks;
	state->deny_status = NT_STATUS_LOCK_NOT_GRANTED;

	DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);

	if (num_locks == 0 || locks == NULL) {
		DBG_DEBUG("no locks\n");
		tevent_req_done(req);
		return tevent_req_post(req, ev);
	}

	if (state->locks[0].lock_flav == POSIX_LOCK) {
		/*
		 * SMB1 posix locks always use
		 * NT_STATUS_FILE_LOCK_CONFLICT.
		 */
		state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
	}

	smbd_smb1_do_locks_try(req);
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
	if (!ok) {
		tevent_req_oom(req);
		return tevent_req_post(req, ev);
	}
	tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
	return req;
}
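
/*
 * Usage sketch (caller side, simplified from the SMB1 lock paths):
 *
 *   subreq = smbd_smb1_do_locks_send(
 *       talloc_tos(), ev, &smbreq, fsp, lock_timeout, large_offset,
 *       num_locks, locks);
 *   ...later, in the completion callback...
 *   status = smbd_smb1_do_locks_recv(subreq);
 *
 * If the locks were granted (or denied) synchronously the request is
 * already completed via tevent_req_post(); otherwise it stays attached
 * to the fsp until a retry succeeds, the timeout expires or it is
 * cancelled.
 */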
static void smbd_smb1_blocked_locks_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	struct files_struct *fsp = state->fsp;
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	size_t i;

	DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
		  req,
		  state,
		  (int)req_state);

	if (req_state == TEVENT_REQ_RECEIVED) {
		DBG_DEBUG("already received\n");
		return;
	}

	for (i=0; i<num_blocked; i++) {
		if (blocked[i] == req) {
			break;
		}
	}
	SMB_ASSERT(i<num_blocked);

	ARRAY_DEL_ELEMENT(blocked, i, num_blocked);

	fsp->blocked_smb1_lock_reqs = talloc_realloc(
		fsp, blocked, struct tevent_req *, num_blocked-1);
}
static NTSTATUS smbd_smb1_do_locks_check_blocked(
	uint16_t num_blocked,
	struct smbd_lock_element *blocked,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	uint64_t *blocking_smblctx)
{
	uint16_t li;

	for (li=0; li < num_locks; li++) {
		struct smbd_lock_element *l = &locks[li];
		uint16_t bi;
		bool valid;

		valid = byte_range_valid(l->offset, l->count);
		if (!valid) {
			return NT_STATUS_INVALID_LOCK_RANGE;
		}

		for (bi = 0; bi < num_blocked; bi++) {
			struct smbd_lock_element *b = &blocked[bi];
			bool overlap;

			/* Read locks never conflict. */
			if (l->brltype == READ_LOCK && b->brltype == READ_LOCK) {
				continue;
			}

			overlap = byte_range_overlap(l->offset,
						     l->count,
						     b->offset,
						     b->count);
			if (!overlap) {
				continue;
			}

			*blocker_idx = li;
			*blocking_smblctx = b->smblctx;
			return NT_STATUS_LOCK_NOT_GRANTED;
		}
	}

	return NT_STATUS_OK;
}
static NTSTATUS smbd_smb1_do_locks_check(
	struct files_struct *fsp,
	uint16_t num_locks,
	struct smbd_lock_element *locks,
	uint16_t *blocker_idx,
	struct server_id *blocking_pid,
	uint64_t *blocking_smblctx)
{
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	NTSTATUS status;
	size_t bi;

	/*
	 * We check the pending/blocked requests
	 * from the oldest to the youngest request.
	 *
	 * Note due to the retry logic the current request
	 * might already be in the list.
	 */

	for (bi = 0; bi < num_blocked; bi++) {
		struct smbd_smb1_do_locks_state *blocked_state =
			tevent_req_data(blocked[bi],
			struct smbd_smb1_do_locks_state);

		if (blocked_state->locks == locks) {
			SMB_ASSERT(blocked_state->num_locks == num_locks);

			/*
			 * We found ourself...
			 */
			break;
		}

		status = smbd_smb1_do_locks_check_blocked(
			blocked_state->num_locks,
			blocked_state->locks,
			num_locks,
			locks,
			blocker_idx,
			blocking_smblctx);
		if (!NT_STATUS_IS_OK(status)) {
			*blocking_pid = messaging_server_id(
				fsp->conn->sconn->msg_ctx);
			return status;
		}
	}

	status = smbd_do_locks_try(
		fsp,
		num_locks,
		locks,
		blocker_idx,
		blocking_pid,
		blocking_smblctx);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	return NT_STATUS_OK;
}
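
/*
 * smbd_smb1_do_locks_try() below is the heart of the retry state
 * machine.  Roughly:
 *
 *   NT_STATUS_OK               -> complete the request
 *   NT_STATUS_RETRY            -> backend undecided; wait and retry
 *                                 (retry_msecs backoff, endtime ignored)
 *   LOCK_NOT_GRANTED / FILE_LOCK_CONFLICT
 *                              -> watch locking.tdb until state->endtime,
 *                                 polling for posix locks
 *                                 (polling_msecs backoff)
 *   anything else              -> fail the request
 */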
static void smbd_smb1_do_locks_try(struct tevent_req *req)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	struct files_struct *fsp = state->fsp;
	struct share_mode_lock *lck;
	struct timeval endtime = { 0 };
	struct server_id blocking_pid = { 0 };
	uint64_t blocking_smblctx = 0;
	struct tevent_req *subreq = NULL;
	NTSTATUS status;
	bool ok;
	bool expired;

	lck = get_existing_share_mode_lock(state, fsp->file_id);
	if (tevent_req_nomem(lck, req)) {
		DBG_DEBUG("Could not get share mode lock\n");
		return;
	}

	status = smbd_smb1_do_locks_check(
		fsp,
		state->num_locks,
		state->locks,
		&state->blocker,
		&blocking_pid,
		&blocking_smblctx);
	if (NT_STATUS_IS_OK(status)) {
		goto done;
	}
	if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
		/*
		 * We got NT_STATUS_RETRY,
		 * we reset polling_msecs so that
		 * the retries based on LOCK_NOT_GRANTED
		 * will later start with small intervals again.
		 */
		state->polling_msecs = 0;

		/*
		 * The backend wasn't able to decide yet.
		 * We need to wait even for non-blocking
		 * locks.
		 *
		 * The backend uses blocking_smblctx == UINT64_MAX
		 * to indicate that we should use retry timers.
		 *
		 * It uses blocking_smblctx == 0 to indicate
		 * it will use share_mode_wakeup_waiters()
		 * to wake us. Note that unrelated changes in
		 * locking.tdb may cause retries.
		 */

		if (blocking_smblctx != UINT64_MAX) {
			SMB_ASSERT(blocking_smblctx == 0);
			goto setup_retry;
		}

		smbd_smb1_do_locks_update_retry_msecs(state);

		DBG_DEBUG("Waiting for a backend decision. "
			  "Retry in %"PRIu32" msecs\n",
			  state->retry_msecs);

		/*
		 * We completely ignore state->endtime here,
		 * as we'll wait for a backend decision forever.
		 * If the backend is smart enough to implement
		 * some NT_STATUS_RETRY logic, it has to
		 * switch to any other status eventually in order
		 * to avoid waiting forever.
		 */
		endtime = timeval_current_ofs_msec(state->retry_msecs);
		goto setup_retry;
	}
	if (!ERROR_WAS_LOCK_DENIED(status)) {
		goto done;
	}
	/*
	 * We got LOCK_NOT_GRANTED, make sure
	 * a following STATUS_RETRY will start
	 * with short intervals again.
	 */
	state->retry_msecs = 0;

	smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
	DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
		  state->timeout,
		  blocking_smblctx);

	/*
	 * If the client-specified timeout expired,
	 * avoid further retries.
	 *
	 * Otherwise keep waiting, either for changes
	 * in locking.tdb or for the polling mode
	 * timers waiting for posix locks.
	 *
	 * If the endtime is not expired yet,
	 * it means we'll retry after a timeout.
	 * In that case we'll have to return
	 * NT_STATUS_FILE_LOCK_CONFLICT
	 * instead of NT_STATUS_LOCK_NOT_GRANTED.
	 */
	expired = timeval_expired(&state->endtime);
	if (expired) {
		status = state->deny_status;
		goto done;
	}

	state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;

	endtime = state->endtime;

	if (blocking_smblctx == UINT64_MAX) {
		struct timeval tmp;

		smbd_smb1_do_locks_update_polling_msecs(state);

		DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
			  state->polling_msecs);

		tmp = timeval_current_ofs_msec(state->polling_msecs);
		endtime = timeval_min(&endtime, &tmp);
	}

setup_retry:
	subreq = share_mode_watch_send(
		state, state->ev, lck, blocking_pid);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);

	if (timeval_is_zero(&endtime)) {
		return;
	}

	ok = tevent_req_set_endtime(subreq, state->ev, endtime);
	if (!ok) {
		status = NT_STATUS_NO_MEMORY;
		goto done;
	}

	return;
done:
	smbd_smb1_brl_finish_by_req(req, status);
}
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(
		subreq, struct tevent_req);
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	NTSTATUS status;
	bool ok;

	/*
	 * Make sure we run as the user again
	 */
	ok = change_to_user_and_service_by_fsp(state->fsp);
	if (!ok) {
		tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
		return;
	}

	status = share_mode_watch_recv(subreq, NULL, NULL);
	TALLOC_FREE(subreq);

	DBG_DEBUG("share_mode_watch_recv returned %s\n",
		  nt_errstr(status));

	/*
	 * We ignore any errors here, it's most likely
	 * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
	 *
	 * In any case we can just give it a retry.
	 */

	smbd_smb1_do_locks_try(req);
}
NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);
	NTSTATUS status = NT_STATUS_OK;
	bool err;

	err = tevent_req_is_nterror(req, &status);

	DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));

	if (tevent_req_is_nterror(req, &status)) {
		struct files_struct *fsp = state->fsp;
		struct smbd_lock_element *blocker =
			&state->locks[state->blocker];

		DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
			  blocker->offset);

		fsp->fsp_flags.lock_failure_seen = true;
		fsp->lock_failure_offset = blocker->offset;
	}

	tevent_req_received(req);

	return status;
}
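
/*
 * The lock_failure_seen/lock_failure_offset recorded above feed back
 * into smbd_smb1_do_locks_setup_timeout(): an immediate retry of the
 * exact lock that just failed is given a short lp_lock_spin_time()
 * window rather than failing instantly.
 */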
bool smbd_smb1_do_locks_extract_smbreq(
	struct tevent_req *req,
	TALLOC_CTX *mem_ctx,
	struct smb_request **psmbreq)
{
	struct smbd_smb1_do_locks_state *state = tevent_req_data(
		req, struct smbd_smb1_do_locks_state);

	DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
		  req,
		  state,
		  state->smbreq);

	if (state->smbreq == NULL) {
		return false;
	}
	*psmbreq = talloc_move(mem_ctx, &state->smbreq);
	return true;
}
void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
{
	DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));

	if (NT_STATUS_IS_OK(status)) {
		tevent_req_done(req);
	} else {
		tevent_req_nterror(req, status);
	}
}
bool smbd_smb1_brl_finish_by_lock(
	struct files_struct *fsp,
	bool large_offset,
	struct smbd_lock_element lock,
	NTSTATUS finish_status)
{
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	size_t i;

	DBG_DEBUG("num_blocked=%zu\n", num_blocked);

	for (i=0; i<num_blocked; i++) {
		struct tevent_req *req = blocked[i];
		struct smbd_smb1_do_locks_state *state = tevent_req_data(
			req, struct smbd_smb1_do_locks_state);
		uint16_t j;

		DBG_DEBUG("i=%zu, req=%p\n", i, req);

		if (state->large_offset != large_offset) {
			continue;
		}

		for (j=0; j<state->num_locks; j++) {
			struct smbd_lock_element *l = &state->locks[j];

			if ((lock.smblctx == l->smblctx) &&
			    (lock.offset == l->offset) &&
			    (lock.count == l->count)) {
				smbd_smb1_brl_finish_by_req(
					req, finish_status);
				return true;
			}
		}
	}
	return false;
}
static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
	struct files_struct *fsp, void *private_data)
{
	struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
	size_t num_blocked = talloc_array_length(blocked);
	uint64_t mid = *((uint64_t *)private_data);
	size_t i;

	DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);

	for (i=0; i<num_blocked; i++) {
		struct tevent_req *req = blocked[i];
		struct smbd_smb1_do_locks_state *state = tevent_req_data(
			req, struct smbd_smb1_do_locks_state);
		struct smb_request *smbreq = state->smbreq;

		if (smbreq->mid == mid) {
			tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
			return fsp;
		}
	}

	return NULL;
}
/*
 * This walks the list of fsps, we store the blocked reqs attached to
 * them. It can be expensive, but this is legacy SMB1 and from looking
 * at traces I don't really see many of those calls.
 */

bool smbd_smb1_brl_finish_by_mid(
	struct smbd_server_connection *sconn, uint64_t mid)
{
	struct files_struct *found = files_forall(
		sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
	return (found != NULL);
}