/*
   Unix SMB/CIFS implementation.
   Blocking Locking functions
   Copyright (C) Jeremy Allison 1998-2003

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "locking/share_mode_lock.h"
#include "smbd/smbd.h"
#include "smbd/globals.h"
#include "messages.h"
#include "lib/util/tevent_ntstatus.h"
#include "lib/dbwrap/dbwrap_watch.h"
#include "librpc/gen_ndr/ndr_open_files.h"

#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
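
/*
 * Try to acquire all requested locks in order. On the first conflict,
 * undo the locks already granted and report which element blocked, so
 * callers get all-or-nothing behaviour.
 */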
NTSTATUS smbd_do_locks_try(
        struct files_struct *fsp,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        struct server_id *blocking_pid,
        uint64_t *blocking_smblctx)
{
        NTSTATUS status = NT_STATUS_OK;
        uint16_t i;

        for (i=0; i<num_locks; i++) {
                struct smbd_lock_element *e = &locks[i];

                status = do_lock(
                        fsp,
                        locks, /* req_mem_ctx */
                        &e->req_guid,
                        e->smblctx,
                        e->count,
                        e->offset,
                        e->brltype,
                        e->lock_flav,
                        blocking_pid,
                        blocking_smblctx);
                if (!NT_STATUS_IS_OK(status)) {
                        break;
                }
        }

        if (NT_STATUS_IS_OK(status)) {
                return NT_STATUS_OK;
        }

        *blocker_idx = i;

        /*
         * Undo the locks we successfully got
         */
        for (i = i-1; i != UINT16_MAX; i--) {
                struct smbd_lock_element *e = &locks[i];
                do_unlock(fsp,
                          e->smblctx,
                          e->count,
                          e->offset,
                          e->lock_flav);
        }

        return status;
}
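
/*
 * Append "req" to the array of blocked SMB1 lock requests hanging off
 * the fsp, growing the talloc'ed array by one slot.
 */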
static bool smbd_smb1_fsp_add_blocked_lock_req(
        struct files_struct *fsp, struct tevent_req *req)
{
        size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
        struct tevent_req **tmp = NULL;

        tmp = talloc_realloc(
                fsp,
                fsp->blocked_smb1_lock_reqs,
                struct tevent_req *,
                num_reqs+1);
        if (tmp == NULL) {
                return false;
        }
        fsp->blocked_smb1_lock_reqs = tmp;
        fsp->blocked_smb1_lock_reqs[num_reqs] = req;
        return true;
}

struct smbd_smb1_do_locks_state {
        struct tevent_context *ev;
        struct smb_request *smbreq;
        struct files_struct *fsp;
        uint32_t timeout;
        uint32_t polling_msecs;
        uint32_t retry_msecs;
        struct timeval endtime;
        bool large_offset;      /* required for correct cancel */
        uint16_t num_locks;
        struct smbd_lock_element *locks;
        uint16_t blocker;
        NTSTATUS deny_status;
};

static void smbd_smb1_do_locks_try(struct tevent_req *req);
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
static void smbd_smb1_blocked_locks_cleanup(
        struct tevent_req *req, enum tevent_req_state req_state);
static NTSTATUS smbd_smb1_do_locks_check(
        struct files_struct *fsp,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        struct server_id *blocking_pid,
        uint64_t *blocking_smblctx);
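
/*
 * Compute state->endtime exactly once from the client-supplied timeout,
 * enforcing the ~200ms minimum and adding a short spin delay for
 * known-problematic blockers.
 */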
static void smbd_smb1_do_locks_setup_timeout(
        struct smbd_smb1_do_locks_state *state,
        const struct smbd_lock_element *blocker)
{
        struct files_struct *fsp = state->fsp;

        if (!timeval_is_zero(&state->endtime)) {
                /*
                 * already done
                 */
                return;
        }

        if ((state->timeout != 0) && (state->timeout != UINT32_MAX)) {
                /*
                 * Windows internal resolution for blocking locks
                 * seems to be about 200ms... Don't wait for less than
                 * that. JRA.
                 */
                state->timeout = MAX(state->timeout, lp_lock_spin_time());
        }

        if (state->timeout != 0) {
                goto set_endtime;
        }

        if (blocker == NULL) {
                goto set_endtime;
        }

        if ((blocker->offset >= 0xEF000000) &&
            ((blocker->offset >> 63) == 0)) {
                /*
                 * This must be an optimization of an ancient
                 * application bug...
                 */
                state->timeout = lp_lock_spin_time();
        }

        if (fsp->fsp_flags.lock_failure_seen &&
            (blocker->offset == fsp->lock_failure_offset)) {
                /*
                 * Delay repeated lock attempts on the same
                 * lock. Maybe a more advanced version of the
                 * above check?
                 */
                DBG_DEBUG("Delaying lock request due to previous "
                          "failure\n");
                state->timeout = lp_lock_spin_time();
        }

set_endtime:
        /*
         * Note state->timeout might still be 0,
         * but that's ok, as we don't want to retry
         * in that case.
         */
        state->endtime = timeval_add(&state->smbreq->request_time,
                                     state->timeout / 1000,
                                     (state->timeout % 1000) * 1000);
}

static void smbd_smb1_do_locks_update_retry_msecs(
        struct smbd_smb1_do_locks_state *state)
{
        /*
         * The default lp_lock_spin_time() is 200ms,
         * we just use half of it to trigger the first retry.
         *
         * v_min is in the range of 0.001 to 10 secs
         * (0.1 secs by default)
         *
         * v_max is in the range of 0.01 to 100 secs
         * (1.0 secs by default)
         *
         * The typical steps are:
         * 0.1, 0.2, 0.3, 0.4, ... 1.0
         */
        uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()))/2;
        uint32_t v_max = 10 * v_min;

        if (state->retry_msecs >= v_max) {
                state->retry_msecs = v_max;
                return;
        }

        state->retry_msecs += v_min;
}

static void smbd_smb1_do_locks_update_polling_msecs(
        struct smbd_smb1_do_locks_state *state)
{
        /*
         * The default lp_lock_spin_time() is 200ms.
         *
         * v_min is in the range of 0.002 to 20 secs
         * (0.2 secs by default)
         *
         * v_max is in the range of 0.02 to 200 secs
         * (2.0 secs by default)
         *
         * The typical steps are:
         * 0.2, 0.4, 0.6, 0.8, ... 2.0
         */
        uint32_t v_min = MAX(2, MIN(20000, lp_lock_spin_time()));
        uint32_t v_max = 10 * v_min;

        if (state->polling_msecs >= v_max) {
                state->polling_msecs = v_max;
                return;
        }

        state->polling_msecs += v_min;
}
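
/*
 * Start an async blocking-lock request: try the locks once right away
 * and, if blocked, park the request on the fsp's blocked list until a
 * retry is triggered. Note that *smbreq is talloc_move()d into the
 * request state.
 */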
struct tevent_req *smbd_smb1_do_locks_send(
        TALLOC_CTX *mem_ctx,
        struct tevent_context *ev,
        struct smb_request **smbreq, /* talloc_move()d into our state */
        struct files_struct *fsp,
        uint32_t lock_timeout,
        bool large_offset,
        uint16_t num_locks,
        struct smbd_lock_element *locks)
{
        struct tevent_req *req = NULL;
        struct smbd_smb1_do_locks_state *state = NULL;
        bool ok;

        req = tevent_req_create(
                mem_ctx, &state, struct smbd_smb1_do_locks_state);
        if (req == NULL) {
                return NULL;
        }
        state->ev = ev;
        state->smbreq = talloc_move(state, smbreq);
        state->fsp = fsp;
        state->timeout = lock_timeout;
        state->large_offset = large_offset;
        state->num_locks = num_locks;
        state->locks = locks;
        state->deny_status = NT_STATUS_LOCK_NOT_GRANTED;

        DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);

        if (num_locks == 0 || locks == NULL) {
                DBG_DEBUG("no locks\n");
                tevent_req_done(req);
                return tevent_req_post(req, ev);
        }

        if (state->locks[0].lock_flav == POSIX_LOCK) {
                /*
                 * SMB1 posix locks always use
                 * NT_STATUS_FILE_LOCK_CONFLICT.
                 */
                state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;
        }

        smbd_smb1_do_locks_try(req);
        if (!tevent_req_is_in_progress(req)) {
                return tevent_req_post(req, ev);
        }

        ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
        if (!ok) {
                tevent_req_oom(req);
                return tevent_req_post(req, ev);
        }
        tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
        return req;
}
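
/*
 * Cleanup callback: remove the request from the fsp's array of blocked
 * lock requests. At TEVENT_REQ_RECEIVED the entry is already gone.
 */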
static void smbd_smb1_blocked_locks_cleanup(
        struct tevent_req *req, enum tevent_req_state req_state)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        struct files_struct *fsp = state->fsp;
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        size_t i;

        DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
                  req,
                  state,
                  (int)req_state);

        if (req_state == TEVENT_REQ_RECEIVED) {
                DBG_DEBUG("already received\n");
                return;
        }

        for (i=0; i<num_blocked; i++) {
                if (blocked[i] == req) {
                        break;
                }
        }
        SMB_ASSERT(i<num_blocked);

        ARRAY_DEL_ELEMENT(blocked, i, num_blocked);

        fsp->blocked_smb1_lock_reqs = talloc_realloc(
                fsp, blocked, struct tevent_req *, num_blocked-1);
}
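
/*
 * Check the requested locks against those of an older blocked request.
 * Two read locks never conflict; everything else conflicts on byte
 * range overlap.
 */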
static NTSTATUS smbd_smb1_do_locks_check_blocked(
        uint16_t num_blocked,
        struct smbd_lock_element *blocked,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        uint64_t *blocking_smblctx)
{
        uint16_t li;

        for (li=0; li < num_locks; li++) {
                struct smbd_lock_element *l = &locks[li];
                uint16_t bi;
                bool valid;

                valid = byte_range_valid(l->offset, l->count);
                if (!valid) {
                        return NT_STATUS_INVALID_LOCK_RANGE;
                }

                for (bi = 0; bi < num_blocked; bi++) {
                        struct smbd_lock_element *b = &blocked[bi];
                        bool overlap;

                        /* Read locks never conflict. */
                        if (l->brltype == READ_LOCK && b->brltype == READ_LOCK) {
                                continue;
                        }

                        overlap = byte_range_overlap(l->offset,
                                                     l->count,
                                                     b->offset,
                                                     b->count);
                        if (!overlap) {
                                continue;
                        }

                        *blocker_idx = li;
                        *blocking_smblctx = b->smblctx;
                        return NT_STATUS_LOCK_NOT_GRANTED;
                }
        }

        return NT_STATUS_OK;
}
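
/*
 * Check the request against all older pending requests on this fsp
 * before actually trying the locks, so a newer request cannot jump
 * ahead of an older conflicting one.
 */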
static NTSTATUS smbd_smb1_do_locks_check(
        struct files_struct *fsp,
        uint16_t num_locks,
        struct smbd_lock_element *locks,
        uint16_t *blocker_idx,
        struct server_id *blocking_pid,
        uint64_t *blocking_smblctx)
{
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        NTSTATUS status;
        size_t bi;

        /*
         * We check the pending/blocked requests
         * from the oldest to the youngest request.
         *
         * Note due to the retry logic the current request
         * might already be in the list.
         */

        for (bi = 0; bi < num_blocked; bi++) {
                struct smbd_smb1_do_locks_state *blocked_state =
                        tevent_req_data(blocked[bi],
                                        struct smbd_smb1_do_locks_state);

                if (blocked_state->locks == locks) {
                        SMB_ASSERT(blocked_state->num_locks == num_locks);

                        /*
                         * We found ourself...
                         */
                        break;
                }

                status = smbd_smb1_do_locks_check_blocked(
                        blocked_state->num_locks,
                        blocked_state->locks,
                        num_locks,
                        locks,
                        blocker_idx,
                        blocking_smblctx);
                if (!NT_STATUS_IS_OK(status)) {
                        *blocking_pid = messaging_server_id(
                                fsp->conn->sconn->msg_ctx);
                        return status;
                }
        }

        status = smbd_do_locks_try(
                fsp,
                num_locks,
                locks,
                blocker_idx,
                blocking_pid,
                blocking_smblctx);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        return NT_STATUS_OK;
}
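
/*
 * The main engine: under the share mode lock, check and try the locks.
 * On conflict, pick between watching locking.tdb, retry timers for an
 * undecided backend (NT_STATUS_RETRY) and polling timers for posix
 * lock holders that cannot wake us, then arm the matching endtime.
 */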
static void smbd_smb1_do_locks_try(struct tevent_req *req)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        struct files_struct *fsp = state->fsp;
        struct share_mode_lock *lck;
        struct timeval endtime = { 0 };
        struct server_id blocking_pid = { 0 };
        uint64_t blocking_smblctx = 0;
        struct tevent_req *subreq = NULL;
        NTSTATUS status;
        bool ok;
        bool expired;

        lck = get_existing_share_mode_lock(state, fsp->file_id);
        if (tevent_req_nomem(lck, req)) {
                DBG_DEBUG("Could not get share mode lock\n");
                return;
        }

        status = smbd_smb1_do_locks_check(
                fsp,
                state->num_locks,
                state->locks,
                &state->blocker,
                &blocking_pid,
                &blocking_smblctx);
        if (NT_STATUS_IS_OK(status)) {
                goto done;
        }
        if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
                /*
                 * We got NT_STATUS_RETRY,
                 * we reset polling_msecs so that
                 * the retries based on LOCK_NOT_GRANTED
                 * will later start with small intervals again.
                 */
                state->polling_msecs = 0;

                /*
                 * The backend wasn't able to decide yet.
                 * We need to wait even for non-blocking
                 * locks.
                 *
                 * The backend uses blocking_smblctx == UINT64_MAX
                 * to indicate that we should use retry timers.
                 *
                 * It uses blocking_smblctx == 0 to indicate
                 * it will use share_mode_wakeup_waiters()
                 * to wake us. Note that unrelated changes in
                 * locking.tdb may cause retries.
                 */

                if (blocking_smblctx != UINT64_MAX) {
                        SMB_ASSERT(blocking_smblctx == 0);
                        goto setup_retry;
                }

                smbd_smb1_do_locks_update_retry_msecs(state);

                DBG_DEBUG("Waiting for a backend decision. "
                          "Retry in %"PRIu32" msecs\n",
                          state->retry_msecs);

                /*
                 * We completely ignore state->endtime here
                 * as we'll wait for a backend decision forever.
                 * If the backend is smart enough to implement
                 * some NT_STATUS_RETRY logic, it has to
                 * switch to any other status afterwards in order
                 * to avoid waiting forever.
                 */
                endtime = timeval_current_ofs_msec(state->retry_msecs);
                goto setup_retry;
        }
        if (!ERROR_WAS_LOCK_DENIED(status)) {
                goto done;
        }
        /*
         * We got LOCK_NOT_GRANTED, make sure
         * a following STATUS_RETRY will start
         * with short intervals again.
         */
        state->retry_msecs = 0;

        smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
        DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
                  state->timeout,
                  blocking_smblctx);

        /*
         * If the client specified timeout expired,
         * avoid further retries.
         *
         * Otherwise keep waiting, either for changes
         * in locking.tdb or for the polling mode timers
         * waiting for posix locks.
         *
         * If the endtime is not expired yet,
         * it means we'll retry after a timeout.
         * In that case we'll have to return
         * NT_STATUS_FILE_LOCK_CONFLICT
         * instead of NT_STATUS_LOCK_NOT_GRANTED.
         */
        expired = timeval_expired(&state->endtime);
        if (expired) {
                status = state->deny_status;
                goto done;
        }
        state->deny_status = NT_STATUS_FILE_LOCK_CONFLICT;

        endtime = state->endtime;

        if (blocking_smblctx == UINT64_MAX) {
                struct timeval tmp;

                smbd_smb1_do_locks_update_polling_msecs(state);

                DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
                          state->polling_msecs);

                tmp = timeval_current_ofs_msec(state->polling_msecs);
                endtime = timeval_min(&endtime, &tmp);
        }

setup_retry:
        subreq = share_mode_watch_send(
                state, state->ev, lck, blocking_pid);
        if (tevent_req_nomem(subreq, req)) {
                goto done;
        }
        TALLOC_FREE(lck);
        tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);

        if (timeval_is_zero(&endtime)) {
                return;
        }

        ok = tevent_req_set_endtime(subreq, state->ev, endtime);
        if (!ok) {
                status = NT_STATUS_NO_MEMORY;
                goto done;
        }
        return;
done:
        TALLOC_FREE(lck);
        smbd_smb1_brl_finish_by_req(req, status);
}
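
/*
 * Watch/timer completion: re-impersonate the user owning the request
 * and simply try the locks again, whatever woke us up.
 */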
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
{
        struct tevent_req *req = tevent_req_callback_data(
                subreq, struct tevent_req);
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        NTSTATUS status;
        bool ok;

        /*
         * Make sure we run as the user again
         */
        ok = change_to_user_and_service_by_fsp(state->fsp);
        if (!ok) {
                tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
                return;
        }

        status = share_mode_watch_recv(subreq, NULL, NULL);
        TALLOC_FREE(subreq);

        DBG_DEBUG("share_mode_watch_recv returned %s\n",
                  nt_errstr(status));

        /*
         * We ignore any errors here, it's most likely
         * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
         *
         * In any case we can just give it a retry.
         */

        smbd_smb1_do_locks_try(req);
}
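
/*
 * On failure, record the blocking offset on the fsp so that the next
 * attempt on the same offset gets delayed by lp_lock_spin_time().
 */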
NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);
        NTSTATUS status = NT_STATUS_OK;
        bool err;

        err = tevent_req_is_nterror(req, &status);

        DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));

        if (err) {
                struct files_struct *fsp = state->fsp;
                struct smbd_lock_element *blocker =
                        &state->locks[state->blocker];

                DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
                          blocker->offset);

                fsp->fsp_flags.lock_failure_seen = true;
                fsp->lock_failure_offset = blocker->offset;
                return status;
        }

        tevent_req_received(req);

        return NT_STATUS_OK;
}
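
/*
 * Transfer ownership of the embedded smb_request back to the caller;
 * returns false if it has already been extracted.
 */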
bool smbd_smb1_do_locks_extract_smbreq(
        struct tevent_req *req,
        TALLOC_CTX *mem_ctx,
        struct smb_request **psmbreq)
{
        struct smbd_smb1_do_locks_state *state = tevent_req_data(
                req, struct smbd_smb1_do_locks_state);

        DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
                  req,
                  state,
                  state->smbreq);

        if (state->smbreq == NULL) {
                return false;
        }
        *psmbreq = talloc_move(mem_ctx, &state->smbreq);
        return true;
}

void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
{
        DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));

        if (NT_STATUS_IS_OK(status)) {
                tevent_req_done(req);
        } else {
                tevent_req_nterror(req, status);
        }
}
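
/*
 * Finish the first blocked request containing a lock element that
 * matches smblctx, offset and count for the given offset flavour.
 */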
bool smbd_smb1_brl_finish_by_lock(
        struct files_struct *fsp,
        bool large_offset,
        struct smbd_lock_element lock,
        NTSTATUS finish_status)
{
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        size_t i;

        DBG_DEBUG("num_blocked=%zu\n", num_blocked);

        for (i=0; i<num_blocked; i++) {
                struct tevent_req *req = blocked[i];
                struct smbd_smb1_do_locks_state *state = tevent_req_data(
                        req, struct smbd_smb1_do_locks_state);
                uint16_t j;

                DBG_DEBUG("i=%zu, req=%p\n", i, req);

                if (state->large_offset != large_offset) {
                        continue;
                }

                for (j=0; j<state->num_locks; j++) {
                        struct smbd_lock_element *l = &state->locks[j];

                        if ((lock.smblctx == l->smblctx) &&
                            (lock.offset == l->offset) &&
                            (lock.count == l->count)) {
                                smbd_smb1_brl_finish_by_req(
                                        req, finish_status);
                                return true;
                        }
                }
        }
        return false;
}
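
/*
 * files_forall() callback: fail the blocked request carrying the given
 * mid with NT_STATUS_FILE_LOCK_CONFLICT; returning non-NULL stops the
 * walk.
 */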
static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
        struct files_struct *fsp, void *private_data)
{
        struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
        size_t num_blocked = talloc_array_length(blocked);
        uint64_t mid = *((uint64_t *)private_data);
        size_t i;

        DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);

        for (i=0; i<num_blocked; i++) {
                struct tevent_req *req = blocked[i];
                struct smbd_smb1_do_locks_state *state = tevent_req_data(
                        req, struct smbd_smb1_do_locks_state);
                struct smb_request *smbreq = state->smbreq;

                if (smbreq->mid == mid) {
                        tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
                        return fsp;
                }
        }

        return NULL;
}

/*
 * This walks the list of fsps; the blocked reqs are stored attached to
 * them. It can be expensive, but this is legacy SMB1 and, judging from
 * traces, such calls are rare.
 */
bool smbd_smb1_brl_finish_by_mid(
        struct smbd_server_connection *sconn, uint64_t mid)
{
        struct files_struct *found = files_forall(
                sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
        return (found != NULL);
}