s3:blocking: demonstrate the posix lock retry fails
[Samba.git] / source3 / smbd / blocking.c
blob91438fe44860d21f0d145fd3943021c454582e3f
/*
   Unix SMB/CIFS implementation.
   Blocking Locking functions
   Copyright (C) Jeremy Allison 1998-2003

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
20 #include "includes.h"
21 #include "smbd/smbd.h"
22 #include "smbd/globals.h"
23 #include "messages.h"
24 #include "lib/util/tevent_ntstatus.h"
25 #include "lib/dbwrap/dbwrap_watch.h"
26 #include "librpc/gen_ndr/ndr_open_files.h"
28 #undef DBGC_CLASS
29 #define DBGC_CLASS DBGC_LOCKING
31 NTSTATUS smbd_do_locks_try(
32 struct files_struct *fsp,
33 enum brl_flavour lock_flav,
34 uint16_t num_locks,
35 struct smbd_lock_element *locks,
36 uint16_t *blocker_idx,
37 struct server_id *blocking_pid,
38 uint64_t *blocking_smblctx)
40 NTSTATUS status = NT_STATUS_OK;
41 uint16_t i;
43 for (i=0; i<num_locks; i++) {
44 struct smbd_lock_element *e = &locks[i];
46 status = do_lock(
47 fsp,
48 e->smblctx,
49 e->count,
50 e->offset,
51 e->brltype,
52 lock_flav,
53 blocking_pid,
54 blocking_smblctx);
55 if (!NT_STATUS_IS_OK(status)) {
56 break;
60 if (NT_STATUS_IS_OK(status)) {
61 return NT_STATUS_OK;
64 *blocker_idx = i;
67 * Undo the locks we successfully got
69 for (i = i-1; i != UINT16_MAX; i--) {
70 struct smbd_lock_element *e = &locks[i];
71 do_unlock(fsp,
72 e->smblctx,
73 e->count,
74 e->offset,
75 lock_flav);
78 return status;
81 static bool smbd_smb1_fsp_add_blocked_lock_req(
82 struct files_struct *fsp, struct tevent_req *req)
84 size_t num_reqs = talloc_array_length(fsp->blocked_smb1_lock_reqs);
85 struct tevent_req **tmp = NULL;
87 tmp = talloc_realloc(
88 fsp,
89 fsp->blocked_smb1_lock_reqs,
90 struct tevent_req *,
91 num_reqs+1);
92 if (tmp == NULL) {
93 return false;
95 fsp->blocked_smb1_lock_reqs = tmp;
96 fsp->blocked_smb1_lock_reqs[num_reqs] = req;
97 return true;
/*
 * Per-request state for an asynchronous SMB1 blocking-lock request
 * (see smbd_smb1_do_locks_send/_recv below).
 */
struct smbd_smb1_do_locks_state {
	struct tevent_context *ev;	/* event context driving retries */
	struct smb_request *smbreq;	/* owning SMB1 request, talloc_move()d in */
	struct files_struct *fsp;	/* file the locks apply to */
	struct timeval endtime;		/* absolute deadline for the whole request */
	bool large_offset; /* required for correct cancel */
	enum brl_flavour lock_flav;	/* byte-range lock flavour for all elements */
	uint16_t num_locks;		/* number of entries in "locks" */
	struct smbd_lock_element *locks; /* the lock elements to acquire */
	uint16_t blocker;		/* index into "locks" of the blocking element */
};

static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
static void smbd_smb1_blocked_locks_cleanup(
	struct tevent_req *req, enum tevent_req_state req_state);
116 struct tevent_req *smbd_smb1_do_locks_send(
117 TALLOC_CTX *mem_ctx,
118 struct tevent_context *ev,
119 struct smb_request **smbreq, /* talloc_move()d into our state */
120 struct files_struct *fsp,
121 uint32_t timeout,
122 bool large_offset,
123 enum brl_flavour lock_flav,
124 uint16_t num_locks,
125 struct smbd_lock_element *locks)
127 struct tevent_req *req = NULL, *subreq = NULL;
128 struct smbd_smb1_do_locks_state *state = NULL;
129 struct share_mode_lock *lck = NULL;
130 struct server_id blocking_pid = { 0 };
131 uint64_t blocking_smblctx = 0;
132 struct timeval endtime;
133 NTSTATUS status = NT_STATUS_OK;
134 bool ok;
136 req = tevent_req_create(
137 mem_ctx, &state, struct smbd_smb1_do_locks_state);
138 if (req == NULL) {
139 return NULL;
141 state->ev = ev;
142 state->smbreq = talloc_move(state, smbreq);
143 state->fsp = fsp;
144 state->large_offset = large_offset;
145 state->lock_flav = lock_flav;
146 state->num_locks = num_locks;
147 state->locks = locks;
149 DBG_DEBUG("state=%p, state->smbreq=%p\n", state, state->smbreq);
151 if (num_locks == 0) {
152 DBG_DEBUG("no locks\n");
153 tevent_req_done(req);
154 return tevent_req_post(req, ev);
157 if ((timeout != 0) && (timeout != UINT32_MAX)) {
159 * Windows internal resolution for blocking locks
160 * seems to be about 200ms... Don't wait for less than
161 * that. JRA.
163 timeout = MAX(timeout, lp_lock_spin_time());
166 lck = get_existing_share_mode_lock(state, state->fsp->file_id);
167 if (tevent_req_nomem(lck, req)) {
168 DBG_DEBUG("Could not get share mode lock\n");
169 return tevent_req_post(req, ev);
172 status = smbd_do_locks_try(
173 state->fsp,
174 state->lock_flav,
175 state->num_locks,
176 state->locks,
177 &state->blocker,
178 &blocking_pid,
179 &blocking_smblctx);
180 if (NT_STATUS_IS_OK(status)) {
181 tevent_req_done(req);
182 goto done;
184 if (!ERROR_WAS_LOCK_DENIED(status)) {
185 tevent_req_nterror(req, status);
186 goto done;
189 if (timeout == 0) {
190 struct smbd_lock_element *blocker = &locks[state->blocker];
192 if ((blocker->offset >= 0xEF000000) &&
193 ((blocker->offset >> 63) == 0)) {
195 * This must be an optimization of an ancient
196 * application bug...
198 timeout = lp_lock_spin_time();
201 if ((fsp->lock_failure_seen) &&
202 (blocker->offset == fsp->lock_failure_offset)) {
204 * Delay repeated lock attempts on the same
205 * lock. Maybe a more advanced version of the
206 * above check?
208 DBG_DEBUG("Delaying lock request due to previous "
209 "failure\n");
210 timeout = lp_lock_spin_time();
214 DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
215 timeout,
216 blocking_smblctx);
218 if (timeout == 0) {
219 tevent_req_nterror(req, status);
220 goto done;
223 subreq = dbwrap_watched_watch_send(
224 state, state->ev, lck->data->record, blocking_pid);
225 if (tevent_req_nomem(subreq, req)) {
226 goto done;
228 TALLOC_FREE(lck);
229 tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
231 state->endtime = timeval_current_ofs_msec(timeout);
232 endtime = state->endtime;
234 if (blocking_smblctx == UINT64_MAX) {
235 struct timeval tmp;
237 DBG_DEBUG("Blocked on a posix lock. Retry in one second\n");
239 tmp = timeval_current_ofs(15, 0);
240 endtime = timeval_min(&endtime, &tmp);
243 ok = tevent_req_set_endtime(subreq, state->ev, endtime);
244 if (!ok) {
245 tevent_req_oom(req);
246 goto done;
249 ok = smbd_smb1_fsp_add_blocked_lock_req(fsp, req);
250 if (!ok) {
251 tevent_req_oom(req);
252 goto done;
254 tevent_req_set_cleanup_fn(req, smbd_smb1_blocked_locks_cleanup);
255 return req;
256 done:
257 TALLOC_FREE(lck);
258 return tevent_req_post(req, ev);
261 static void smbd_smb1_blocked_locks_cleanup(
262 struct tevent_req *req, enum tevent_req_state req_state)
264 struct smbd_smb1_do_locks_state *state = tevent_req_data(
265 req, struct smbd_smb1_do_locks_state);
266 struct files_struct *fsp = state->fsp;
267 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
268 size_t num_blocked = talloc_array_length(blocked);
269 size_t i, num_after;
271 DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
272 req,
273 state,
274 (int)req_state);
276 if (req_state == TEVENT_REQ_RECEIVED) {
277 DBG_DEBUG("already received\n");
278 return;
281 for (i=0; i<num_blocked; i++) {
282 if (blocked[i] == req) {
283 break;
286 SMB_ASSERT(i<num_blocked);
288 num_after = num_blocked - (i+1);
290 if (num_after > 0) {
292 * The locks need to be kept in order, see
293 * raw.lock.multilock2
295 memmove(&blocked[i],
296 &blocked[i+1],
297 sizeof(*blocked) * num_after);
299 fsp->blocked_smb1_lock_reqs = talloc_realloc(
300 fsp, blocked, struct tevent_req *, num_blocked-1);
303 static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
305 struct tevent_req *req = tevent_req_callback_data(
306 subreq, struct tevent_req);
307 struct smbd_smb1_do_locks_state *state = tevent_req_data(
308 req, struct smbd_smb1_do_locks_state);
309 struct files_struct *fsp = state->fsp;
310 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
311 struct tevent_req *retry_req = blocked[0];
312 struct smbd_smb1_do_locks_state *retry_state = tevent_req_data(
313 retry_req, struct smbd_smb1_do_locks_state);
314 struct share_mode_lock *lck;
315 struct timeval endtime;
316 struct server_id blocking_pid = { 0 };
317 uint64_t blocking_smblctx = 0;
318 NTSTATUS status;
319 bool ok;
322 * Make sure we run as the user again
324 ok = change_to_user_by_fsp(state->fsp);
325 if (!ok) {
326 tevent_req_nterror(req, NT_STATUS_ACCESS_DENIED);
327 return;
330 status = dbwrap_watched_watch_recv(subreq, NULL, NULL);
331 TALLOC_FREE(subreq);
333 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
334 nt_errstr(status));
336 if (NT_STATUS_EQUAL(status, NT_STATUS_IO_TIMEOUT)) {
337 double elapsed = timeval_elapsed(&state->endtime);
338 if (elapsed > 0) {
339 smbd_smb1_brl_finish_by_req(
340 req, NT_STATUS_FILE_LOCK_CONFLICT);
341 return;
344 * This is a posix lock retry. Just retry.
348 lck = get_existing_share_mode_lock(state, fsp->file_id);
349 if (tevent_req_nomem(lck, req)) {
350 DBG_DEBUG("Could not get share mode lock\n");
351 return;
354 status = smbd_do_locks_try(
355 fsp,
356 retry_state->lock_flav,
357 retry_state->num_locks,
358 retry_state->locks,
359 &state->blocker,
360 &blocking_pid,
361 &blocking_smblctx);
362 if (NT_STATUS_IS_OK(status)) {
363 goto done;
365 if (!ERROR_WAS_LOCK_DENIED(status)) {
366 goto done;
369 subreq = dbwrap_watched_watch_send(
370 state, state->ev, lck->data->record, blocking_pid);
371 if (tevent_req_nomem(subreq, req)) {
372 goto done;
374 TALLOC_FREE(lck);
375 tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
377 endtime = state->endtime;
379 if (blocking_smblctx == UINT64_MAX) {
380 struct timeval tmp;
382 DBG_DEBUG("Blocked on a posix lock. Retry in one second\n");
384 tmp = timeval_current_ofs(15, 0);
385 endtime = timeval_min(&endtime, &tmp);
388 ok = tevent_req_set_endtime(subreq, state->ev, endtime);
389 if (!ok) {
390 status = NT_STATUS_NO_MEMORY;
391 goto done;
393 return;
394 done:
395 TALLOC_FREE(lck);
396 smbd_smb1_brl_finish_by_req(req, status);
399 NTSTATUS smbd_smb1_do_locks_recv(struct tevent_req *req)
401 struct smbd_smb1_do_locks_state *state = tevent_req_data(
402 req, struct smbd_smb1_do_locks_state);
403 NTSTATUS status = NT_STATUS_OK;
404 bool err;
406 err = tevent_req_is_nterror(req, &status);
408 DBG_DEBUG("err=%d, status=%s\n", (int)err, nt_errstr(status));
410 if (tevent_req_is_nterror(req, &status)) {
411 struct files_struct *fsp = state->fsp;
412 struct smbd_lock_element *blocker =
413 &state->locks[state->blocker];
415 DBG_DEBUG("Setting lock_failure_offset=%"PRIu64"\n",
416 blocker->offset);
418 fsp->lock_failure_seen = true;
419 fsp->lock_failure_offset = blocker->offset;
420 return status;
423 tevent_req_received(req);
425 return NT_STATUS_OK;
428 bool smbd_smb1_do_locks_extract_smbreq(
429 struct tevent_req *req,
430 TALLOC_CTX *mem_ctx,
431 struct smb_request **psmbreq)
433 struct smbd_smb1_do_locks_state *state = tevent_req_data(
434 req, struct smbd_smb1_do_locks_state);
436 DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
437 req,
438 state,
439 state->smbreq);
441 if (state->smbreq == NULL) {
442 return false;
444 *psmbreq = talloc_move(mem_ctx, &state->smbreq);
445 return true;
448 void smbd_smb1_brl_finish_by_req(struct tevent_req *req, NTSTATUS status)
450 DBG_DEBUG("req=%p, status=%s\n", req, nt_errstr(status));
452 if (NT_STATUS_IS_OK(status)) {
453 tevent_req_done(req);
454 } else {
455 tevent_req_nterror(req, status);
459 bool smbd_smb1_brl_finish_by_lock(
460 struct files_struct *fsp,
461 bool large_offset,
462 enum brl_flavour lock_flav,
463 struct smbd_lock_element lock,
464 NTSTATUS finish_status)
466 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
467 size_t num_blocked = talloc_array_length(blocked);
468 size_t i;
470 DBG_DEBUG("num_blocked=%zu\n", num_blocked);
472 for (i=0; i<num_blocked; i++) {
473 struct tevent_req *req = blocked[i];
474 struct smbd_smb1_do_locks_state *state = tevent_req_data(
475 req, struct smbd_smb1_do_locks_state);
476 uint16_t j;
478 DBG_DEBUG("i=%zu, req=%p\n", i, req);
480 if ((state->large_offset != large_offset) ||
481 (state->lock_flav != lock_flav)) {
482 continue;
485 for (j=0; j<state->num_locks; j++) {
486 struct smbd_lock_element *l = &state->locks[j];
488 if ((lock.smblctx == l->smblctx) &&
489 (lock.offset == l->offset) &&
490 (lock.count == l->count)) {
491 smbd_smb1_brl_finish_by_req(
492 req, finish_status);
493 return true;
497 return false;
500 static struct files_struct *smbd_smb1_brl_finish_by_mid_fn(
501 struct files_struct *fsp, void *private_data)
503 struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
504 size_t num_blocked = talloc_array_length(blocked);
505 uint64_t mid = *((uint64_t *)private_data);
506 size_t i;
508 DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp, num_blocked);
510 for (i=0; i<num_blocked; i++) {
511 struct tevent_req *req = blocked[i];
512 struct smbd_smb1_do_locks_state *state = tevent_req_data(
513 req, struct smbd_smb1_do_locks_state);
514 struct smb_request *smbreq = state->smbreq;
516 if (smbreq->mid == mid) {
517 tevent_req_nterror(req, NT_STATUS_FILE_LOCK_CONFLICT);
518 return fsp;
522 return NULL;
526 * This walks the list of fsps, we store the blocked reqs attached to
527 * them. It can be expensive, but this is legacy SMB1 and trying to
528 * remember looking at traces I don't reall many of those calls.
531 bool smbd_smb1_brl_finish_by_mid(
532 struct smbd_server_connection *sconn, uint64_t mid)
534 struct files_struct *found = files_forall(
535 sconn, smbd_smb1_brl_finish_by_mid_fn, &mid);
536 return (found != NULL);