2 Unix SMB/CIFS implementation.
3 Blocking Locking functions
4 Copyright (C) Jeremy Allison 1998-2003
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #include "smbd/smbd.h"
22 #include "smbd/globals.h"
24 #include "lib/util/tevent_ntstatus.h"
25 #include "lib/dbwrap/dbwrap_watch.h"
26 #include "librpc/gen_ndr/ndr_open_files.h"
29 #define DBGC_CLASS DBGC_LOCKING
/*
 * Try to acquire a whole list of byte-range locks in one pass.
 *
 * NOTE(review): the extraction dropped physical lines here (embedded
 * line numbers jump 44 -> 55); the do_lock() call, the unlock call in
 * the undo loop, braces and the returns are not visible.  Only
 * comments are added; confirm details against the full source.
 *
 * On conflict, the locks granted so far in this call are undone
 * (backwards loop below) -- presumably the blocker is reported via
 * blocker_idx / blocking_pid / blocking_smblctx; TODO confirm.
 */
31 NTSTATUS
smbd_do_locks_try(
32 struct files_struct
*fsp
,
33 enum brl_flavour lock_flav
,
35 struct smbd_lock_element
*locks
,
36 uint16_t *blocker_idx
,
37 struct server_id
*blocking_pid
,
38 uint64_t *blocking_smblctx
)
40 NTSTATUS status
= NT_STATUS_OK
;
/* Walk every requested lock; stop at the first one denied. */
43 for (i
=0; i
<num_locks
; i
++) {
44 struct smbd_lock_element
*e
= &locks
[i
];
/* (elided in this view: the lock attempt that sets "status") */
55 if (!NT_STATUS_IS_OK(status
)) {
/* All locks granted -- nothing to undo. */
60 if (NT_STATUS_IS_OK(status
)) {
/*
67 * Undo the locks we successfully got
 */
/*
 * i is uint16_t here: decrementing past 0 wraps to UINT16_MAX,
 * which is the loop's termination sentinel.
 */
69 for (i
= i
-1; i
!= UINT16_MAX
; i
--) {
70 struct smbd_lock_element
*e
= &locks
[i
];
/*
 * Append "req" to the fsp's array of pending SMB1 blocked lock
 * requests (fsp->blocked_smb1_lock_reqs), growing the talloc array by
 * one slot.  Returns false on allocation failure -- presumably; the
 * realloc call itself is elided in this extraction, TODO confirm.
 */
81 static bool smbd_smb1_fsp_add_blocked_lock_req(
82 struct files_struct
*fsp
, struct tevent_req
*req
)
/* Current number of queued requests == current array length. */
84 size_t num_reqs
= talloc_array_length(fsp
->blocked_smb1_lock_reqs
);
85 struct tevent_req
**tmp
= NULL
;
89 fsp
->blocked_smb1_lock_reqs
,
/* Install the grown array and place req in the new last slot. */
95 fsp
->blocked_smb1_lock_reqs
= tmp
;
96 fsp
->blocked_smb1_lock_reqs
[num_reqs
] = req
;
/*
 * Per-request state for the smbd_smb1_do_locks_send/_recv async pair.
 * NOTE(review): some members (e.g. timeout, num_locks, blocker, used
 * elsewhere in this file) are elided from this extraction.
 */
100 struct smbd_smb1_do_locks_state
{
101 struct tevent_context
*ev
;
/* The SMB1 request this lock operation belongs to; talloc_move()d in. */
102 struct smb_request
*smbreq
;
103 struct files_struct
*fsp
;
/* Current posix-lock polling interval; grown by _update_polling_msecs(). */
105 uint32_t polling_msecs
;
/* Absolute deadline after which the request is denied. */
106 struct timeval endtime
;
107 bool large_offset
; /* required for correct cancel */
108 enum brl_flavour lock_flav
;
110 struct smbd_lock_element
*locks
;
/* Status used when the lock is denied (flavour-dependent, see _send). */
112 NTSTATUS deny_status
;
115 static void smbd_smb1_do_locks_try(struct tevent_req
*req
);
116 static void smbd_smb1_do_locks_retry(struct tevent_req
*subreq
);
117 static void smbd_smb1_blocked_locks_cleanup(
118 struct tevent_req
*req
, enum tevent_req_state req_state
);
/*
 * Grow the polling interval for posix-lock retries linearly by v_min
 * per call, capped at v_max (= 10 * v_min).  Both bounds derive from
 * lp_lock_spin_time(), clamped to [2, 20000] msecs.
 */
120 static void smbd_smb1_do_locks_update_polling_msecs(
121 struct smbd_smb1_do_locks_state
*state
)
/*
124 * The default lp_lock_spin_time() is 200ms.
126 * v_min is in the range of 0.002 to 20 secs
127 * (0.2 secs by default)
129 * v_max is in the range of 0.02 to 200 secs
130 * (2.0 secs by default)
132 * The typical steps are:
133 * 0.2, 0.4, 0.6, 0.8, ... 2.0
 */
135 uint32_t v_min
= MAX(2, MIN(20000, lp_lock_spin_time()));
136 uint32_t v_max
= 10 * v_min
;
/* Saturate at the maximum interval. */
138 if (state
->polling_msecs
>= v_max
) {
139 state
->polling_msecs
= v_max
;
/* Otherwise take one more linear step. */
143 state
->polling_msecs
+= v_min
;
/*
 * Async (tevent_req) entry point: try to take the given byte-range
 * locks for an SMB1 request, and if they are contended, block until
 * they can be granted, the timeout expires, or the request is
 * cancelled.
 *
 * NOTE(review): many physical lines are elided from this extraction
 * (embedded line numbers jump, e.g. 214 -> 222); parameters such as
 * mem_ctx/large_offset/num_locks and several statements are used but
 * not visible here.  Only comments are added.
 */
146 struct tevent_req
*smbd_smb1_do_locks_send(
148 struct tevent_context
*ev
,
149 struct smb_request
**smbreq
, /* talloc_move()d into our state */
150 struct files_struct
*fsp
,
151 uint32_t lock_timeout
,
153 enum brl_flavour lock_flav
,
155 struct smbd_lock_element
*locks
)
157 struct tevent_req
*req
= NULL
, *subreq
= NULL
;
158 struct smbd_smb1_do_locks_state
*state
= NULL
;
159 struct share_mode_lock
*lck
= NULL
;
160 struct server_id blocking_pid
= { 0 };
161 uint64_t blocking_smblctx
= 0;
162 struct timeval endtime
= { 0 };
163 NTSTATUS status
= NT_STATUS_OK
;
167 req
= tevent_req_create(
168 mem_ctx
, &state
, struct smbd_smb1_do_locks_state
);
/* We take ownership of the smb_request; caller's pointer is cleared. */
173 state
->smbreq
= talloc_move(state
, smbreq
);
175 state
->timeout
= lock_timeout
;
176 state
->large_offset
= large_offset
;
177 state
->lock_flav
= lock_flav
;
178 state
->num_locks
= num_locks
;
179 state
->locks
= locks
;
/* Pick the flavour-specific status used when the lock is denied. */
181 if (lock_flav
== POSIX_LOCK
) {
/*
183 * SMB1 posix locks always use
184 * NT_STATUS_FILE_LOCK_CONFLICT.
 */
186 state
->deny_status
= NT_STATUS_FILE_LOCK_CONFLICT
;
188 state
->deny_status
= NT_STATUS_LOCK_NOT_GRANTED
;
191 DBG_DEBUG("state=%p, state->smbreq=%p\n", state
, state
->smbreq
);
/* Nothing to lock: complete immediately. */
193 if (num_locks
== 0) {
194 DBG_DEBUG("no locks\n");
195 tevent_req_done(req
);
196 return tevent_req_post(req
, ev
);
/* Clamp finite, non-infinite timeouts to at least the spin time. */
199 if ((state
->timeout
!= 0) && (state
->timeout
!= UINT32_MAX
)) {
/*
201 * Windows internal resolution for blocking locks
202 * seems to be about 200ms... Don't wait for less than
 * that. (rest of comment elided in this extraction)
 */
205 state
->timeout
= MAX(state
->timeout
, lp_lock_spin_time());
/* Hold the share-mode lock around the first lock attempt. */
208 lck
= get_existing_share_mode_lock(state
, state
->fsp
->file_id
);
209 if (tevent_req_nomem(lck
, req
)) {
210 DBG_DEBUG("Could not get share mode lock\n");
211 return tevent_req_post(req
, ev
);
/* First attempt (argument list elided in this extraction). */
214 status
= smbd_do_locks_try(
222 if (NT_STATUS_IS_OK(status
)) {
223 tevent_req_done(req
);
/* Any error other than a lock conflict is fatal for the request. */
226 if (!ERROR_WAS_LOCK_DENIED(status
)) {
227 tevent_req_nterror(req
, status
);
/*
 * Zero timeout would normally mean "fail at once", but two
 * heuristics below convert it into a short spin instead.
 */
231 if (state
->timeout
== 0) {
232 struct smbd_lock_element
*blocker
= &locks
[state
->blocker
];
234 if ((blocker
->offset
>= 0xEF000000) &&
235 ((blocker
->offset
>> 63) == 0)) {
/*
237 * This must be an optimization of an ancient
 * application bug. (full comment elided in this extraction)
 */
240 state
->timeout
= lp_lock_spin_time();
/* Repeated attempt on the very same contended offset: back off. */
243 if ((fsp
->lock_failure_seen
) &&
244 (blocker
->offset
== fsp
->lock_failure_offset
)) {
/*
246 * Delay repeated lock attempts on the same
247 * lock. Maybe a more advanced version of the
 * heuristic above. (rest elided in this extraction)
 */
250 DBG_DEBUG("Delaying lock request due to previous "
252 state
->timeout
= lp_lock_spin_time();
/* Convert the relative timeout into an absolute deadline. */
255 state
->endtime
= timeval_current_ofs_msec(state
->timeout
);
257 DBG_DEBUG("timeout=%"PRIu32
", blocking_smblctx=%"PRIu64
"\n",
/*
262 * The client specified timeout expired
263 * avoid further retries.
265 * Otherwise keep waiting either waiting
266 * for changes in locking.tdb or the polling
267 * mode timers waiting for posix locks.
269 * If the endtime is not elapsed yet,
270 * it means we'll retry after a timeout.
271 * In that case we'll have to return
272 * NT_STATUS_FILE_LOCK_CONFLICT
273 * instead of NT_STATUS_LOCK_NOT_GRANTED.
 */
275 expired
= timeval_expired(&state
->endtime
);
277 status
= state
->deny_status
;
278 tevent_req_nterror(req
, status
);
281 state
->deny_status
= NT_STATUS_FILE_LOCK_CONFLICT
;
/* Wait for the locking.tdb record to change (blocker releasing). */
283 subreq
= dbwrap_watched_watch_send(
284 state
, state
->ev
, lck
->data
->record
, blocking_pid
);
285 if (tevent_req_nomem(subreq
, req
)) {
289 tevent_req_set_callback(subreq
, smbd_smb1_do_locks_retry
, req
);
291 endtime
= state
->endtime
;
/*
 * UINT64_MAX marks a blocker we cannot get watch notifications
 * for (posix lock held outside smbd): fall back to polling.
 */
293 if (blocking_smblctx
== UINT64_MAX
) {
296 smbd_smb1_do_locks_update_polling_msecs(state
);
298 DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32
" msecs\n",
299 state
->polling_msecs
);
/* Wake up at the earlier of the deadline and the next poll tick. */
301 tmp
= timeval_current_ofs_msec(state
->polling_msecs
);
302 endtime
= timeval_min(&endtime
, &tmp
);
305 ok
= tevent_req_set_endtime(subreq
, state
->ev
, endtime
);
/* Queue ourselves on the fsp so cancel/finish can find us. */
311 ok
= smbd_smb1_fsp_add_blocked_lock_req(fsp
, req
);
/* The cleanup fn removes us from the fsp queue on completion. */
316 tevent_req_set_cleanup_fn(req
, smbd_smb1_blocked_locks_cleanup
);
320 return tevent_req_post(req
, ev
);
/*
 * tevent_req cleanup fn: remove "req" from its fsp's
 * blocked_smb1_lock_reqs array while preserving the order of the
 * remaining entries.
 *
 * NOTE(review): the memmove() that closes the gap and the early
 * returns are elided in this extraction; only comments are added.
 */
323 static void smbd_smb1_blocked_locks_cleanup(
324 struct tevent_req
*req
, enum tevent_req_state req_state
)
326 struct smbd_smb1_do_locks_state
*state
= tevent_req_data(
327 req
, struct smbd_smb1_do_locks_state
);
328 struct files_struct
*fsp
= state
->fsp
;
329 struct tevent_req
**blocked
= fsp
->blocked_smb1_lock_reqs
;
330 size_t num_blocked
= talloc_array_length(blocked
);
333 DBG_DEBUG("req=%p, state=%p, req_state=%d\n",
/* Cleanup runs more than once; only act the first time. */
338 if (req_state
== TEVENT_REQ_RECEIVED
) {
339 DBG_DEBUG("already received\n");
/* Find our slot in the queue. */
343 for (i
=0; i
<num_blocked
; i
++) {
344 if (blocked
[i
] == req
) {
/* We must be in the array -- cleanup only runs for queued reqs. */
348 SMB_ASSERT(i
<num_blocked
);
350 num_after
= num_blocked
- (i
+1);
/*
354 * The locks need to be kept in order, see
355 * raw.lock.multilock2
 */
/* (elided: memmove closing the gap over our slot) */
359 sizeof(*blocked
) * num_after
);
/* Shrink the array by the one slot we vacated. */
361 fsp
->blocked_smb1_lock_reqs
= talloc_realloc(
362 fsp
, blocked
, struct tevent_req
*, num_blocked
-1);
/*
 * Retry handler: attempt the lock list again for the head of the
 * fsp's blocked-request queue (locks must be granted in queue order,
 * so we always retry blocked[0], not necessarily "req").
 *
 * NOTE(review): several statements (returns, parts of the
 * smbd_do_locks_try() argument list, error paths) are elided in this
 * extraction; only comments are added.
 */
365 static void smbd_smb1_do_locks_try(struct tevent_req
*req
)
367 struct smbd_smb1_do_locks_state
*state
= tevent_req_data(
368 req
, struct smbd_smb1_do_locks_state
);
369 struct files_struct
*fsp
= state
->fsp
;
370 struct tevent_req
**blocked
= fsp
->blocked_smb1_lock_reqs
;
/* Head of the queue -- the request whose locks we retry now. */
371 struct tevent_req
*retry_req
= blocked
[0];
372 struct smbd_smb1_do_locks_state
*retry_state
= tevent_req_data(
373 retry_req
, struct smbd_smb1_do_locks_state
);
374 struct share_mode_lock
*lck
;
375 struct timeval endtime
= { 0 };
376 struct server_id blocking_pid
= { 0 };
377 uint64_t blocking_smblctx
= 0;
378 struct tevent_req
*subreq
= NULL
;
383 lck
= get_existing_share_mode_lock(state
, fsp
->file_id
);
384 if (tevent_req_nomem(lck
, req
)) {
385 DBG_DEBUG("Could not get share mode lock\n");
/* Retry the head request's lock list (arguments partly elided). */
389 status
= smbd_do_locks_try(
391 retry_state
->lock_flav
,
392 retry_state
->num_locks
,
397 if (NT_STATUS_IS_OK(status
)) {
400 if (!ERROR_WAS_LOCK_DENIED(status
)) {
/*
405 * The client specified timeout expired
406 * avoid further retries.
408 * Otherwise keep waiting either waiting
409 * for changes in locking.tdb or the polling
410 * mode timers waiting for posix locks.
412 * If the endtime is not expired yet,
413 * it means we'll retry after a timeout.
414 * In that case we'll have to return
415 * NT_STATUS_FILE_LOCK_CONFLICT
416 * instead of NT_STATUS_LOCK_NOT_GRANTED.
 */
418 expired
= timeval_expired(&state
->endtime
);
420 status
= state
->deny_status
;
423 state
->deny_status
= NT_STATUS_FILE_LOCK_CONFLICT
;
/* Still contended: watch the locking.tdb record again. */
425 subreq
= dbwrap_watched_watch_send(
426 state
, state
->ev
, lck
->data
->record
, blocking_pid
);
427 if (tevent_req_nomem(subreq
, req
)) {
431 tevent_req_set_callback(subreq
, smbd_smb1_do_locks_retry
, req
);
433 endtime
= state
->endtime
;
/* Unwatchable (posix) blocker: fall back to polling with backoff. */
435 if (blocking_smblctx
== UINT64_MAX
) {
438 smbd_smb1_do_locks_update_polling_msecs(state
);
440 DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32
" msecs\n",
441 state
->polling_msecs
);
/* Wake at the earlier of the deadline and the next poll tick. */
443 tmp
= timeval_current_ofs_msec(state
->polling_msecs
);
444 endtime
= timeval_min(&endtime
, &tmp
);
447 ok
= tevent_req_set_endtime(subreq
, state
->ev
, endtime
);
449 status
= NT_STATUS_NO_MEMORY
;
/* Complete the request (done or error) with the final status. */
455 smbd_smb1_brl_finish_by_req(req
, status
);
/*
 * Callback for the dbwrap watch / poll timer: re-impersonate the
 * request's user, collect the watch result (errors deliberately
 * ignored) and retry the lock attempt.
 */
458 static void smbd_smb1_do_locks_retry(struct tevent_req
*subreq
)
460 struct tevent_req
*req
= tevent_req_callback_data(
461 subreq
, struct tevent_req
);
462 struct smbd_smb1_do_locks_state
*state
= tevent_req_data(
463 req
, struct smbd_smb1_do_locks_state
);
/*
468 * Make sure we run as the user again
 */
470 ok
= change_to_user_by_fsp(state
->fsp
);
/* Could not become the user: fail the whole lock request. */
472 tevent_req_nterror(req
, NT_STATUS_ACCESS_DENIED
);
476 status
= dbwrap_watched_watch_recv(subreq
, NULL
, NULL
);
479 DBG_DEBUG("dbwrap_watched_watch_recv returned %s\n",
/*
483 * We ignore any errors here, it's most likely
484 * we just get NT_STATUS_OK or NT_STATUS_IO_TIMEOUT.
486 * In any case we can just give it a retry.
 */
489 smbd_smb1_do_locks_try(req
);
/*
 * Receive function of the smbd_smb1_do_locks async pair.  On failure,
 * remember the blocking lock's offset on the fsp
 * (lock_failure_seen/_offset) so _send() can throttle immediate
 * retries on the same contended range.
 */
492 NTSTATUS
smbd_smb1_do_locks_recv(struct tevent_req
*req
)
494 struct smbd_smb1_do_locks_state
*state
= tevent_req_data(
495 req
, struct smbd_smb1_do_locks_state
);
496 NTSTATUS status
= NT_STATUS_OK
;
499 err
= tevent_req_is_nterror(req
, &status
);
501 DBG_DEBUG("err=%d, status=%s\n", (int)err
, nt_errstr(status
));
503 if (tevent_req_is_nterror(req
, &status
)) {
504 struct files_struct
*fsp
= state
->fsp
;
505 struct smbd_lock_element
*blocker
=
506 &state
->locks
[state
->blocker
];
508 DBG_DEBUG("Setting lock_failure_offset=%"PRIu64
"\n",
/* Record the contended offset for the back-off heuristic in _send(). */
511 fsp
->lock_failure_seen
= true;
512 fsp
->lock_failure_offset
= blocker
->offset
;
/* Mark the request received; triggers the cleanup fn. */
516 tevent_req_received(req
);
/*
 * Hand the embedded smb_request back to the caller (ownership moves
 * to mem_ctx).  Returns false if the state no longer holds one --
 * presumably; the return statements are elided in this extraction.
 */
521 bool smbd_smb1_do_locks_extract_smbreq(
522 struct tevent_req
*req
,
524 struct smb_request
**psmbreq
)
526 struct smbd_smb1_do_locks_state
*state
= tevent_req_data(
527 req
, struct smbd_smb1_do_locks_state
);
529 DBG_DEBUG("req=%p, state=%p, state->smbreq=%p\n",
/* Already extracted (or never set): nothing to hand out. */
534 if (state
->smbreq
== NULL
) {
/* Transfer ownership; state->smbreq becomes NULL. */
537 *psmbreq
= talloc_move(mem_ctx
, &state
->smbreq
);
/*
 * Complete a pending blocked-lock request: tevent_req_done() on
 * NT_STATUS_OK, tevent_req_nterror() otherwise.
 */
541 void smbd_smb1_brl_finish_by_req(struct tevent_req
*req
, NTSTATUS status
)
543 DBG_DEBUG("req=%p, status=%s\n", req
, nt_errstr(status
));
545 if (NT_STATUS_IS_OK(status
)) {
546 tevent_req_done(req
);
548 tevent_req_nterror(req
, status
);
/*
 * Finish the first queued blocked-lock request on this fsp that
 * contains a lock element matching "lock" (same smblctx, offset and
 * count) and the same flavour/large_offset flags, completing it with
 * finish_status.  Return value semantics (found/not found) are
 * presumably bool -- the returns are elided in this extraction.
 */
552 bool smbd_smb1_brl_finish_by_lock(
553 struct files_struct
*fsp
,
555 enum brl_flavour lock_flav
,
556 struct smbd_lock_element lock
,
557 NTSTATUS finish_status
)
559 struct tevent_req
**blocked
= fsp
->blocked_smb1_lock_reqs
;
560 size_t num_blocked
= talloc_array_length(blocked
);
563 DBG_DEBUG("num_blocked=%zu\n", num_blocked
);
/* Scan every pending request queued on this fsp. */
565 for (i
=0; i
<num_blocked
; i
++) {
566 struct tevent_req
*req
= blocked
[i
];
567 struct smbd_smb1_do_locks_state
*state
= tevent_req_data(
568 req
, struct smbd_smb1_do_locks_state
);
571 DBG_DEBUG("i=%zu, req=%p\n", i
, req
);
/* Flavour and large_offset must match for a cancel to apply. */
573 if ((state
->large_offset
!= large_offset
) ||
574 (state
->lock_flav
!= lock_flav
)) {
/* Compare against each lock element of this request. */
578 for (j
=0; j
<state
->num_locks
; j
++) {
579 struct smbd_lock_element
*l
= &state
->locks
[j
];
581 if ((lock
.smblctx
== l
->smblctx
) &&
582 (lock
.offset
== l
->offset
) &&
583 (lock
.count
== l
->count
)) {
/* Match: complete the request with finish_status. */
584 smbd_smb1_brl_finish_by_req(
/*
 * files_forall() callback: fail (with NT_STATUS_FILE_LOCK_CONFLICT)
 * every blocked lock request on this fsp whose smb_request carries
 * the mid passed via private_data.  Presumably returns fsp to stop
 * the walk on a match and NULL to continue -- the returns are elided
 * in this extraction, TODO confirm.
 */
593 static struct files_struct
*smbd_smb1_brl_finish_by_mid_fn(
594 struct files_struct
*fsp
, void *private_data
)
596 struct tevent_req
**blocked
= fsp
->blocked_smb1_lock_reqs
;
597 size_t num_blocked
= talloc_array_length(blocked
);
/* The mid we are cancelling, smuggled in via private_data. */
598 uint64_t mid
= *((uint64_t *)private_data
);
601 DBG_DEBUG("fsp=%p, num_blocked=%zu\n", fsp
, num_blocked
);
603 for (i
=0; i
<num_blocked
; i
++) {
604 struct tevent_req
*req
= blocked
[i
];
605 struct smbd_smb1_do_locks_state
*state
= tevent_req_data(
606 req
, struct smbd_smb1_do_locks_state
);
607 struct smb_request
*smbreq
= state
->smbreq
;
/* Cancel semantics: a cancelled blocking lock fails with conflict. */
609 if (smbreq
->mid
== mid
) {
610 tevent_req_nterror(req
, NT_STATUS_FILE_LOCK_CONFLICT
);
619 * This walks the list of fsps, we store the blocked reqs attached to
620 * them. It can be expensive, but this is legacy SMB1 and, trying to
621 * remember looking at traces, I don't recall many of those calls.
/*
 * Cancel the blocked lock request(s) identified by "mid" by walking
 * all fsps on the connection.  Returns true iff a matching request
 * was found (files_forall() returned a non-NULL fsp).
 */
624 bool smbd_smb1_brl_finish_by_mid(
625 struct smbd_server_connection
*sconn
, uint64_t mid
)
627 struct files_struct
*found
= files_forall(
628 sconn
, smbd_smb1_brl_finish_by_mid_fn
, &mid
);
629 return (found
!= NULL
);