/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "../lib/util/dlinklist.h"
#include "messaging/messaging.h"
/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
			 struct pvfs_file *f,
			 uint32_t smbpid,
			 uint64_t offset, uint64_t count,
			 enum brl_type rw)
{
	/* with strict locking disabled we never refuse IO */
	if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
		return NT_STATUS_OK;
	}

	return brlock_locktest(pvfs->brl_context,
			       f->brl_handle,
			       smbpid, offset, count, rw);
}
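
/*
  Illustrative sketch (not part of the original file): how a data-path
  routine might consult pvfs_check_lock() before touching a byte range.
  The caller name, its arguments and the error handling are assumptions
  for illustration; only pvfs_check_lock() and READ_LOCK come from this
  code.  Kept inside #if 0 so it is never compiled.
*/
#if 0
static NTSTATUS example_check_before_read(struct pvfs_state *pvfs,
					  struct pvfs_file *f,
					  uint32_t smbpid,
					  uint64_t offset, uint64_t count)
{
	NTSTATUS status;

	/* with strict locking enabled this asks the byte-range lock
	   database whether another opener holds a conflicting lock */
	status = pvfs_check_lock(pvfs, f, smbpid, offset, count, READ_LOCK);
	if (!NT_STATUS_IS_OK(status)) {
		return status;	/* typically NT_STATUS_FILE_LOCK_CONFLICT */
	}

	/* ... safe to issue the read against the underlying file ... */
	return NT_STATUS_OK;
}
#endif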
/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev;
	struct pvfs_state *pvfs;
	union smb_lock *lck;
	struct pvfs_file *f;
	struct ntvfs_request *req;
	int pending_lock;
	struct pvfs_wait *wait_handle;
	struct timeval end_time;
};
/*
  a secondary attempt to setup a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
				   struct ntvfs_request *req,
				   struct pvfs_file *f,
				   struct smb_lock_entry *locks,
				   int i,
				   NTSTATUS status)
{
	/* undo the locks we just did */
	for (i--;i>=0;i--) {
		brlock_unlock(pvfs->brl_context,
			      f->brl_handle,
			      locks[i].pid,
			      locks[i].offset,
			      locks[i].count);
		f->lock_count--;
	}
	req->async_states->status = status;
	req->async_states->send_fn(req);
}
/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private_data, enum pvfs_wait_notice reason)
{
	struct pvfs_pending_lock *pending = talloc_get_type(private_data,
					    struct pvfs_pending_lock);
	struct pvfs_state *pvfs = pending->pvfs;
	struct pvfs_file *f = pending->f;
	struct ntvfs_request *req = pending->req;
	union smb_lock *lck = pending->lck;
	struct smb_lock_entry *locks;
	enum brl_type rw;
	NTSTATUS status;
	int i;
	bool timed_out;

	timed_out = (reason != PVFS_WAIT_EVENT);

	/* the lock entries to retry start after the unlock entries */
	locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = READ_LOCK;
	} else {
		rw = WRITE_LOCK;
	}

	DLIST_REMOVE(f->pending_list, pending);

	/* we don't retry on a cancel */
	if (reason == PVFS_WAIT_CANCEL) {
		if (pvfs->ntvfs->ctx->protocol < PROTOCOL_SMB2_02) {
			status = NT_STATUS_FILE_LOCK_CONFLICT;
		} else {
			status = NT_STATUS_CANCELLED;
		}
	} else {
		/*
		 * here it's important to pass the pending pointer
		 * because with this we'll get the correct error code
		 * FILE_LOCK_CONFLICT in the error case
		 */
		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[pending->pending_lock].pid,
				     locks[pending->pending_lock].offset,
				     locks[pending->pending_lock].count,
				     rw, pending);
	}
	if (NT_STATUS_IS_OK(status)) {
		f->lock_count++;
		timed_out = false;
	}

	/* if we have failed and timed out, or succeeded, then we
	   don't need the pending lock any more */
	if (NT_STATUS_IS_OK(status) || timed_out) {
		NTSTATUS status2;
		status2 = brlock_remove_pending(pvfs->brl_context,
						f->brl_handle, pending);
		if (!NT_STATUS_IS_OK(status2)) {
			DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
		}
		talloc_free(pending->wait_handle);
	}

	if (!NT_STATUS_IS_OK(status)) {
		if (timed_out) {
			/* no more chances */
			pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
			talloc_free(pending);
		} else {
			/* we can try again */
			DLIST_ADD(f->pending_list, pending);
		}
		return;
	}

	/* if we haven't timed out yet, then we can do more pending locks */
	if (rw == READ_LOCK) {
		rw = PENDING_READ_LOCK;
	} else {
		rw = PENDING_WRITE_LOCK;
	}

	/* we've now got the pending lock. try and get the rest, which might
	   lead to more pending locks */
	for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
					talloc_free(pending);
				} else {
					talloc_steal(pending, pending->wait_handle);
					DLIST_ADD(f->pending_list, pending);
				}
				return;
			}
			pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
			talloc_free(pending);
			return;
		}

		f->lock_count++;
	}

	/* we've managed to get all the locks. Tell the client */
	req->async_states->status = NT_STATUS_OK;
	req->async_states->send_fn(req);
	talloc_free(pending);
}
/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
	struct pvfs_pending_lock *p, *next;

	if (f->lock_count || f->pending_list) {
		DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
			 (double)f->lock_count));
		brlock_close(f->pvfs->brl_context, f->brl_handle);
		f->lock_count = 0;
	}

	/* reply to all the pending lock requests, telling them the
	   lock failed */
	for (p=f->pending_list;p;p=next) {
		next = p->next;
		DLIST_REMOVE(f->pending_list, p);
		p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
		p->req->async_states->send_fn(p->req);
	}
}
/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
				 struct pvfs_file *f)
{
	struct pvfs_pending_lock *p;

	for (p=f->pending_list;p;p=p->next) {
		/* check if the lock request matches exactly - you can only cancel with exact matches */
		if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
		    p->lck->lockx.in.lock_cnt  == lck->lockx.in.lock_cnt &&
		    p->lck->lockx.in.file.ntvfs== lck->lockx.in.file.ntvfs &&
		    p->lck->lockx.in.mode      == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
			int i;

			for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
				if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
				    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
				    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
					break;
				}
			}
			if (i < lck->lockx.in.ulock_cnt) continue;

			/* an exact match! we can cancel it, which is equivalent
			   to triggering the timeout early */
			pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
			return NT_STATUS_OK;
		}
	}

	return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
}
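
/*
  Illustrative sketch (not part of the original file): how a cancel
  request relates to the blocking lock it targets.  The helper name and
  the way the original request is captured are assumptions; only the
  union smb_lock fields and LOCKING_ANDX_CANCEL_LOCK below appear in
  this file.  A cancel must mirror the original request exactly (same
  counts, handle and lock entries); the match above ignores the cancel
  bit itself.  Kept inside #if 0 so it is never compiled.
*/
#if 0
static void example_build_cancel(const union smb_lock *orig, union smb_lock *cancel)
{
	/* copy the original LockingX request and flag it as a cancel */
	*cancel = *orig;
	cancel->lockx.in.mode |= LOCKING_ANDX_CANCEL_LOCK;
}
#endif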
/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
		   struct ntvfs_request *req, union smb_lock *lck)
{
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
				  struct pvfs_state);
	struct pvfs_file *f;
	struct smb_lock_entry *locks;
	int i;
	enum brl_type rw;
	struct pvfs_pending_lock *pending = NULL;
	NTSTATUS status;

	if (lck->generic.level != RAW_LOCK_GENERIC) {
		return ntvfs_map_lock(ntvfs, req, lck);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
		return pvfs_oplock_release(ntvfs, req, lck);
	}

	f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
	if (!f) {
		return NT_STATUS_INVALID_HANDLE;
	}

	if (f->handle->fd == -1) {
		return NT_STATUS_FILE_IS_A_DIRECTORY;
	}

	status = pvfs_break_level2_oplocks(f);
	NT_STATUS_NOT_OK_RETURN(status);

	/* a non-zero timeout on an async-capable request means we may
	   need to leave a pending lock behind */
	if (lck->lockx.in.timeout != 0 &&
	    (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		pending = talloc(f, struct pvfs_pending_lock);
		if (pending == NULL) {
			return NT_STATUS_NO_MEMORY;
		}

		pending->pvfs = pvfs;
		pending->lck = lck;
		pending->f = f;
		pending->req = req;

		pending->end_time =
			timeval_current_ofs_msec(lck->lockx.in.timeout);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = pending? PENDING_READ_LOCK : READ_LOCK;
	} else {
		rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
		talloc_free(pending);
		return pvfs_lock_cancel(pvfs, req, lck, f);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
		/* this seems not to be supported by any windows server,
		   or used by any clients */
		talloc_free(pending);
		return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
	}

	/* the unlocks happen first */
	locks = lck->lockx.in.locks;

	for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
		status = brlock_unlock(pvfs->brl_context,
				       f->brl_handle,
				       locks[i].pid,
				       locks[i].offset,
				       locks[i].count);
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(pending);
			return status;
		}
		f->lock_count--;
	}

	locks += i;

	for (i=0;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					talloc_free(pending);
					return NT_STATUS_NO_MEMORY;
				}
				talloc_steal(pending, pending->wait_handle);
				DLIST_ADD(f->pending_list, pending);
				return NT_STATUS_OK;
			}

			/* undo the locks we just did */
			for (i--;i>=0;i--) {
				brlock_unlock(pvfs->brl_context,
					      f->brl_handle,
					      locks[i].pid,
					      locks[i].offset,
					      locks[i].count);
				f->lock_count--;
			}
			talloc_free(pending);
			return status;
		}
		f->lock_count++;
	}

	talloc_free(pending);

	return NT_STATUS_OK;
}
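
/*
  Illustrative sketch (not part of the original file): the shape of a
  generic-level lock request as pvfs_lock() consumes it - one exclusive
  byte-range lock with a 5 second timeout and no unlocks.  The function
  name, the handle type of the third parameter, the pid value and the
  surrounding setup are assumptions for illustration; the fields and
  constants used below all appear in this file.  Kept inside #if 0 so
  it is never compiled.
*/
#if 0
static NTSTATUS example_blocking_lock(struct ntvfs_module_context *ntvfs,
				      struct ntvfs_request *req,
				      struct ntvfs_handle *h)
{
	struct smb_lock_entry entry = {
		.pid    = 1234,		/* locking context (SMB PID) */
		.offset = 0,
		.count  = 4096,
	};
	union smb_lock lck;

	lck.generic.level       = RAW_LOCK_GENERIC;
	lck.lockx.in.file.ntvfs = h;
	lck.lockx.in.mode       = 0;	/* exclusive: LOCKING_ANDX_SHARED_LOCK not set */
	lck.lockx.in.timeout    = 5000;	/* ms; non-zero allows a pending lock */
	lck.lockx.in.ulock_cnt  = 0;	/* unlocks are processed first */
	lck.lockx.in.lock_cnt   = 1;
	lck.lockx.in.locks      = &entry;

	return pvfs_lock(ntvfs, req, &lck);
}
#endif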