/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
23 #include "vfs_posix.h"
24 #include "system/time.h"
25 #include "../lib/util/dlinklist.h"
26 #include "messaging/messaging.h"
30 check if we can perform IO on a range that might be locked
32 NTSTATUS
pvfs_check_lock(struct pvfs_state
*pvfs
,
35 uint64_t offset
, uint64_t count
,
38 if (!(pvfs
->flags
& PVFS_FLAG_STRICT_LOCKING
)) {
42 return brlock_locktest(pvfs
->brl_context
,
/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev;	/* f->pending_list linkage (DLIST) */
	struct pvfs_state *pvfs;
	union smb_lock *lck;		/* the original lock request we are retrying */
	struct pvfs_file *f;		/* file the lock applies to */
	struct ntvfs_request *req;	/* request to answer once we succeed/fail */
	int pending_lock;		/* index of the lock entry we are blocked on */
	struct pvfs_wait *wait_handle;	/* handle for the brlock retry/timeout wait */
	struct timeval end_time;	/* when the lock request times out */
};
61 a secondary attempt to setup a lock has failed - back out
62 the locks we did get and send an error
64 static void pvfs_lock_async_failed(struct pvfs_state
*pvfs
,
65 struct ntvfs_request
*req
,
67 struct smb_lock_entry
*locks
,
71 /* undo the locks we just did */
73 brlock_unlock(pvfs
->brl_context
,
80 req
->async_states
->status
= status
;
81 req
->async_states
->send_fn(req
);
86 called when we receive a pending lock notification. It means that
87 either our lock timed out or someone else has unlocked a overlapping
88 range, so we should try the lock again. Note that on timeout we
89 do retry the lock, giving it a last chance.
91 static void pvfs_pending_lock_continue(void *private_data
, enum pvfs_wait_notice reason
)
93 struct pvfs_pending_lock
*pending
= talloc_get_type(private_data
,
94 struct pvfs_pending_lock
);
95 struct pvfs_state
*pvfs
= pending
->pvfs
;
96 struct pvfs_file
*f
= pending
->f
;
97 struct ntvfs_request
*req
= pending
->req
;
98 union smb_lock
*lck
= pending
->lck
;
99 struct smb_lock_entry
*locks
;
105 timed_out
= (reason
!= PVFS_WAIT_EVENT
);
107 locks
= lck
->lockx
.in
.locks
+ lck
->lockx
.in
.ulock_cnt
;
109 if (lck
->lockx
.in
.mode
& LOCKING_ANDX_SHARED_LOCK
) {
115 DLIST_REMOVE(f
->pending_list
, pending
);
117 /* we don't retry on a cancel */
118 if (reason
== PVFS_WAIT_CANCEL
) {
119 if (pvfs
->ntvfs
->ctx
->protocol
< PROTOCOL_SMB2_02
) {
120 status
= NT_STATUS_FILE_LOCK_CONFLICT
;
122 status
= NT_STATUS_CANCELLED
;
126 * here it's important to pass the pending pointer
127 * because with this we'll get the correct error code
128 * FILE_LOCK_CONFLICT in the error case
130 status
= brlock_lock(pvfs
->brl_context
,
132 locks
[pending
->pending_lock
].pid
,
133 locks
[pending
->pending_lock
].offset
,
134 locks
[pending
->pending_lock
].count
,
137 if (NT_STATUS_IS_OK(status
)) {
142 /* if we have failed and timed out, or succeeded, then we
143 don't need the pending lock any more */
144 if (NT_STATUS_IS_OK(status
) || timed_out
) {
146 status2
= brlock_remove_pending(pvfs
->brl_context
,
147 f
->brl_handle
, pending
);
148 if (!NT_STATUS_IS_OK(status2
)) {
149 DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2
)));
151 talloc_free(pending
->wait_handle
);
154 if (!NT_STATUS_IS_OK(status
)) {
156 /* no more chances */
157 pvfs_lock_async_failed(pvfs
, req
, f
, locks
, pending
->pending_lock
, status
);
158 talloc_free(pending
);
160 /* we can try again */
161 DLIST_ADD(f
->pending_list
, pending
);
166 /* if we haven't timed out yet, then we can do more pending locks */
167 if (rw
== READ_LOCK
) {
168 rw
= PENDING_READ_LOCK
;
170 rw
= PENDING_WRITE_LOCK
;
173 /* we've now got the pending lock. try and get the rest, which might
174 lead to more pending locks */
175 for (i
=pending
->pending_lock
+1;i
<lck
->lockx
.in
.lock_cnt
;i
++) {
176 pending
->pending_lock
= i
;
178 status
= brlock_lock(pvfs
->brl_context
,
184 if (!NT_STATUS_IS_OK(status
)) {
185 /* a timed lock failed - setup a wait message to handle
186 the pending lock notification or a timeout */
187 pending
->wait_handle
= pvfs_wait_message(pvfs
, req
, MSG_BRL_RETRY
,
189 pvfs_pending_lock_continue
,
191 if (pending
->wait_handle
== NULL
) {
192 pvfs_lock_async_failed(pvfs
, req
, f
, locks
, i
, NT_STATUS_NO_MEMORY
);
193 talloc_free(pending
);
195 talloc_steal(pending
, pending
->wait_handle
);
196 DLIST_ADD(f
->pending_list
, pending
);
204 /* we've managed to get all the locks. Tell the client */
205 req
->async_states
->status
= NT_STATUS_OK
;
206 req
->async_states
->send_fn(req
);
207 talloc_free(pending
);
212 called when we close a file that might have locks
214 void pvfs_lock_close(struct pvfs_state
*pvfs
, struct pvfs_file
*f
)
216 struct pvfs_pending_lock
*p
, *next
;
218 if (f
->lock_count
|| f
->pending_list
) {
219 DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
220 (double)f
->lock_count
));
221 brlock_close(f
->pvfs
->brl_context
, f
->brl_handle
);
225 /* reply to all the pending lock requests, telling them the
227 for (p
=f
->pending_list
;p
;p
=next
) {
229 DLIST_REMOVE(f
->pending_list
, p
);
230 p
->req
->async_states
->status
= NT_STATUS_RANGE_NOT_LOCKED
;
231 p
->req
->async_states
->send_fn(p
->req
);
237 cancel a set of locks
239 static NTSTATUS
pvfs_lock_cancel(struct pvfs_state
*pvfs
, struct ntvfs_request
*req
, union smb_lock
*lck
,
242 struct pvfs_pending_lock
*p
;
244 for (p
=f
->pending_list
;p
;p
=p
->next
) {
245 /* check if the lock request matches exactly - you can only cancel with exact matches */
246 if (p
->lck
->lockx
.in
.ulock_cnt
== lck
->lockx
.in
.ulock_cnt
&&
247 p
->lck
->lockx
.in
.lock_cnt
== lck
->lockx
.in
.lock_cnt
&&
248 p
->lck
->lockx
.in
.file
.ntvfs
== lck
->lockx
.in
.file
.ntvfs
&&
249 p
->lck
->lockx
.in
.mode
== (lck
->lockx
.in
.mode
& ~LOCKING_ANDX_CANCEL_LOCK
)) {
252 for (i
=0;i
<lck
->lockx
.in
.ulock_cnt
+ lck
->lockx
.in
.lock_cnt
;i
++) {
253 if (p
->lck
->lockx
.in
.locks
[i
].pid
!= lck
->lockx
.in
.locks
[i
].pid
||
254 p
->lck
->lockx
.in
.locks
[i
].offset
!= lck
->lockx
.in
.locks
[i
].offset
||
255 p
->lck
->lockx
.in
.locks
[i
].count
!= lck
->lockx
.in
.locks
[i
].count
) {
259 if (i
< lck
->lockx
.in
.ulock_cnt
) continue;
261 /* an exact match! we can cancel it, which is equivalent
262 to triggering the timeout early */
263 pvfs_pending_lock_continue(p
, PVFS_WAIT_TIMEOUT
);
268 return NT_STATUS_DOS(ERRDOS
, ERRcancelviolation
);
273 lock or unlock a byte range
275 NTSTATUS
pvfs_lock(struct ntvfs_module_context
*ntvfs
,
276 struct ntvfs_request
*req
, union smb_lock
*lck
)
278 struct pvfs_state
*pvfs
= talloc_get_type(ntvfs
->private_data
,
281 struct smb_lock_entry
*locks
;
284 struct pvfs_pending_lock
*pending
= NULL
;
287 if (lck
->generic
.level
!= RAW_LOCK_GENERIC
) {
288 return ntvfs_map_lock(ntvfs
, req
, lck
);
291 if (lck
->lockx
.in
.mode
& LOCKING_ANDX_OPLOCK_RELEASE
) {
292 return pvfs_oplock_release(ntvfs
, req
, lck
);
295 f
= pvfs_find_fd(pvfs
, req
, lck
->lockx
.in
.file
.ntvfs
);
297 return NT_STATUS_INVALID_HANDLE
;
300 if (f
->handle
->fd
== -1) {
301 return NT_STATUS_FILE_IS_A_DIRECTORY
;
304 status
= pvfs_break_level2_oplocks(f
);
305 NT_STATUS_NOT_OK_RETURN(status
);
307 if (lck
->lockx
.in
.timeout
!= 0 &&
308 (req
->async_states
->state
& NTVFS_ASYNC_STATE_MAY_ASYNC
)) {
309 pending
= talloc(f
, struct pvfs_pending_lock
);
310 if (pending
== NULL
) {
311 return NT_STATUS_NO_MEMORY
;
314 pending
->pvfs
= pvfs
;
320 timeval_current_ofs_msec(lck
->lockx
.in
.timeout
);
323 if (lck
->lockx
.in
.mode
& LOCKING_ANDX_SHARED_LOCK
) {
324 rw
= pending
? PENDING_READ_LOCK
: READ_LOCK
;
326 rw
= pending
? PENDING_WRITE_LOCK
: WRITE_LOCK
;
329 if (lck
->lockx
.in
.mode
& LOCKING_ANDX_CANCEL_LOCK
) {
330 talloc_free(pending
);
331 return pvfs_lock_cancel(pvfs
, req
, lck
, f
);
334 if (lck
->lockx
.in
.mode
& LOCKING_ANDX_CHANGE_LOCKTYPE
) {
335 /* this seems to not be supported by any windows server,
336 or used by any clients */
337 talloc_free(pending
);
338 return NT_STATUS_DOS(ERRDOS
, ERRnoatomiclocks
);
341 /* the unlocks happen first */
342 locks
= lck
->lockx
.in
.locks
;
344 for (i
=0;i
<lck
->lockx
.in
.ulock_cnt
;i
++) {
345 status
= brlock_unlock(pvfs
->brl_context
,
350 if (!NT_STATUS_IS_OK(status
)) {
351 talloc_free(pending
);
359 for (i
=0;i
<lck
->lockx
.in
.lock_cnt
;i
++) {
361 pending
->pending_lock
= i
;
364 status
= brlock_lock(pvfs
->brl_context
,
370 if (!NT_STATUS_IS_OK(status
)) {
372 /* a timed lock failed - setup a wait message to handle
373 the pending lock notification or a timeout */
374 pending
->wait_handle
= pvfs_wait_message(pvfs
, req
, MSG_BRL_RETRY
,
376 pvfs_pending_lock_continue
,
378 if (pending
->wait_handle
== NULL
) {
379 talloc_free(pending
);
380 return NT_STATUS_NO_MEMORY
;
382 talloc_steal(pending
, pending
->wait_handle
);
383 DLIST_ADD(f
->pending_list
, pending
);
387 /* undo the locks we just did */
389 brlock_unlock(pvfs
->brl_context
,
396 talloc_free(pending
);
402 talloc_free(pending
);