source4/ntvfs/posix/pvfs_lock.c
/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "../lib/util/dlinklist.h"
#include "messaging/messaging.h"

/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
                         struct pvfs_file *f,
                         uint32_t smbpid,
                         uint64_t offset, uint64_t count,
                         enum brl_type rw)
{
        if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
                return NT_STATUS_OK;
        }

        return brlock_locktest(pvfs->brl_context,
                               f->brl_handle,
                               smbpid,
                               offset, count, rw);
}
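
/*
  For illustration only (the callers live elsewhere in the pvfs
  backend): the read/write paths are expected to test a range before
  doing IO, along these lines:

      status = pvfs_check_lock(pvfs, f, req->smbpid,
                               offset, count, READ_LOCK);
      if (!NT_STATUS_IS_OK(status)) return status;

  With strict locking disabled the check is skipped and IO always
  proceeds.
*/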

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
        struct pvfs_pending_lock *next, *prev;
        struct pvfs_state *pvfs;
        union smb_lock *lck;
        struct pvfs_file *f;
        struct ntvfs_request *req;
        int pending_lock;
        struct pvfs_wait *wait_handle;
        struct timeval end_time;
};
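
/*
  Lifecycle note: one of these is allocated as a talloc child of the
  file handle for each lock request that is allowed to block (see
  pvfs_lock() below).  pending_lock is the index into the request's
  lock array of the entry we are currently waiting on, and end_time
  is when the whole request times out.
*/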

/*
  a secondary attempt to set up a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
                                   struct ntvfs_request *req,
                                   struct pvfs_file *f,
                                   struct smb_lock_entry *locks,
                                   int i,
                                   NTSTATUS status)
{
        /* undo the locks we just did */
        for (i--;i>=0;i--) {
                brlock_unlock(pvfs->brl_context,
                              f->brl_handle,
                              locks[i].pid,
                              locks[i].offset,
                              locks[i].count);
                f->lock_count--;
        }
        req->async_states->status = status;
        req->async_states->send_fn(req);
}
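
/*
  This gives a lockingX request its all-or-nothing semantics: i is the
  index of the entry that failed, so entries [0, i) are released in
  reverse order before the request is completed with the failure status.
*/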

/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private_data, enum pvfs_wait_notice reason)
{
        struct pvfs_pending_lock *pending = talloc_get_type(private_data,
                                            struct pvfs_pending_lock);
        struct pvfs_state *pvfs = pending->pvfs;
        struct pvfs_file *f = pending->f;
        struct ntvfs_request *req = pending->req;
        union smb_lock *lck = pending->lck;
        struct smb_lock_entry *locks;
        enum brl_type rw;
        NTSTATUS status;
        int i;
        bool timed_out;

        timed_out = (reason != PVFS_WAIT_EVENT);
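
        /* in a lockingX request the entries to unlock come first in the
           locks array, so the entries to be locked start at offset
           ulock_cnt */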
        locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = READ_LOCK;
        } else {
                rw = WRITE_LOCK;
        }

        DLIST_REMOVE(f->pending_list, pending);

        /* we don't retry on a cancel */
        if (reason == PVFS_WAIT_CANCEL) {
                if (pvfs->ntvfs->ctx->protocol != PROTOCOL_SMB2) {
                        status = NT_STATUS_FILE_LOCK_CONFLICT;
                } else {
                        status = NT_STATUS_CANCELLED;
                }
        } else {
                /*
                 * it's important to pass the pending pointer here, as
                 * that is how we get the correct FILE_LOCK_CONFLICT
                 * error code in the failure case
                 */
                status = brlock_lock(pvfs->brl_context,
                                     f->brl_handle,
                                     locks[pending->pending_lock].pid,
                                     locks[pending->pending_lock].offset,
                                     locks[pending->pending_lock].count,
                                     rw, pending);
        }
        if (NT_STATUS_IS_OK(status)) {
                f->lock_count++;
                timed_out = false;
        }

        /* if we have failed and timed out, or succeeded, then we
           don't need the pending lock any more */
        if (NT_STATUS_IS_OK(status) || timed_out) {
                NTSTATUS status2;
                status2 = brlock_remove_pending(pvfs->brl_context,
                                                f->brl_handle, pending);
                if (!NT_STATUS_IS_OK(status2)) {
                        DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
                }
                talloc_free(pending->wait_handle);
        }

        if (!NT_STATUS_IS_OK(status)) {
                if (timed_out) {
                        /* no more chances */
                        pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
                        talloc_free(pending);
                } else {
                        /* we can try again */
                        DLIST_ADD(f->pending_list, pending);
                }
                return;
        }

        /* if we haven't timed out yet, then we can do more pending locks */
        if (rw == READ_LOCK) {
                rw = PENDING_READ_LOCK;
        } else {
                rw = PENDING_WRITE_LOCK;
        }

        /* we've now got the pending lock. try and get the rest, which might
           lead to more pending locks */
        for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brlock_lock(pvfs->brl_context,
                                     f->brl_handle,
                                     locks[i].pid,
                                     locks[i].offset,
                                     locks[i].count,
                                     rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
                                        talloc_free(pending);
                                } else {
                                        talloc_steal(pending, pending->wait_handle);
                                        DLIST_ADD(f->pending_list, pending);
                                }
                                return;
                        }
                        pvfs_lock_async_failed(pvfs, req, f, locks, i, status);
                        talloc_free(pending);
                        return;
                }

                f->lock_count++;
        }

        /* we've managed to get all the locks. Tell the client */
        req->async_states->status = NT_STATUS_OK;
        req->async_states->send_fn(req);
        talloc_free(pending);
}

/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
        struct pvfs_pending_lock *p, *next;

        if (f->lock_count || f->pending_list) {
                DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
                         (double)f->lock_count));
                brlock_close(f->pvfs->brl_context, f->brl_handle);
                f->lock_count = 0;
        }

        /* reply to all the pending lock requests, telling them the
           lock failed */
        for (p=f->pending_list;p;p=next) {
                next = p->next;
                DLIST_REMOVE(f->pending_list, p);
                p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
                p->req->async_states->send_fn(p->req);
        }
}

/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
                                 struct pvfs_file *f)
{
        struct pvfs_pending_lock *p;

        for (p=f->pending_list;p;p=p->next) {
                /* check if the lock request matches exactly - you can only cancel with exact matches */
                if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
                    p->lck->lockx.in.lock_cnt == lck->lockx.in.lock_cnt &&
                    p->lck->lockx.in.file.ntvfs == lck->lockx.in.file.ntvfs &&
                    p->lck->lockx.in.mode == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
                        int i;

                        for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
                                if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
                                    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
                                    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
                                        break;
                                }
                        }
                        /* any mismatched entry means this pending request
                           is not the one being cancelled */
                        if (i < lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt) continue;

                        /* an exact match! we can cancel it, which is equivalent
                           to triggering the timeout early */
                        pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
                        return NT_STATUS_OK;
                }
        }

        return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
}
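
/*
  Note on the error code: when no pending lock matches the cancel
  request we return ERRDOS/ERRcancelviolation, which appears to match
  what Windows servers return for a failed LOCKING_ANDX_CANCEL_LOCK.
*/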

/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
                   struct ntvfs_request *req, union smb_lock *lck)
{
        struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
                                  struct pvfs_state);
        struct pvfs_file *f;
        struct smb_lock_entry *locks;
        int i;
        enum brl_type rw;
        struct pvfs_pending_lock *pending = NULL;
        NTSTATUS status;

        if (lck->generic.level != RAW_LOCK_GENERIC) {
                return ntvfs_map_lock(ntvfs, req, lck);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
                return pvfs_oplock_release(ntvfs, req, lck);
        }

        f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
        if (!f) {
                return NT_STATUS_INVALID_HANDLE;
        }

        if (f->handle->fd == -1) {
                return NT_STATUS_FILE_IS_A_DIRECTORY;
        }

        status = pvfs_break_level2_oplocks(f);
        NT_STATUS_NOT_OK_RETURN(status);
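
        /* only go async if the client supplied a non-zero timeout and
           the ntvfs layer allows this request to complete
           asynchronously; with a zero timeout a conflicting lock fails
           immediately */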
        if (lck->lockx.in.timeout != 0 &&
            (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
                pending = talloc(f, struct pvfs_pending_lock);
                if (pending == NULL) {
                        return NT_STATUS_NO_MEMORY;
                }

                pending->pvfs = pvfs;
                pending->lck = lck;
                pending->f = f;
                pending->req = req;

                pending->end_time =
                        timeval_current_ofs_msec(lck->lockx.in.timeout);
        }
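
        /* when we may block, ask brlock for the PENDING_* lock types so
           that on a conflict we are registered as a waiter and get a
           MSG_BRL_RETRY notification when an overlapping range is
           unlocked */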
        if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
                rw = pending? PENDING_READ_LOCK : READ_LOCK;
        } else {
                rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
                talloc_free(pending);
                return pvfs_lock_cancel(pvfs, req, lck, f);
        }

        if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
                /* this does not seem to be supported by any Windows
                   server, or used by any client */
                talloc_free(pending);
                return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
        }

        /* the unlocks happen first */
        locks = lck->lockx.in.locks;

        for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
                status = brlock_unlock(pvfs->brl_context,
                                       f->brl_handle,
                                       locks[i].pid,
                                       locks[i].offset,
                                       locks[i].count);
                if (!NT_STATUS_IS_OK(status)) {
                        talloc_free(pending);
                        return status;
                }
                f->lock_count--;
        }
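
        /* step past the unlock entries; locks[] now points at the
           entries to be locked, matching the offset used when a pending
           lock is retried in pvfs_pending_lock_continue() */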
        locks += i;

        for (i=0;i<lck->lockx.in.lock_cnt;i++) {
                if (pending) {
                        pending->pending_lock = i;
                }

                status = brlock_lock(pvfs->brl_context,
                                     f->brl_handle,
                                     locks[i].pid,
                                     locks[i].offset,
                                     locks[i].count,
                                     rw, pending);
                if (!NT_STATUS_IS_OK(status)) {
                        if (pending) {
                                /* a timed lock failed - set up a wait message to handle
                                   the pending lock notification or a timeout */
                                pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
                                                                         pending->end_time,
                                                                         pvfs_pending_lock_continue,
                                                                         pending);
                                if (pending->wait_handle == NULL) {
                                        talloc_free(pending);
                                        return NT_STATUS_NO_MEMORY;
                                }
                                talloc_steal(pending, pending->wait_handle);
                                DLIST_ADD(f->pending_list, pending);
                                return NT_STATUS_OK;
                        }

                        /* undo the locks we just did */
                        for (i--;i>=0;i--) {
                                brlock_unlock(pvfs->brl_context,
                                              f->brl_handle,
                                              locks[i].pid,
                                              locks[i].offset,
                                              locks[i].count);
                                f->lock_count--;
                        }
                        talloc_free(pending);
                        return status;
                }
                f->lock_count++;
        }

        talloc_free(pending);
        return NT_STATUS_OK;
}