source4/ntvfs/posix/pvfs_lock.c

/*
   Unix SMB/CIFS implementation.

   POSIX NTVFS backend - locking

   Copyright (C) Andrew Tridgell 2004

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "includes.h"
#include "vfs_posix.h"
#include "system/time.h"
#include "../lib/util/dlinklist.h"
#include "messaging/messaging.h"

/*
  check if we can perform IO on a range that might be locked
*/
NTSTATUS pvfs_check_lock(struct pvfs_state *pvfs,
			 struct pvfs_file *f,
			 uint32_t smbpid,
			 uint64_t offset, uint64_t count,
			 enum brl_type rw)
{
	if (!(pvfs->flags & PVFS_FLAG_STRICT_LOCKING)) {
		return NT_STATUS_OK;
	}

	return brlock_locktest(pvfs->brl_context,
			       f->brl_handle,
			       smbpid,
			       offset, count, rw);
}

/* this state structure holds information about a lock we are waiting on */
struct pvfs_pending_lock {
	struct pvfs_pending_lock *next, *prev;
	struct pvfs_state *pvfs;
	union smb_lock *lck;
	struct pvfs_file *f;
	struct ntvfs_request *req;
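	/* index of the lock entry we are currently waiting on, counting
	   from the first lock (not unlock) entry in the request */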
	int pending_lock;
	struct pvfs_wait *wait_handle;
	struct timeval end_time;
};

/*
  a secondary attempt to setup a lock has failed - back out
  the locks we did get and send an error
*/
static void pvfs_lock_async_failed(struct pvfs_state *pvfs,
				   struct ntvfs_request *req,
				   struct pvfs_file *f,
				   struct smb_lock_entry *locks,
				   int i,
				   NTSTATUS status)
{
	/* undo the locks we just did */
	for (i--;i>=0;i--) {
		brlock_unlock(pvfs->brl_context,
			      f->brl_handle,
			      locks[i].pid,
			      locks[i].offset,
			      locks[i].count);
		f->lock_count--;
	}
	req->async_states->status = status;
	req->async_states->send_fn(req);
}

/*
  called when we receive a pending lock notification. It means that
  either our lock timed out or someone else has unlocked an overlapping
  range, so we should try the lock again. Note that on timeout we
  do retry the lock, giving it a last chance.
*/
static void pvfs_pending_lock_continue(void *private_data, enum pvfs_wait_notice reason)
{
	struct pvfs_pending_lock *pending = talloc_get_type(private_data,
					    struct pvfs_pending_lock);
	struct pvfs_state *pvfs = pending->pvfs;
	struct pvfs_file *f = pending->f;
	struct ntvfs_request *req = pending->req;
	union smb_lock *lck = pending->lck;
	struct smb_lock_entry *locks;
	enum brl_type rw;
	NTSTATUS status;
	int i;
	bool timed_out;

	timed_out = (reason != PVFS_WAIT_EVENT);

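	/* the lock entries follow the unlock entries in the request, so
	   skip over the unlock entries to reach the locks */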
	locks = lck->lockx.in.locks + lck->lockx.in.ulock_cnt;

	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = READ_LOCK;
	} else {
		rw = WRITE_LOCK;
	}

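	/* take this request off the file's pending list while we decide
	   its fate; it is re-added below if we are going to retry later */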
	DLIST_REMOVE(f->pending_list, pending);

	/* we don't retry on a cancel */
	if (reason == PVFS_WAIT_CANCEL) {
		if (pvfs->ntvfs->ctx->protocol < PROTOCOL_SMB2_02) {
			status = NT_STATUS_FILE_LOCK_CONFLICT;
		} else {
			status = NT_STATUS_CANCELLED;
		}
	} else {
		/*
		 * here it's important to pass the pending pointer
		 * because with this we'll get the correct error code
		 * FILE_LOCK_CONFLICT in the error case
		 */
		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[pending->pending_lock].pid,
				     locks[pending->pending_lock].offset,
				     locks[pending->pending_lock].count,
				     rw, pending);
	}
	if (NT_STATUS_IS_OK(status)) {
		f->lock_count++;
		timed_out = false;
	}

	/* if we have failed and timed out, or succeeded, then we
	   don't need the pending lock any more */
	if (NT_STATUS_IS_OK(status) || timed_out) {
		NTSTATUS status2;
		status2 = brlock_remove_pending(pvfs->brl_context,
						f->brl_handle, pending);
		if (!NT_STATUS_IS_OK(status2)) {
			DEBUG(0,("pvfs_lock: failed to remove pending lock - %s\n", nt_errstr(status2)));
		}
		talloc_free(pending->wait_handle);
	}

	if (!NT_STATUS_IS_OK(status)) {
		if (timed_out) {
			/* no more chances */
			pvfs_lock_async_failed(pvfs, req, f, locks, pending->pending_lock, status);
			talloc_free(pending);
		} else {
			/* we can try again */
			DLIST_ADD(f->pending_list, pending);
		}
		return;
	}

	/* if we haven't timed out yet, then we can do more pending locks */
	if (rw == READ_LOCK) {
		rw = PENDING_READ_LOCK;
	} else {
		rw = PENDING_WRITE_LOCK;
	}

	/* we've now got the pending lock. try and get the rest, which might
	   lead to more pending locks */
	for (i=pending->pending_lock+1;i<lck->lockx.in.lock_cnt;i++) {
		pending->pending_lock = i;

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			/* a timed lock failed - setup a wait message to handle
			   the pending lock notification or a timeout */
			pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
								 pending->end_time,
								 pvfs_pending_lock_continue,
								 pending);
			if (pending->wait_handle == NULL) {
				pvfs_lock_async_failed(pvfs, req, f, locks, i, NT_STATUS_NO_MEMORY);
				talloc_free(pending);
			} else {
				talloc_steal(pending, pending->wait_handle);
				DLIST_ADD(f->pending_list, pending);
			}
			return;
		}

		f->lock_count++;
	}

	/* we've managed to get all the locks. Tell the client */
	req->async_states->status = NT_STATUS_OK;
	req->async_states->send_fn(req);
	talloc_free(pending);
}

/*
  called when we close a file that might have locks
*/
void pvfs_lock_close(struct pvfs_state *pvfs, struct pvfs_file *f)
{
	struct pvfs_pending_lock *p, *next;

	if (f->lock_count || f->pending_list) {
		DEBUG(5,("pvfs_lock: removing %.0f locks on close\n",
			 (double)f->lock_count));
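		/* brlock_close() drops any byte range locks still
		   registered for this file handle in the brl database */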
		brlock_close(f->pvfs->brl_context, f->brl_handle);
		f->lock_count = 0;
	}

	/* reply to all the pending lock requests, telling them the
	   lock failed */
	for (p=f->pending_list;p;p=next) {
		next = p->next;
		DLIST_REMOVE(f->pending_list, p);
		p->req->async_states->status = NT_STATUS_RANGE_NOT_LOCKED;
		p->req->async_states->send_fn(p->req);
	}
}

/*
  cancel a set of locks
*/
static NTSTATUS pvfs_lock_cancel(struct pvfs_state *pvfs, struct ntvfs_request *req, union smb_lock *lck,
				 struct pvfs_file *f)
{
	struct pvfs_pending_lock *p;

	for (p=f->pending_list;p;p=p->next) {
		/* check if the lock request matches exactly - you can only cancel with exact matches */
		if (p->lck->lockx.in.ulock_cnt == lck->lockx.in.ulock_cnt &&
		    p->lck->lockx.in.lock_cnt == lck->lockx.in.lock_cnt &&
		    p->lck->lockx.in.file.ntvfs == lck->lockx.in.file.ntvfs &&
		    p->lck->lockx.in.mode == (lck->lockx.in.mode & ~LOCKING_ANDX_CANCEL_LOCK)) {
			int i;

			for (i=0;i<lck->lockx.in.ulock_cnt + lck->lockx.in.lock_cnt;i++) {
				if (p->lck->lockx.in.locks[i].pid != lck->lockx.in.locks[i].pid ||
				    p->lck->lockx.in.locks[i].offset != lck->lockx.in.locks[i].offset ||
				    p->lck->lockx.in.locks[i].count != lck->lockx.in.locks[i].count) {
					break;
				}
			}
			if (i < lck->lockx.in.ulock_cnt) continue;

			/* an exact match! we can cancel it, which is equivalent
			   to triggering the timeout early */
			pvfs_pending_lock_continue(p, PVFS_WAIT_TIMEOUT);
			return NT_STATUS_OK;
		}
	}

	return NT_STATUS_DOS(ERRDOS, ERRcancelviolation);
}

/*
  lock or unlock a byte range
*/
NTSTATUS pvfs_lock(struct ntvfs_module_context *ntvfs,
		   struct ntvfs_request *req, union smb_lock *lck)
{
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
				  struct pvfs_state);
	struct pvfs_file *f;
	struct smb_lock_entry *locks;
	int i;
	enum brl_type rw;
	struct pvfs_pending_lock *pending = NULL;
	NTSTATUS status;

	if (lck->generic.level != RAW_LOCK_GENERIC) {
		return ntvfs_map_lock(ntvfs, req, lck);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_OPLOCK_RELEASE) {
		return pvfs_oplock_release(ntvfs, req, lck);
	}

	f = pvfs_find_fd(pvfs, req, lck->lockx.in.file.ntvfs);
	if (!f) {
		return NT_STATUS_INVALID_HANDLE;
	}

	if (f->handle->fd == -1) {
		return NT_STATUS_FILE_IS_A_DIRECTORY;
	}

	status = pvfs_break_level2_oplocks(f);
	NT_STATUS_NOT_OK_RETURN(status);

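	/* if the client gave a timeout and we are allowed to answer
	   asynchronously, prepare state so a conflicting lock can go
	   pending instead of failing immediately */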
	if (lck->lockx.in.timeout != 0 &&
	    (req->async_states->state & NTVFS_ASYNC_STATE_MAY_ASYNC)) {
		pending = talloc(f, struct pvfs_pending_lock);
		if (pending == NULL) {
			return NT_STATUS_NO_MEMORY;
		}

		pending->pvfs = pvfs;
		pending->lck = lck;
		pending->f = f;
		pending->req = req;

		pending->end_time =
			timeval_current_ofs_msec(lck->lockx.in.timeout);
	}

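	/* when we may go async, use the pending lock variants so the brl
	   layer can notify us (MSG_BRL_RETRY) when a retry is worthwhile */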
	if (lck->lockx.in.mode & LOCKING_ANDX_SHARED_LOCK) {
		rw = pending? PENDING_READ_LOCK : READ_LOCK;
	} else {
		rw = pending? PENDING_WRITE_LOCK : WRITE_LOCK;
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CANCEL_LOCK) {
		talloc_free(pending);
		return pvfs_lock_cancel(pvfs, req, lck, f);
	}

	if (lck->lockx.in.mode & LOCKING_ANDX_CHANGE_LOCKTYPE) {
		/* this seems to not be supported by any windows server,
		   or used by any clients */
		talloc_free(pending);
		return NT_STATUS_DOS(ERRDOS, ERRnoatomiclocks);
	}

	/* the unlocks happen first */
	locks = lck->lockx.in.locks;

	for (i=0;i<lck->lockx.in.ulock_cnt;i++) {
		status = brlock_unlock(pvfs->brl_context,
				       f->brl_handle,
				       locks[i].pid,
				       locks[i].offset,
				       locks[i].count);
		if (!NT_STATUS_IS_OK(status)) {
			talloc_free(pending);
			return status;
		}
		f->lock_count--;
	}

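	/* the lock entries follow the unlock entries in the same array,
	   so advance past the entries we just processed */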
	locks += i;

	for (i=0;i<lck->lockx.in.lock_cnt;i++) {
		if (pending) {
			pending->pending_lock = i;
		}

		status = brlock_lock(pvfs->brl_context,
				     f->brl_handle,
				     locks[i].pid,
				     locks[i].offset,
				     locks[i].count,
				     rw, pending);
		if (!NT_STATUS_IS_OK(status)) {
			if (pending) {
				/* a timed lock failed - setup a wait message to handle
				   the pending lock notification or a timeout */
				pending->wait_handle = pvfs_wait_message(pvfs, req, MSG_BRL_RETRY,
									 pending->end_time,
									 pvfs_pending_lock_continue,
									 pending);
				if (pending->wait_handle == NULL) {
					talloc_free(pending);
					return NT_STATUS_NO_MEMORY;
				}
				talloc_steal(pending, pending->wait_handle);
				DLIST_ADD(f->pending_list, pending);
				return NT_STATUS_OK;
			}

			/* undo the locks we just did */
			for (i--;i>=0;i--) {
				brlock_unlock(pvfs->brl_context,
					      f->brl_handle,
					      locks[i].pid,
					      locks[i].offset,
					      locks[i].count);
				f->lock_count--;
			}
			talloc_free(pending);
			return status;
		}

		f->lock_count++;
	}

	talloc_free(pending);
	return NT_STATUS_OK;
}