/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

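/*
 * Delegations are freed via RCU so that lockless readers holding
 * rcu_read_lock() never see a delegation being torn down under them.
 */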
static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
        if (delegation->cred)
                put_rpccred(delegation->cred);
        kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
        struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

        nfs_do_free_delegation(delegation);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
        call_rcu(&delegation->rcu, nfs_free_delegation_callback);
}

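/*
 * Mark a delegation as recently used so that it is not picked up by
 * nfs_expire_unreferenced_delegations().
 */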
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

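/*
 * Check whether the inode holds a delegation covering the requested open
 * modes; returns 1 and marks the delegation referenced if it does.
 */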
int nfs_have_delegation(struct inode *inode, fmode_t flags)
{
        struct nfs_delegation *delegation;
        int ret = 0;

        flags &= FMODE_READ|FMODE_WRITE;
        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL && (delegation->type & flags) == flags) {
                nfs_mark_delegation_referenced(delegation);
                ret = 1;
        }
        rcu_read_unlock();
        return ret;
}

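/*
 * Reclaim any POSIX/flock locks held under this open context as real
 * server-side locks before the delegation that covered them is returned.
 */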
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        if (inode->i_flock == NULL)
                goto out;

        /* Protect inode->i_flock using the file locks lock */
        lock_flocks();
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if (nfs_file_open_context(fl->fl_file) != ctx)
                        continue;
                unlock_flocks();
                status = nfs4_lock_delegation_recall(state, fl);
                if (status < 0)
                        goto out;
                lock_flocks();
        }
        unlock_flocks();
out:
        return status;
}

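/*
 * Convert every open that is still covered by the delegation stateid into
 * a regular open stateid on the server, then reclaim its locks.
 */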
static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        int err;

again:
        spin_lock(&inode->i_lock);
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
                if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
                        continue;
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                err = nfs4_open_delegation_recall(ctx, state, stateid);
                if (err >= 0)
                        err = nfs_delegation_claim_locks(ctx, state);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return err;
                goto again;
        }
        spin_unlock(&inode->i_lock);
        return 0;
}

/*
 * Update an existing delegation on an inode that is being reclaimed
 * (e.g. after a server reboot).  If the old delegation has already been
 * returned, fall back to setting up a fresh one.
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_delegation *delegation;
        struct rpc_cred *oldcred = NULL;

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL) {
                        memcpy(delegation->stateid.data, res->delegation.data,
                               sizeof(delegation->stateid.data));
                        delegation->type = res->delegation_type;
                        delegation->maxsize = res->maxsize;
                        oldcred = delegation->cred;
                        delegation->cred = get_rpccred(cred);
                        clear_bit(NFS_DELEGATION_NEED_RECLAIM,
                                  &delegation->flags);
                        NFS_I(inode)->delegation_state = delegation->type;
                        spin_unlock(&delegation->lock);
                        put_rpccred(oldcred);
                        rcu_read_unlock();
                } else {
                        /* We appear to have raced with a delegation return. */
                        spin_unlock(&delegation->lock);
                        rcu_read_unlock();
                        nfs_inode_set_delegation(inode, cred, res);
                }
        } else {
                rcu_read_unlock();
        }
}

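/*
 * Send a DELEGRETURN for this delegation and free it.
 */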
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        int res = 0;

        res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
        nfs_free_delegation(delegation);
        return res;
}

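/*
 * Take a reference on the delegation's inode, or return NULL if the
 * delegation is no longer attached to a live inode.
 */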
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
        struct inode *inode = NULL;

        spin_lock(&delegation->lock);
        if (delegation->inode != NULL)
                inode = igrab(delegation->inode);
        spin_unlock(&delegation->lock);
        return inode;
}

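/*
 * Unlink the delegation from the inode and the per-client list.
 * The caller must hold clp->cl_lock; a non-NULL stateid must match the
 * delegation's stateid for the detach to succeed.
 */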
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
                                                           const nfs4_stateid *stateid,
                                                           struct nfs_client *clp)
{
        struct nfs_delegation *delegation =
                rcu_dereference_protected(nfsi->delegation,
                                          lockdep_is_held(&clp->cl_lock));

        if (delegation == NULL)
                goto nomatch;

        spin_lock(&delegation->lock);
        if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
                                      sizeof(delegation->stateid.data)) != 0)
                goto nomatch_unlock;
        list_del_rcu(&delegation->super_list);
        delegation->inode = NULL;
        nfsi->delegation_state = 0;
        rcu_assign_pointer(nfsi->delegation, NULL);
        spin_unlock(&delegation->lock);
        return delegation;
nomatch_unlock:
        spin_unlock(&delegation->lock);
nomatch:
        return NULL;
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation, *old_delegation;
        struct nfs_delegation *freeme = NULL;
        int status = 0;

        delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
        if (delegation == NULL)
                return -ENOMEM;
        memcpy(delegation->stateid.data, res->delegation.data,
               sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        delegation->change_attr = nfsi->change_attr;
        delegation->cred = get_rpccred(cred);
        delegation->inode = inode;
        delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
        spin_lock_init(&delegation->lock);

        spin_lock(&clp->cl_lock);
        old_delegation = rcu_dereference_protected(nfsi->delegation,
                                                   lockdep_is_held(&clp->cl_lock));
        if (old_delegation != NULL) {
                if (memcmp(&delegation->stateid, &old_delegation->stateid,
                           sizeof(old_delegation->stateid)) == 0 &&
                    delegation->type == old_delegation->type) {
                        goto out;
                }
                /*
                 * Deal with broken servers that hand out two
                 * delegations for the same file.
                 */
                dfprintk(FILE, "%s: server %s handed out "
                         "a duplicate delegation!\n",
                         __func__, clp->cl_hostname);
                if (delegation->type <= old_delegation->type) {
                        freeme = delegation;
                        delegation = NULL;
                        goto out;
                }
                freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
        }
        list_add_rcu(&delegation->super_list, &clp->cl_delegations);
        nfsi->delegation_state = delegation->type;
        rcu_assign_pointer(nfsi->delegation, delegation);
        delegation = NULL;

        /* Ensure we revalidate the attributes and page cache! */
        spin_lock(&inode->i_lock);
        nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
        spin_unlock(&inode->i_lock);

out:
        spin_unlock(&clp->cl_lock);
        if (delegation != NULL)
                nfs_free_delegation(delegation);
        if (freeme != NULL)
                nfs_do_return_delegation(inode, freeme, 0);
        return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int err;

        /*
         * Guard against new delegated open/lock/unlock calls and against
         * state recovery
         */
        down_write(&nfsi->rwsem);
        err = nfs_delegation_claim_opens(inode, &delegation->stateid);
        up_write(&nfsi->rwsem);
        if (err)
                goto out;

        err = nfs_do_return_delegation(inode, delegation, issync);
out:
        return err;
}

/*
 * Return all delegations that have been marked for return
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct inode *inode;
        int err = 0;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
                        continue;
                inode = nfs_delegation_grab_inode(delegation);
                if (inode == NULL)
                        continue;
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
                spin_unlock(&clp->cl_lock);
                rcu_read_unlock();
                if (delegation != NULL) {
                        filemap_flush(inode->i_mapping);
                        err = __nfs_inode_return_delegation(inode, delegation, 0);
                }
                iput(inode);
                if (!err)
                        goto restart;
                set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
                return err;
        }
        rcu_read_unlock();
        return 0;
}

/*
 * This function returns the delegation without reclaiming opens
 * or protecting against delegation reclaims.
 * It is therefore really only safe to be called from
 * nfs4_clear_inode()
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;

        if (rcu_access_pointer(nfsi->delegation) != NULL) {
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
                spin_unlock(&clp->cl_lock);
                if (delegation != NULL)
                        nfs_do_return_delegation(inode, delegation, 0);
        }
}

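/*
 * Flush dirty data and return the inode's delegation to the server,
 * waiting for the DELEGRETURN to complete.
 */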
int nfs_inode_return_delegation(struct inode *inode)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int err = 0;

        if (rcu_access_pointer(nfsi->delegation) != NULL) {
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
                spin_unlock(&clp->cl_lock);
                if (delegation != NULL) {
                        nfs_wb_all(inode);
                        err = __nfs_inode_return_delegation(inode, delegation, 1);
                }
        }
        return err;
}

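/*
 * Flag a delegation for return and note that the state manager has
 * delegations to process.
 */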
static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
        set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

/*
 * Return all delegations associated to a super block
 */
void nfs_super_return_all_delegations(struct super_block *sb)
{
        struct nfs_client *clp = NFS_SB(sb)->nfs_client;
        struct nfs_delegation *delegation;

        if (clp == NULL)
                return;
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL && delegation->inode->i_sb == sb)
                        set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
                spin_unlock(&delegation->lock);
        }
        rcu_read_unlock();
        if (nfs_client_return_marked_delegations(clp) != 0)
                nfs4_schedule_state_manager(clp);
}

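/*
 * Mark for return every delegation whose type intersects the given open
 * modes; read/write delegations are skipped unless FMODE_WRITE is requested.
 */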
static
void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
        struct nfs_delegation *delegation;

        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
                        continue;
                if (delegation->type & flags)
                        nfs_mark_return_delegation(clp, delegation);
        }
        rcu_read_unlock();
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
        nfs_client_mark_return_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
        if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
                nfs4_schedule_state_manager(clp);
}

void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
{
        nfs_client_mark_return_all_delegation_types(clp, flags);
        nfs_delegation_run_state_manager(clp);
}

void nfs_expire_all_delegations(struct nfs_client *clp)
{
        nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
        if (clp == NULL)
                return;
        nfs_client_mark_return_all_delegations(clp);
}

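/*
 * Mark for return any delegation that has not been referenced since the
 * last scan (its NFS_DELEGATION_REFERENCED bit is clear).
 */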
static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;

        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
                        continue;
                nfs_mark_return_delegation(clp, delegation);
        }
        rcu_read_unlock();
}

void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
        nfs_client_mark_return_unreferenced_delegations(clp);
        nfs_delegation_run_state_manager(clp);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_delegation *delegation;

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);

        if (!clp->cl_mvops->validate_stateid(delegation, stateid)) {
                rcu_read_unlock();
                return -ENOENT;
        }

        nfs_mark_return_delegation(clp, delegation);
        rcu_read_unlock();

        nfs_delegation_run_state_manager(clp);
        return 0;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
        struct nfs_delegation *delegation;
        struct inode *res = NULL;
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
                        res = igrab(delegation->inode);
                }
                spin_unlock(&delegation->lock);
                if (res != NULL)
                        break;
        }
        rcu_read_unlock();
        return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
                set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
        rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct inode *inode;
restart:
        rcu_read_lock();
        list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
                if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
                        continue;
                inode = nfs_delegation_grab_inode(delegation);
                if (inode == NULL)
                        continue;
                spin_lock(&clp->cl_lock);
                delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
                spin_unlock(&clp->cl_lock);
                rcu_read_unlock();
                if (delegation != NULL)
                        nfs_free_delegation(delegation);
                iput(inode);
                goto restart;
        }
        rcu_read_unlock();
}

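/*
 * Copy the inode's delegation stateid into 'dst'; returns 1 on success,
 * 0 if the inode holds no delegation.
 */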
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int ret = 0;

        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        if (delegation != NULL) {
                memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
                ret = 1;
        }
        rcu_read_unlock();
        return ret;
}