/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}

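/*
 * Reclaim any POSIX or flock locks held under the delegation for this
 * open context by re-acquiring them from the server.
 */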
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file) != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

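/*
 * Claim back from the server all open (and lock) state for this inode
 * that is currently cached under the delegation identified by @stateid.
 */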
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Reclaim an existing delegation on an inode: update its stateid,
 * type and credential, and clear the "needs reclaim" flag.
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;
	struct rpc_cred *oldcred;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	oldcred = delegation->cred;
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
	put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) == NULL) {
		list_add_rcu(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation_state = delegation->type;
		rcu_assign_pointer(nfsi->delegation, delegation);
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

	spin_unlock(&clp->cl_lock);
	if (delegation != NULL)
		nfs_free_delegation(delegation);
	return status;
}

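/* Send DELEGRETURN to the server, then free the local delegation structure */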
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation);
}

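/*
 * Unlink the delegation from the nfs_inode and the per-client list.
 * The caller must hold clp->cl_lock.  Returns the detached delegation,
 * or NULL if there is none or it does not match @stateid.
 */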
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch;
	list_del_rcu(&delegation->super_list);
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	return delegation;
nomatch:
	return NULL;
}

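/* Detach and return this inode's delegation to the server, if it holds one */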
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}

/*
 * Return all delegations associated to a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

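/*
 * Kernel thread body: once the lease has expired, return every delegation
 * held by this client, unless state recovery is already in progress.
 */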
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	nfs_put_client(clp);
	module_put_and_exit(0);
}

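/* Spawn a kernel thread to return all of this client's delegations after a lease expiry */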
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}

struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

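/*
 * Thread body for an asynchronous delegation recall: detach the delegation,
 * reclaim the cached open and lock state, then send DELEGRETURN.
 */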
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		goto restart;
	}
	rcu_read_unlock();
}

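/*
 * Copy the inode's delegation stateid into @dst.
 * Returns 1 if a delegation was found, 0 otherwise.
 */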
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}