/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
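
/*
 * Delegation structures are released through call_rcu() so that lockless
 * readers walking clp->cl_delegations under rcu_read_lock() remain safe.
 */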
static void nfs_do_free_delegation(struct nfs_delegation *delegation)
{
	kfree(delegation);
}

static void nfs_free_delegation_callback(struct rcu_head *head)
{
	struct nfs_delegation *delegation = container_of(head, struct nfs_delegation, rcu);

	nfs_do_free_delegation(delegation);
}

static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	struct rpc_cred *cred;

	cred = rcu_dereference(delegation->cred);
	rcu_assign_pointer(delegation->cred, NULL);
	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
	if (cred)
		put_rpccred(cred);
}
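
/*
 * Reclaim on the server any POSIX or flock locks belonging to this open
 * context that were granted locally while the delegation was held.
 */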
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
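
/*
 * Convert every delegated open that matches this delegation stateid back
 * into a regular open, then reclaim the locks held under each open context.
 */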
static void nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx, state, stateid);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation after it has been reclaimed from the server
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	put_rpccred(cred);
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (rcu_dereference(nfsi->delegation) == NULL) {
		list_add_rcu(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation_state = delegation->type;
		rcu_assign_pointer(nfsi->delegation, delegation);
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}

	/* Ensure we revalidate the attributes and page cache! */
	spin_lock(&inode->i_lock);
	nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
	spin_unlock(&inode->i_lock);

	spin_unlock(&clp->cl_lock);
	kfree(delegation);
	return status;
}
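
/*
 * Issue the DELEGRETURN RPC and free the local delegation record.
 */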
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	nfs_delegation_claim_opens(inode, &delegation->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	return nfs_do_return_delegation(inode, delegation);
}
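
/*
 * Unlink the delegation from clp->cl_delegations and clear the inode's
 * delegation pointer.  The caller must hold clp->cl_lock.
 */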
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
	struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

	if (delegation == NULL)
		goto nomatch;
	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
				sizeof(delegation->stateid.data)) != 0)
		goto nomatch;
	list_del_rcu(&delegation->super_list);
	nfsi->delegation_state = 0;
	rcu_assign_pointer(nfsi->delegation, NULL);
	return delegation;
nomatch:
	return NULL;
}
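
/*
 * Return any delegation attached to this inode to the server.
 */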
int nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int err = 0;

	if (rcu_dereference(nfsi->delegation) != NULL) {
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(nfsi, NULL);
		spin_unlock(&clp->cl_lock);
		if (delegation != NULL)
			err = __nfs_inode_return_delegation(inode, delegation);
	}
	return err;
}

/*
 * Return all delegations associated to a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
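
/*
 * Kernel thread helper: once the lease has expired, return every delegation
 * held by this client, stopping early if state recovery begins.
 */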
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
out:
	nfs_put_client(clp);
	module_put_and_exit(0);
}

void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			__nfs_inode_return_delegation(inode, delegation);
		iput(inode);
		goto restart;
	}
	rcu_read_unlock();
}
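
/*
 * Arguments handed to the delegation recall thread; 'started' lets the
 * caller wait until the delegation has been detached before it returns.
 */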
struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};
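
/*
 * Worker thread that detaches the recalled delegation, signals the waiting
 * caller via 'started', and then returns the delegation to the server.
 */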
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfs_detach_delegation_locked(nfsi, args->stateid);
	if (delegation != NULL)
		args->result = 0;
	else
		args->result = -ENOENT;
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode, args->stateid);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	rcu_read_unlock();
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	rcu_read_unlock();
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
restart:
	rcu_read_lock();
	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		spin_lock(&clp->cl_lock);
		delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL);
		spin_unlock(&clp->cl_lock);
		rcu_read_unlock();
		if (delegation != NULL)
			nfs_free_delegation(delegation);
		goto restart;
	}
	rcu_read_unlock();
}
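
/*
 * Copy the current delegation stateid into 'dst'.  Returns 1 if the inode
 * holds a delegation, 0 otherwise.
 */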
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int ret = 0;

	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		ret = 1;
	}
	rcu_read_unlock();
	return ret;
}