/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

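/* Drop the credential reference (if any) and free a delegation record */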
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
	if (delegation->cred)
		put_rpccred(delegation->cred);
	kfree(delegation);
}

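/*
 * Walk the inode's lock list and re-establish, against the server, every
 * POSIX or flock lock held under this open context's delegation.  On a
 * fatal error, schedule full state recovery for the client.
 */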
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status;

	for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
			continue;
		status = nfs4_lock_delegation_recall(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d.\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			case -NFS4ERR_STALE_CLIENTID:
				nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

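/*
 * For every open context on the inode that still carries delegated open
 * state, reclaim the open (and any associated locks) from the server.
 */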
static void nfs_delegation_claim_opens(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	int err;

again:
	spin_lock(&inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&inode->i_lock);
		err = nfs4_open_delegation_recall(ctx->dentry, state);
		if (err >= 0)
			err = nfs_delegation_claim_locks(ctx, state);
		put_nfs_open_context(ctx);
		if (err != 0)
			return;
		goto again;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Update an existing delegation that is being reclaimed from the server
 * during state recovery
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_delegation *delegation = NFS_I(inode)->delegation;

	if (delegation == NULL)
		return;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	put_rpccred(cred);
	delegation->cred = get_rpccred(cred);
	delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
	NFS_I(inode)->delegation_state = delegation->type;
	smp_wmb();
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int status = 0;

	/* Ensure we first revalidate the attributes and page cache! */
	if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
		__nfs_revalidate_inode(NFS_SERVER(inode), inode);

	delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
	if (delegation == NULL)
		return -ENOMEM;
	memcpy(delegation->stateid.data, res->delegation.data,
			sizeof(delegation->stateid.data));
	delegation->type = res->delegation_type;
	delegation->maxsize = res->maxsize;
	delegation->change_attr = nfsi->change_attr;
	delegation->cred = get_rpccred(cred);
	delegation->inode = inode;

	spin_lock(&clp->cl_lock);
	if (nfsi->delegation == NULL) {
		list_add(&delegation->super_list, &clp->cl_delegations);
		nfsi->delegation = delegation;
		nfsi->delegation_state = delegation->type;
		delegation = NULL;
	} else {
		if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
					sizeof(delegation->stateid)) != 0 ||
				delegation->type != nfsi->delegation->type) {
			printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
					__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
			status = -EIO;
		}
	}
	spin_unlock(&clp->cl_lock);
	kfree(delegation);
	return status;
}

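/* Send DELEGRETURN to the server, then free the local delegation record */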
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
	int res = 0;

	res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
	nfs_free_delegation(delegation);
	return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
	filemap_fdatawrite(inode->i_mapping);
	nfs_wb_all(inode);
	filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	/* Guard against new delegated open calls */
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		res = nfs_do_return_delegation(inode, delegation);
	return res;
}

/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (delegation->inode->i_sb != sb)
			continue;
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

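/*
 * Worker thread body: once the client's lease has expired (and no other
 * state recovery is in progress), return every delegation the client holds.
 */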
static int nfs_do_expire_all_delegations(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs_delegation *delegation;
	struct inode *inode;

	allow_signal(SIGKILL);
restart:
	spin_lock(&clp->cl_lock);
	if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
		goto out;
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
		goto out;
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
out:
	spin_unlock(&clp->cl_lock);
	nfs_put_client(clp);
	module_put_and_exit(0);
}

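/*
 * Spawn the "delegreturn" kthread above; on failure, drop the module and
 * client references that were taken on its behalf.
 */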
void nfs_expire_all_delegations(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs_do_expire_all_delegations, clp,
			"%u.%u.%u.%u-delegreturn",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	struct inode *inode;

	if (clp == NULL)
		return;
restart:
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		inode = igrab(delegation->inode);
		if (inode == NULL)
			continue;
		spin_unlock(&clp->cl_lock);
		nfs_inode_return_delegation(inode);
		iput(inode);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
}

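/* Arguments handed to the asynchronous recall_thread() below */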
struct recall_threadargs {
	struct inode *inode;
	struct nfs_client *clp;
	const nfs4_stateid *stateid;

	struct completion started;
	int result;
};

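/*
 * Thread body for an asynchronous delegation recall: detach the delegation
 * matching the recalled stateid, signal the waiting caller with the result,
 * then reclaim the opens/locks and return the delegation to the server.
 */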
static int recall_thread(void *data)
{
	struct recall_threadargs *args = (struct recall_threadargs *)data;
	struct inode *inode = igrab(args->inode);
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;

	daemonize("nfsv4-delegreturn");

	nfs_msync_inode(inode);
	down_read(&clp->cl_sem);
	down_write(&nfsi->rwsem);
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL && memcmp(delegation->stateid.data,
				args->stateid->data,
				sizeof(delegation->stateid.data)) == 0) {
		list_del_init(&delegation->super_list);
		nfsi->delegation = NULL;
		nfsi->delegation_state = 0;
		args->result = 0;
	} else {
		delegation = NULL;
		args->result = -ENOENT;
	}
	spin_unlock(&clp->cl_lock);
	complete(&args->started);
	nfs_delegation_claim_opens(inode);
	up_write(&nfsi->rwsem);
	up_read(&clp->cl_sem);
	nfs_msync_inode(inode);

	if (delegation != NULL)
		nfs_do_return_delegation(inode, delegation);
	iput(inode);
	module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
	struct recall_threadargs data = {
		.inode = inode,
		.stateid = stateid,
	};
	int status;

	init_completion(&data.started);
	__module_get(THIS_MODULE);
	status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
	if (status < 0)
		goto out_module_put;
	wait_for_completion(&data.started);
	return data.result;
out_module_put:
	module_put(THIS_MODULE);
	return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
	struct nfs_delegation *delegation;
	struct inode *res = NULL;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
		if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
			res = igrab(delegation->inode);
			break;
		}
	}
	spin_unlock(&clp->cl_lock);
	return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
	struct nfs_delegation *delegation;
	spin_lock(&clp->cl_lock);
	list_for_each_entry(delegation, &clp->cl_delegations, super_list)
		delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
	spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
	struct nfs_delegation *delegation, *n;
	LIST_HEAD(head);
	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
		if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
			continue;
		list_move(&delegation->super_list, &head);
		NFS_I(delegation->inode)->delegation = NULL;
		NFS_I(delegation->inode)->delegation_state = 0;
	}
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&head)) {
		delegation = list_entry(head.next, struct nfs_delegation, super_list);
		list_del(&delegation->super_list);
		nfs_free_delegation(delegation);
	}
}

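/*
 * Copy the inode's delegation stateid into @dst for use in NFSv4 requests.
 * Returns 1 if a delegation was found and copied, 0 otherwise.
 */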
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_delegation *delegation;
	int res = 0;

	if (nfsi->delegation_state == 0)
		return 0;
	spin_lock(&clp->cl_lock);
	delegation = nfsi->delegation;
	if (delegation != NULL) {
		memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
		res = 1;
	}
	spin_unlock(&clp->cl_lock);
	return res;
}