/*
 * linux/fs/lockd/clntlock.c
 *
 * Lock handling for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/smp_lock.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT

/*
 * Local function prototypes
 */
static int			reclaimer(void *ptr);

/*
 * The following functions handle blocking and granting from the
 * client perspective.
 */

/*
 * This is the representation of a blocked client lock.
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked list */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status */
};

static LIST_HEAD(nlm_blocked);

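/*
 * All pending blocking locks are queued on nlm_blocked: entries are added
 * by nlmclnt_prepare_block() and removed by nlmclnt_finish_block().  The
 * list is scanned by nlmclnt_grant() when the server's GRANTED callback
 * arrives, and by reclaimer() to wake up waiters after a server reboot.
 */
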
/*
 * Queue up a lock for blocking so that the GRANTED request can see it
 */
struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_wait *block;

	block = kmalloc(sizeof(*block), GFP_KERNEL);
	if (block != NULL) {
		block->b_host = host;
		block->b_lock = fl;
		init_waitqueue_head(&block->b_wait);
		block->b_status = NLM_LCK_BLOCKED;
		list_add(&block->b_list, &nlm_blocked);
	}
	return block;
}

void nlmclnt_finish_block(struct nlm_wait *block)
{
	if (block == NULL)
		return;
	list_del(&block->b_list);
	kfree(block);
}

/*
 * Block on a lock
 */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
	long ret;

	/* A broken server might ask us to block even if we didn't
	 * request it. Just say no!
	 */
	if (block == NULL)
		return -EAGAIN;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	ret = wait_event_interruptible_timeout(block->b_wait,
			block->b_status != NLM_LCK_BLOCKED,
			timeout);
	if (ret < 0)
		return -ERESTARTSYS;
	req->a_res.status = block->b_status;
	return 0;
}

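/*
 * Illustrative sketch only (not part of the original file): how a caller
 * such as nlmclnt_lock() in clntproc.c is expected to combine the three
 * helpers above.  The actual RPC is elided; nlm_do_lock_rpc() is a
 * hypothetical stand-in that is assumed to return -EINPROGRESS when the
 * server answers "blocked", and the 30 * HZ poll interval is an
 * arbitrary example value.
 */
#if 0
static int example_blocking_lock(struct nlm_host *host, struct nlm_rqst *req,
				 struct file_lock *fl)
{
	struct nlm_wait *block;
	int status;

	/* Queue the wait entry before sending LOCK, so that an early
	 * GRANTED callback can already find it on nlm_blocked. */
	block = nlmclnt_prepare_block(host, fl);

	status = nlm_do_lock_rpc(req);	/* hypothetical NLM LOCK call */
	if (status == -EINPROGRESS)
		/* Server said "blocked": wait for nlmclnt_grant() to
		 * flip b_status, polling every 30 seconds. */
		status = nlmclnt_block(block, req, 30 * HZ);

	/* Always unlink and free the wait entry. */
	nlmclnt_finish_block(block);
	return status;
}
#endif
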
/*
 * The server lockd has called us back to tell us the lock was granted
 */
u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
{
	const struct file_lock *fl = &lock->fl;
	const struct nfs_fh *fh = &lock->fh;
	struct nlm_wait	*block;
	u32 res = nlm_lck_denied;

	/*
	 * Look up blocked request based on arguments.
	 * Warning: must not use cookie to match it!
	 */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		struct file_lock *fl_blocked = block->b_lock;

		if (fl_blocked->fl_start != fl->fl_start)
			continue;
		if (fl_blocked->fl_end != fl->fl_end)
			continue;
		/*
		 * Careful! The NLM server will return the 32-bit "pid" that
		 * we put on the wire: in this case the lockowner "pid".
		 */
		if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
			continue;
		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
			continue;
		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_dentry->d_inode), fh) != 0)
			continue;
		/* Alright, we found a lock. Set the return status
		 * and wake up the caller
		 */
		block->b_status = NLM_LCK_GRANTED;
		wake_up(&block->b_wait);
		res = nlm_granted;
	}
	return res;
}

/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
 * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
 */
static void nlmclnt_prepare_reclaim(struct nlm_host *host)
{
	down_write(&host->h_rwsem);
	host->h_monitored = 0;
	host->h_state++;
	host->h_nextrebind = 0;
	nlm_rebind_host(host);

	/*
	 * Mark the locks for reclaiming.
	 */
	list_splice_init(&host->h_granted, &host->h_reclaim);

	dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
}

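/*
 * Note that nlmclnt_prepare_reclaim() takes host->h_rwsem for writing;
 * the matching up_write() is in nlmclnt_finish_reclaim() below, so the
 * two must bracket the reclaim loop in reclaimer() as a pair.
 */
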
static void nlmclnt_finish_reclaim(struct nlm_host *host)
{
	host->h_reclaiming = 0;
	up_write(&host->h_rwsem);
	dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);
}

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread.
 */
void
nlmclnt_recovery(struct nlm_host *host, u32 newstate)
{
	if (host->h_nsmstate == newstate)
		return;
	host->h_nsmstate = newstate;
	if (!host->h_reclaiming++) {
		nlm_get_host(host);
		__module_get(THIS_MODULE);
		if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0)
			module_put(THIS_MODULE);
	}
}

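/*
 * The host reference and module reference taken in nlmclnt_recovery()
 * are dropped again at the end of reclaimer() via nlm_release_host()
 * and module_put_and_exit(); the module_put() above only covers the
 * case where the reclaimer thread could not be started.
 */
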
static int
reclaimer(void *ptr)
{
	struct nlm_host	 *host = (struct nlm_host *) ptr;
	struct nlm_wait	 *block;
	struct file_lock *fl, *next;
	u32 nsmstate;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	nlmclnt_prepare_reclaim(host);
	/* First, reclaim all locks that have been marked. */
restart:
	nsmstate = host->h_nsmstate;
	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
		list_del_init(&fl->fl_u.nfs_fl.list);

		if (signalled())
			continue;
		if (nlmclnt_reclaim(host, fl) != 0)
			continue;
		list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
		if (host->h_nsmstate != nsmstate) {
			/* Argh! The server rebooted again! */
			list_splice_init(&host->h_granted, &host->h_reclaim);
			goto restart;
		}
	}
	nlmclnt_finish_reclaim(host);

	/* Now, wake up all processes that sleep on a blocked lock */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	module_put_and_exit(0);
}