/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>

#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
#define NLM_HOST_ADDR(sv)	(&(sv)->s_nlmclnt->cl_xprt->addr)
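
/*
 * NLM_ADDRHASH() keys the host cache on the low-order bits of the IPv4
 * address in host byte order.  For example, 10.0.0.37 is 0x0a000025
 * after ntohl(), and 0x25 & (NLM_HOST_NRHASH - 1) selects bucket 5.
 */
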
static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);

static void			nlm_gc_hosts(void);

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	return nlm_lookup_host(0, sin, proto, version);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers);
}
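
/*
 * The two wrappers above differ only in the "server" flag passed to
 * nlm_lookup_host().  Because h_server is part of the match, the same
 * peer address gets separate cache entries for its client-side and
 * server-side roles.
 */
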
/*
 * Common host lookup routine for server & client
 */
struct nlm_host *
nlm_lookup_host(int server, struct sockaddr_in *sin,
					int proto, int version)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
			(unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

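	/*
	 * Walk the hash chain for an entry that matches address, transport
	 * protocol, NLM version and role.  A hit is moved to the front of
	 * the chain so that frequently used peers stay cheap to find.
	 */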
	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		if (nlm_cmp_addr(&host->h_addr, sin)) {
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			mutex_unlock(&nlm_host_mutex);
			return host;
		}
	}

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
		goto nohost;
	memset(host, 0, sizeof(*host));

	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_server     = server;
	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
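
/*
 * Illustrative client-side usage of the cache (a sketch only, not taken
 * from an actual caller; "sin", the protocol and the NLM version below
 * are placeholder values):
 *
 *	struct nlm_host *host;
 *	struct rpc_clnt *clnt;
 *
 *	host = nlmclnt_lookup_host(sin, IPPROTO_UDP, 4);
 *	if (host != NULL && (clnt = nlm_bind_host(host)) != NULL) {
 *		... issue NLM requests through clnt ...
 *	}
 *	nlm_release_host(host);
 *
 * Both lookup routines return the host with h_count already incremented,
 * so every successful lookup must be paired with nlm_release_host().
 */
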
struct nlm_host *
nlm_find_client(void)
{
	/* Find an nlm_host for a client for which h_killed == 0,
	 * and return it.
	 */
	int hash;
	mutex_lock(&nlm_host_mutex);
	for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
		struct nlm_host *host, **hp;
		for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
			if (host->h_server &&
			    host->h_killed == 0) {
				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);
				return host;
			}
		}
	}
	mutex_unlock(&nlm_host_mutex);
	return NULL;
}
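
/*
 * Each call returns the first client entry (h_server set) that has not
 * been killed, with h_count already bumped.  A caller iterating over
 * clients is expected to set h_killed (and drop its reference) before
 * calling again, otherwise the same entry keeps being returned.
 */
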
/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct rpc_xprt	*xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (IS_ERR(xprt))
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
		xprt->resvport = 1;	/* NLM requires a reserved port */

		/* Existing NLM servers accept AUTH_UNIX only */
		clnt = rpc_new_client(xprt, host->h_name, &nlm_program,
					host->h_version, RPC_AUTH_UNIX);
		if (IS_ERR(clnt))
			goto forgetit;
		clnt->cl_autobind = 1;	/* turn on pmap queries */
		clnt->cl_softrtry = 1;	/* All queries are soft */
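		/*
		 * Soft semantics mean a call that keeps timing out will
		 * eventually fail instead of retrying forever, and autobind
		 * lets the RPC client re-query the peer's portmapper if the
		 * remote lockd comes back on a different port.
		 */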

		host->h_rpcclnt = clnt;
	}

	mutex_unlock(&host->h_mutex);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	mutex_unlock(&host->h_mutex);
	return NULL;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}
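
/*
 * Note that both nlm_bind_host() and nlm_rebind_host() are throttled by
 * h_nextrebind, so the remote portmapper is queried at most once per
 * NLM_HOST_REBIND interval (60 seconds) per peer.
 */
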
/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}
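
/*
 * Dropping the last reference does not free the host here: the entry
 * stays in the hash table until nlm_gc_hosts() finds it with h_count == 0,
 * unmarked and expired, and does the actual kfree().
 */
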
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
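
/*
 * Since every host was force-expired before the GC pass above, anything
 * that survives it is still referenced (h_count != 0) or still marked as
 * holding resources, which usually indicates a leaked reference.
 */
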
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

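	/*
	 * Sweep: a host is only torn down if nothing references it
	 * (h_count == 0), it was not marked above, and its expiry time
	 * has passed.
	 */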
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}