[PATCH] w1: Move w1-connector definitions into linux/include/connector.h
[linux-2.6/verdex.git] / fs / lockd / host.c
blob729ac427d359537f20e1dffa15514869ca8264ce
1 /*
2 * linux/fs/lockd/host.c
4 * Management for NLM peer hosts. The nlm_host struct is shared
5 * between client and server implementation. The only reason to
6 * do so is to reduce code bloat.
8 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
9 */
11 #include <linux/types.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/in.h>
15 #include <linux/sunrpc/clnt.h>
16 #include <linux/sunrpc/svc.h>
17 #include <linux/lockd/lockd.h>
18 #include <linux/lockd/sm_inter.h>
19 #include <linux/mutex.h>
#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
/* Hash on the low-order bits of the host-order IPv4 address;
 * NLM_HOST_NRHASH must stay a power of two for this mask to work. */
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
/* Entries expire faster (and GC runs more often) once the cache holds
 * more than NLM_HOST_MAX hosts. */
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
#define NLM_HOST_ADDR(sv)	(&(sv)->s_nlmclnt->cl_xprt->addr)

/* Hash table of cached nlm_host entries, chained through h_next.
 * The table, next_gc and nrhosts are all protected by nlm_host_mutex. */
static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;	/* jiffies of next GC pass */
static int			nrhosts;	/* current number of cached hosts */
static DEFINE_MUTEX(nlm_host_mutex);

static void			nlm_gc_hosts(void);
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 * Used by the client side: the peer is an NLM server.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	/* server == 0: client-side handle for a remote NLM server */
	return nlm_lookup_host(0, sin, proto, version);
}
/*
 * Find an NLM client handle in the cache. If there is none, create it.
 * Used by the server side: the peer is the client that sent rqstp.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
	/* server == 1: handle for the remote NLM client, keyed by the
	 * request's source address/transport/version */
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers);
}
59 * Common host lookup routine for server & client
61 struct nlm_host *
62 nlm_lookup_host(int server, struct sockaddr_in *sin,
63 int proto, int version)
65 struct nlm_host *host, **hp;
66 u32 addr;
67 int hash;
69 dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
70 (unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);
72 hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
74 /* Lock hash table */
75 mutex_lock(&nlm_host_mutex);
77 if (time_after_eq(jiffies, next_gc))
78 nlm_gc_hosts();
80 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
81 if (host->h_proto != proto)
82 continue;
83 if (host->h_version != version)
84 continue;
85 if (host->h_server != server)
86 continue;
88 if (nlm_cmp_addr(&host->h_addr, sin)) {
89 if (hp != nlm_hosts + hash) {
90 *hp = host->h_next;
91 host->h_next = nlm_hosts[hash];
92 nlm_hosts[hash] = host;
94 nlm_get_host(host);
95 mutex_unlock(&nlm_host_mutex);
96 return host;
100 /* Ooops, no host found, create it */
101 dprintk("lockd: creating host entry\n");
103 if (!(host = (struct nlm_host *) kmalloc(sizeof(*host), GFP_KERNEL)))
104 goto nohost;
105 memset(host, 0, sizeof(*host));
107 addr = sin->sin_addr.s_addr;
108 sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));
110 host->h_addr = *sin;
111 host->h_addr.sin_port = 0; /* ouch! */
112 host->h_version = version;
113 host->h_proto = proto;
114 host->h_rpcclnt = NULL;
115 init_MUTEX(&host->h_sema);
116 host->h_nextrebind = jiffies + NLM_HOST_REBIND;
117 host->h_expires = jiffies + NLM_HOST_EXPIRE;
118 atomic_set(&host->h_count, 1);
119 init_waitqueue_head(&host->h_gracewait);
120 host->h_state = 0; /* pseudo NSM state */
121 host->h_nsmstate = 0; /* real NSM state */
122 host->h_server = server;
123 host->h_next = nlm_hosts[hash];
124 nlm_hosts[hash] = host;
125 INIT_LIST_HEAD(&host->h_lockowners);
126 spin_lock_init(&host->h_lock);
127 INIT_LIST_HEAD(&host->h_granted);
128 INIT_LIST_HEAD(&host->h_reclaim);
130 if (++nrhosts > NLM_HOST_MAX)
131 next_gc = 0;
133 nohost:
134 mutex_unlock(&nlm_host_mutex);
135 return host;
138 struct nlm_host *
139 nlm_find_client(void)
141 /* find a nlm_host for a client for which h_killed == 0.
142 * and return it
144 int hash;
145 mutex_lock(&nlm_host_mutex);
146 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
147 struct nlm_host *host, **hp;
148 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
149 if (host->h_server &&
150 host->h_killed == 0) {
151 nlm_get_host(host);
152 mutex_unlock(&nlm_host_mutex);
153 return host;
157 mutex_unlock(&nlm_host_mutex);
158 return NULL;
/*
 * Create the NLM RPC client for an NLM peer
 *
 * Returns the (possibly cached) RPC client for this host, forcing a
 * portmap rebind if the rebind interval has elapsed; returns NULL if
 * the transport or client could not be created.  Serialized per host
 * via h_sema.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;
	struct rpc_xprt	*xprt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	down(&host->h_sema);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		xprt = clnt->cl_xprt;
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		/* First use: build a fresh transport and client */
		xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
		if (IS_ERR(xprt))
			goto forgetit;

		xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout);
		xprt->resvport = 1;	/* NLM requires a reserved port */

		/* Existing NLM servers accept AUTH_UNIX only */
		clnt = rpc_new_client(xprt, host->h_name, &nlm_program,
					host->h_version, RPC_AUTH_UNIX);
		if (IS_ERR(clnt))
			goto forgetit;
		clnt->cl_autobind = 1;	/* turn on pmap queries */
		clnt->cl_softrtry = 1;	/* All queries are soft */

		/* Cache the client for subsequent calls on this host */
		host->h_rpcclnt = clnt;
	}

	up(&host->h_sema);
	return clnt;

forgetit:
	printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
	up(&host->h_sema);
	return NULL;
}
/*
 * Force a portmap lookup of the remote lockd port
 *
 * Rate-limited: only rebinds if a client exists and the rebind
 * interval (NLM_HOST_REBIND) has elapsed since the last one.
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}
/*
 * Increment NLM host count
 *
 * NULL-safe; returns its argument.  Taking a reference also pushes the
 * entry's expiry time out, keeping actively-used hosts out of GC.
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}
/*
 * Release NLM host after use
 *
 * Drops one reference.  Note the host is NOT freed here even when the
 * count reaches zero — the entry stays cached and is reclaimed later
 * by nlm_gc_hosts().  The BUG_ONs assert that a zero-ref host holds no
 * remaining lockowners, granted locks, or reclaim entries.
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 *
 * Marks every cached host as expired, then runs a GC pass to free
 * them.  Hosts that survive (still referenced or in use) are reported
 * but leak — hence the warning below.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * Caller must hold nlm_host_mutex (all callers in this file do).
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	/* Clear the in-use marks before the mark phase */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep: unlink and free every unmarked, unreferenced, expired host */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;	/* keep; advance cursor */
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;	/* unlink from the chain */
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					/* Client still in use: mark it dead so
					 * it is destroyed on last put instead */
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}