/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>
#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ :  60 * HZ)
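
/*
 * Host expiry and collection intervals depend on cache pressure: both
 * become longer once more than NLM_HOST_MAX (64) hosts are cached.
 * Garbage collection is not driven by a timer in this file; it runs from
 * nlm_lookup_host() once jiffies reaches next_gc, and growing past
 * NLM_HOST_MAX zeroes next_gc so the next lookup collects immediately.
 */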
static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);

static void			nlm_gc_hosts(void);
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version)
{
	return nlm_lookup_host(0, sin, proto, version);
}
/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers);
}
58 * Common host lookup routine for server & client
60 struct nlm_host *
61 nlm_lookup_host(int server, struct sockaddr_in *sin,
62 int proto, int version)
64 struct nlm_host *host, **hp;
65 u32 addr;
66 int hash;
68 dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n",
69 (unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version);
71 hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
73 /* Lock hash table */
74 mutex_lock(&nlm_host_mutex);
76 if (time_after_eq(jiffies, next_gc))
77 nlm_gc_hosts();
79 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
80 if (host->h_proto != proto)
81 continue;
82 if (host->h_version != version)
83 continue;
84 if (host->h_server != server)
85 continue;
87 if (nlm_cmp_addr(&host->h_addr, sin)) {
88 if (hp != nlm_hosts + hash) {
89 *hp = host->h_next;
90 host->h_next = nlm_hosts[hash];
91 nlm_hosts[hash] = host;
93 nlm_get_host(host);
94 mutex_unlock(&nlm_host_mutex);
95 return host;
	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto nohost;

	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_server     = server;
	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	/* Force a collection on the next lookup if the cache grew too large */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
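
/*
 * A successful lookup always returns a referenced nlm_host (the initial
 * h_count of 1 for a new entry, or an extra reference via nlm_get_host()
 * for a cached one), which the caller drops with nlm_release_host().
 * Illustrative caller sketch; the surrounding context and error value
 * are hypothetical:
 *
 *	struct nlm_host *host;
 *
 *	host = nlmclnt_lookup_host(sin, IPPROTO_UDP, 4);
 *	if (host == NULL)
 *		return -ENOLCK;
 *	...
 *	nlm_release_host(host);
 */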
struct nlm_host *
nlm_find_client(void)
{
	/* Find an nlm_host for a client whose h_killed flag is
	 * still zero, and return it.
	 */
	int hash;
	mutex_lock(&nlm_host_mutex);
	for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
		struct nlm_host *host, **hp;
		for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
			if (host->h_server &&
			    host->h_killed == 0) {
				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);
				return host;
			}
		}
	}
	mutex_unlock(&nlm_host_mutex);
	return NULL;
}
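
/*
 * The host handed back by nlm_find_client() above is the first server-side
 * entry (h_server set) not yet marked killed, again with a reference held,
 * so the caller is expected to balance it with nlm_release_host().
 */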
/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		/* Base the retransmit timeout on nlmsvc_timeout; allow up
		 * to 5 retries, capping the timeout at six times the base.
		 */
		unsigned long increment = nlmsvc_timeout * HZ;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}
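
/*
 * Both the initial rpc_create() and the forced rebind above run under
 * host->h_mutex, so concurrent users of the same nlm_host end up sharing
 * a single RPC client.
 */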
/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}
/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}
/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}
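
/*
 * Dropping the last reference only asserts that the host no longer owns
 * lockowners, granted locks or reclaim entries; the structure itself is
 * freed later by nlm_gc_hosts() once the host has also expired.
 */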
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* Sweep: free every host that is unreferenced, unmarked and expired */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;
			/* Don't unmonitor hosts that have been invalidated */
			if (host->h_monitored && !host->h_killed)
				nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}