/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>
#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH - 1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX) ? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX) ? 120 * HZ : 60 * HZ)
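
/*
 * Peer hosts are kept in a small hash table keyed on the peer's IPv4
 * address.  An entry expires NLM_HOST_EXPIRE jiffies after its last use
 * and is reaped by nlm_gc_hosts(), which runs at most once every
 * NLM_HOST_COLLECT jiffies; both intervals depend on whether the table
 * currently holds more than NLM_HOST_MAX entries.
 */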
static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);

static void			nlm_gc_hosts(void);
static struct nsm_handle *	__nsm_find(const struct sockaddr_in *,
					   const char *, int, int);
/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
		    const char *hostname, int hostname_len)
{
	return nlm_lookup_host(0, sin, proto, version,
			       hostname, hostname_len);
}
/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp,
		   const char *hostname, int hostname_len)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers,
			       hostname, hostname_len);
}
/*
 * Common host lookup routine for server & client
 */
static struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
		int proto, int version,
		const char *hostname, int hostname_len)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;
	struct nsm_handle *nsm = NULL;
	int		hash;
	dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
			NIPQUAD(sin->sin_addr.s_addr), proto, version,
			server ? "server" : "client",
			hostname_len,
			hostname ? hostname : "<none>");

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();
	/* We may keep several nlm_host objects for a peer, because each
	 * nlm_host is identified by
	 * (address, protocol, version, server/client)
	 * We could probably simplify this a little by putting all those
	 * different NLM rpc_clients into one single nlm_host object.
	 * This would allow us to have one nlm_host per address.
	 */
	chain = &nlm_hosts[hash];
	hlist_for_each_entry(host, pos, chain, h_hash) {
		if (!nlm_cmp_addr(&host->h_addr, sin))
			continue;

		/* See if we have an NSM handle for this client */
		if (!nsm)
			nsm = host->h_nsmhandle;

		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		/* Move to head of hash chain. */
		hlist_del(&host->h_hash);
		hlist_add_head(&host->h_hash, chain);

		nlm_get_host(host);
		goto out;
	}
	if (nsm)
		atomic_inc(&nsm->sm_count);

	host = NULL;
	/* Sadly, the host isn't in our hash table yet. See if
	 * we have an NSM handle for it. If not, create one.
	 */
	if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
		goto out;

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		nsm_release(nsm);
		goto out;
	}
	host->h_name	   = nsm->sm_name;
	host->h_addr	   = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version	   = version;
	host->h_proto	   = proto;
	host->h_rpcclnt	   = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires	   = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state	   = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_nsmhandle  = nsm;
	host->h_server	   = server;
	hlist_add_head(&host->h_hash, chain);
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

out:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
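
/*
 * Destroy an nlm_host that the garbage collector has already unhashed.
 * Called with nlm_host_mutex held; the host must no longer be referenced.
 */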
static void
nlm_destroy_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	BUG_ON(!list_empty(&host->h_lockowners));
	BUG_ON(atomic_read(&host->h_count));

	/*
	 * Release NSM handle and unmonitor host.
	 */
	nsm_unmonitor(host);

	if ((clnt = host->h_rpcclnt) != NULL) {
		if (atomic_read(&clnt->cl_users)) {
			printk(KERN_WARNING
				"lockd: active RPC handle\n");
			clnt->cl_dead = 1;
		} else {
			rpc_destroy_client(host->h_rpcclnt);
		}
	}
	kfree(host);
}
/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		unsigned long increment = nlmsvc_timeout * HZ;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}
/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}
/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}
/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}
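
/*
 * A minimal usage sketch (not lifted from any real caller): a host
 * returned by nlmsvc_lookup_host() or pinned via nlm_get_host() stays
 * safe from nlm_gc_hosts() until nlm_release_host() drops the count:
 *
 *	struct nlm_host *host;
 *
 *	host = nlmsvc_lookup_host(rqstp, name, len);
 *	if (host != NULL) {
 *		clnt = nlm_bind_host(host);	// may be NULL on failure
 *		...
 *		nlm_release_host(host);
 *	}
 */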
/*
 * We were notified that the host indicated by address &sin
 * has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct sockaddr_in *sin,
				const char *hostname, int hostname_len,
				u32 new_state)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nsm_handle *nsm;
	struct nlm_host	*host;

	dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
			hostname, NIPQUAD(sin->sin_addr));

	/* Find the NSM handle for this peer */
	if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
		return;

	/* When reclaiming locks on this peer, make sure that
	 * we set up a new notification */
	nsm->sm_monitored = 0;

	/* Mark all hosts tied to this NSM state as having rebooted.
	 * We run the loop repeatedly, because we drop the host table
	 * lock while rebooting.
	 *
	 * To avoid processing a host several times, we match the nsmstate.
	 */
again:	mutex_lock(&nlm_host_mutex);
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
			if (host->h_nsmhandle == nsm
			 && host->h_nsmstate != new_state) {
				host->h_nsmstate = new_state;
				host->h_state++;

				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);

				if (host->h_server) {
					/* We're server for this guy, just ditch
					 * all the locks he held. */
					nlmsvc_free_host_resources(host);
				} else {
					/* He's the server, initiate lock recovery. */
					nlmclnt_recovery(host);
				}

				nlm_release_host(host);
				goto again;
			}
		}
	}

	mutex_unlock(&nlm_host_mutex);
}
/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos;
	struct nlm_host	*host;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
			hlist_for_each_entry(host, pos, chain, h_hash) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct hlist_head *chain;
	struct hlist_node *pos, *next;
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			hlist_del_init(&host->h_hash);

			nlm_destroy_host(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}
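
/*
 * Manage NSM handles.  A single nsm_handle is shared by all nlm_host
 * entries for the same peer, so statd only needs to monitor each peer once.
 */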
static LIST_HEAD(nsm_handles);
static DEFINE_MUTEX(nsm_mutex);
static struct nsm_handle *
__nsm_find(const struct sockaddr_in *sin,
	   const char *hostname, int hostname_len,
	   int create)
{
	struct nsm_handle *nsm = NULL;
	struct list_head *pos;

	if (!sin)
		return NULL;

	if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
		if (printk_ratelimit()) {
			printk(KERN_WARNING "Invalid hostname \"%.*s\" "
					    "in NFS lock request\n",
				hostname_len, hostname);
		}
		return NULL;
	}

	mutex_lock(&nsm_mutex);
	list_for_each(pos, &nsm_handles) {
		nsm = list_entry(pos, struct nsm_handle, sm_link);

		if (hostname && nsm_use_hostnames) {
			if (strlen(nsm->sm_name) != hostname_len
			 || memcmp(nsm->sm_name, hostname, hostname_len))
				continue;
		} else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
			continue;
		atomic_inc(&nsm->sm_count);
		goto out;
	}

	if (!create) {
		nsm = NULL;
		goto out;
	}

	nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
	if (nsm != NULL) {
		nsm->sm_addr = *sin;
		nsm->sm_name = (char *) (nsm + 1);
		memcpy(nsm->sm_name, hostname, hostname_len);
		nsm->sm_name[hostname_len] = '\0';
		atomic_set(&nsm->sm_count, 1);

		list_add(&nsm->sm_link, &nsm_handles);
	}

out:
	mutex_unlock(&nsm_mutex);
	return nsm;
}
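
/*
 * Find or create an NSM handle for the given peer, taking a reference
 * that must be dropped with nsm_release().
 */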
struct nsm_handle *
nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
{
	return __nsm_find(sin, hostname, hostname_len, 1);
}
/*
 * Release an NSM handle
 */
void
nsm_release(struct nsm_handle *nsm)
{
	if (!nsm)
		return;
	if (atomic_dec_and_test(&nsm->sm_count)) {
		mutex_lock(&nsm_mutex);
		if (atomic_read(&nsm->sm_count) == 0) {
			list_del(&nsm->sm_link);
			kfree(nsm);
		}
		mutex_unlock(&nsm_mutex);
	}
}