/*
 * linux/fs/nfsd/nfssvc.c
 *
 * Central processing for nfsd.
 *
 * Authors:   Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/nfs.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/freezer.h>
#include <linux/fs_struct.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/cache.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/stats.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>

#define NFSDDBG_FACILITY        NFSDDBG_SVC

/* these signals will be delivered to an nfsd thread
 * when handling a request
 */
#define ALLOWED_SIGS    (sigmask(SIGKILL))

/* these signals will be delivered to an nfsd thread
 * when not handling a request, i.e. when waiting
 */
#define SHUTDOWN_SIGS   (sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))

/* if the last thread dies with SIGHUP, then the exports table is
 * left unchanged ( like 2.4-{0-9} ).  Any other signal will clear
 * the exports table (like 2.2).
 */
#define SIG_NOCLEAN     SIGHUP

extern struct svc_program       nfsd_program;
static void                     nfsd(struct svc_rqst *rqstp);
struct timeval                  nfssvc_boot;
struct svc_serv                 *nfsd_serv;
static atomic_t                 nfsd_busy;
static unsigned long            nfsd_last_call;
static DEFINE_SPINLOCK(nfsd_call_lock);

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat  nfsd_acl_svcstats;
static struct svc_version *nfsd_acl_version[] = {
        [2] = &nfsd_acl_version2,
        [3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS        2
#define NFSD_ACL_NRVERS         ARRAY_SIZE(nfsd_acl_version)
static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];

static struct svc_program       nfsd_acl_program = {
        .pg_prog                = NFS_ACL_PROGRAM,
        .pg_nvers               = NFSD_ACL_NRVERS,
        .pg_vers                = nfsd_acl_versions,
        .pg_name                = "nfsacl",
        .pg_class               = "nfsd",
        .pg_stats               = &nfsd_acl_svcstats,
        .pg_authenticate        = &svc_set_client,
};

static struct svc_stat  nfsd_acl_svcstats = {
        .program        = &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

static struct svc_version *nfsd_version[] = {
        [2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
        [3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
        [4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS            2
#define NFSD_NRVERS             ARRAY_SIZE(nfsd_version)
static struct svc_version *nfsd_versions[NFSD_NRVERS];

struct svc_program              nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
        .pg_next                = &nfsd_acl_program,
#endif
        .pg_prog                = NFS_PROGRAM,          /* program number */
        .pg_nvers               = NFSD_NRVERS,          /* nr of entries in nfsd_version */
        .pg_vers                = nfsd_versions,        /* version table */
        .pg_name                = "nfsd",               /* program name */
        .pg_class               = "nfsd",               /* authentication class */
        .pg_stats               = &nfsd_svcstats,       /* server statistics */
        .pg_authenticate        = &svc_set_client,      /* export authentication */
};
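
/*
 * Enable, disable or query a single NFS version.  NFSD_SET enables a
 * compiled-in version, NFSD_CLEAR disables it, NFSD_TEST reports
 * whether it is currently enabled, and NFSD_AVAIL reports whether it
 * was compiled in at all.
 */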
int nfsd_vers(int vers, enum vers_op change)
{
        if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
                return -1;
        switch(change) {
        case NFSD_SET:
                nfsd_versions[vers] = nfsd_version[vers];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
                if (vers < NFSD_ACL_NRVERS)
                        nfsd_acl_versions[vers] = nfsd_acl_version[vers];
#endif
                break;
        case NFSD_CLEAR:
                nfsd_versions[vers] = NULL;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
                if (vers < NFSD_ACL_NRVERS)
                        nfsd_acl_versions[vers] = NULL;
#endif
                break;
        case NFSD_TEST:
                return nfsd_versions[vers] != NULL;
        case NFSD_AVAIL:
                return nfsd_version[vers] != NULL;
        }
        return 0;
}

/*
 * Maximum number of nfsd processes
 */
#define NFSD_MAXSERVS           8192

int nfsd_nrthreads(void)
{
        if (nfsd_serv == NULL)
                return 0;
        else
                return nfsd_serv->sv_nrthreads;
}

static int killsig;     /* signal that was used to kill last nfsd */
static void nfsd_last_thread(struct svc_serv *serv)
{
        /* When last nfsd thread exits we need to do some clean-up */
        struct svc_sock *svsk;

        list_for_each_entry(svsk, &serv->sv_permsocks, sk_list)
                lockd_down();
        nfsd_serv = NULL;
        nfsd_racache_shutdown();
        nfs4_state_shutdown();

        printk(KERN_WARNING "nfsd: last server has exited\n");
        if (killsig != SIG_NOCLEAN) {
                printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
                nfsd_export_flush();
        }
}
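
/*
 * If no NFS version is currently enabled, fall back to enabling every
 * compiled-in version (and the matching ACL versions).  nfsd_svc()
 * calls this before creating the service, so starting nfsd without an
 * explicit version selection serves all available versions.
 */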
void nfsd_reset_versions(void)
{
        int found_one = 0;
        int i;

        for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
                if (nfsd_program.pg_vers[i])
                        found_one = 1;
        }

        if (!found_one) {
                for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
                        nfsd_program.pg_vers[i] = nfsd_version[i];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
                for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
                        nfsd_acl_program.pg_vers[i] =
                                nfsd_acl_version[i];
#endif
        }
}

int nfsd_create_serv(void)
{
        int err = 0;

        lock_kernel();
        if (nfsd_serv) {
                svc_get(nfsd_serv);
                unlock_kernel();
                return 0;
        }
        if (nfsd_max_blksize == 0) {
                /* choose a suitable default */
                struct sysinfo i;
                si_meminfo(&i);
                /* Aim for 1/4096 of memory per thread.
                 * This gives 1MB on 4Gig machines
                 * but only uses 32K on 128M machines.
                 * Bottom out at 8K on 32M and smaller.
                 * Of course, this is only a default.
                 */
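                /* Worked example of the sizing above (illustrative):
                 * i.totalram counts pages, so after the shift below it
                 * counts 4K units, i.e. memory/4096.  With 128MB of RAM
                 * that is 32768, and halving from NFSSVC_MAXBLKSIZE (1MB)
                 * gives 1M -> 512K -> 256K -> 128K -> 64K -> 32K, which
                 * is where the loop stops.
                 */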
                nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
                i.totalram <<= PAGE_SHIFT - 12;
                while (nfsd_max_blksize > i.totalram &&
                       nfsd_max_blksize >= 8*1024*2)
                        nfsd_max_blksize /= 2;
        }

        atomic_set(&nfsd_busy, 0);
        nfsd_serv = svc_create_pooled(&nfsd_program,
                                      nfsd_max_blksize,
                                      nfsd_last_thread,
                                      nfsd, SIG_NOCLEAN, THIS_MODULE);
        if (nfsd_serv == NULL)
                err = -ENOMEM;
        unlock_kernel();
        do_gettimeofday(&nfssvc_boot);          /* record boot time */
        return err;
}
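
/*
 * Create the permanent sockets for the service.  Each successfully
 * created nfsd socket takes a reference on lockd via lockd_up(), which
 * is why nfsd_last_thread() above issues one lockd_down() per entry on
 * sv_permsocks.
 */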
static int nfsd_init_socks(int port)
{
        int error;

        if (!list_empty(&nfsd_serv->sv_permsocks))
                return 0;

        error = lockd_up(IPPROTO_UDP);
        if (error >= 0) {
                error = svc_makesock(nfsd_serv, IPPROTO_UDP, port,
                                     SVC_SOCK_DEFAULTS);
                if (error < 0)
                        lockd_down();
        }
        if (error < 0)
                return error;

#ifdef CONFIG_NFSD_TCP
        error = lockd_up(IPPROTO_TCP);
        if (error >= 0) {
                error = svc_makesock(nfsd_serv, IPPROTO_TCP, port,
                                     SVC_SOCK_DEFAULTS);
                if (error < 0)
                        lockd_down();
        }
        if (error < 0)
                return error;
#endif
        return 0;
}

int nfsd_nrpools(void)
{
        if (nfsd_serv == NULL)
                return 0;
        else
                return nfsd_serv->sv_nrpools;
}

int nfsd_get_nrthreads(int n, int *nthreads)
{
        int i = 0;

        if (nfsd_serv != NULL) {
                for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++)
                        nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads;
        }

        return 0;
}

int nfsd_set_nrthreads(int n, int *nthreads)
{
        int i = 0;
        int tot = 0;
        int err = 0;

        if (nfsd_serv == NULL || n <= 0)
                return 0;

        if (n > nfsd_serv->sv_nrpools)
                n = nfsd_serv->sv_nrpools;

        /* enforce a global maximum number of threads */
        tot = 0;
        for (i = 0; i < n; i++) {
                if (nthreads[i] > NFSD_MAXSERVS)
                        nthreads[i] = NFSD_MAXSERVS;
                tot += nthreads[i];
        }
        if (tot > NFSD_MAXSERVS) {
                /* total too large: scale down requested numbers */
                for (i = 0; i < n && tot > 0; i++) {
                        int new = nthreads[i] * NFSD_MAXSERVS / tot;
                        tot -= (nthreads[i] - new);
                        nthreads[i] = new;
                }
                for (i = 0; i < n && tot > 0; i++) {
                        nthreads[i]--;
                        tot--;
                }
        }

        /*
         * There must always be a thread in pool 0; the admin
         * can't shut down NFS completely using pool_threads.
         */
        if (nthreads[0] == 0)
                nthreads[0] = 1;

        /* apply the new numbers */
        lock_kernel();
        svc_get(nfsd_serv);
        for (i = 0; i < n; i++) {
                err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i],
                                          nthreads[i]);
                if (err)
                        break;
        }
        svc_destroy(nfsd_serv);
        unlock_kernel();

        return err;
}
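
/*
 * Adjust the number of nfsd threads, making sure the supporting
 * infrastructure (readahead cache, NFSv4 state, service and sockets)
 * exists first.  A thread count of zero shuts the threads down.
 */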
int
nfsd_svc(unsigned short port, int nrservs)
{
        int     error;

        lock_kernel();
        dprintk("nfsd: creating service\n");
        error = -EINVAL;
        if (nrservs <= 0)
                nrservs = 0;
        if (nrservs > NFSD_MAXSERVS)
                nrservs = NFSD_MAXSERVS;

        /* Readahead param cache - will no-op if it already exists */
        error = nfsd_racache_init(2*nrservs);
        if (error < 0)
                goto out;
        error = nfs4_state_start();
        if (error < 0)
                goto out;

        nfsd_reset_versions();

        error = nfsd_create_serv();
        if (error)
                goto out;

        error = nfsd_init_socks(port);
        if (error)
                goto failure;

        error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
 failure:
        svc_destroy(nfsd_serv);         /* Release server */
 out:
        unlock_kernel();
        return error;
}
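
/*
 * Per-decile thread-utilisation accounting.  Each call charges the
 * jiffies elapsed since the previous call to the bucket matching the
 * fraction of threads that were busy; bucket 10 also bumps th_fullcnt,
 * counting the periods when every thread was busy.  These counters
 * back the thread-utilisation ("th") figures in the nfsd statistics.
 */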
static inline void
update_thread_usage(int busy_threads)
{
        unsigned long prev_call;
        unsigned long diff;
        int decile;

        spin_lock(&nfsd_call_lock);
        prev_call = nfsd_last_call;
        nfsd_last_call = jiffies;
        decile = busy_threads*10/nfsdstats.th_cnt;
        if (decile > 0 && decile <= 10) {
                diff = nfsd_last_call - prev_call;
                if ((nfsdstats.th_usage[decile-1] += diff) >= NFSD_USAGE_WRAP)
                        nfsdstats.th_usage[decile-1] -= NFSD_USAGE_WRAP;
                if (decile == 10)
                        nfsdstats.th_fullcnt++;
        }
        spin_unlock(&nfsd_call_lock);
}

/*
 * This is the NFS server kernel thread: it receives and processes
 * requests in a loop, with only SIGKILL deliverable while a request is
 * being handled; the shutdown signals are allowed only between
 * requests, while the thread waits in svc_recv().
 */
static void
nfsd(struct svc_rqst *rqstp)
{
        struct fs_struct *fsp;
        int              err;
        sigset_t shutdown_mask, allowed_mask;

        /* Lock module and set up kernel thread */
        lock_kernel();
        daemonize("nfsd");

        /* After daemonize() this kernel thread shares current->fs
         * with the init process. We need to create files with a
         * umask of 0 instead of init's umask. */
        fsp = copy_fs_struct(current->fs);
        if (!fsp) {
                printk("Unable to start nfsd thread: out of memory\n");
                goto out;
        }
        exit_fs(current);
        current->fs = fsp;
        current->fs->umask = 0;

        siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
        siginitsetinv(&allowed_mask, ALLOWED_SIGS);

        nfsdstats.th_cnt++;

        rqstp->rq_task = current;

        unlock_kernel();

        /*
         * We want less throttling in balance_dirty_pages() so that nfs to
         * localhost doesn't cause nfsd to lock up due to all the client's
         * dirty pages.
         */
        current->flags |= PF_LESS_THROTTLE;
        set_freezable();

        /*
         * The main request loop
         */
        for (;;) {
                /* Block all but the shutdown signals */
                sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

                /*
                 * Find a socket with data available and call its
                 * recvfrom routine.
                 */
                while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
                        ;
                if (err < 0)
                        break;
                update_thread_usage(atomic_read(&nfsd_busy));
                atomic_inc(&nfsd_busy);

                /* Lock the export hash tables for reading. */
                exp_readlock();

                /* Process request with signals blocked. */
                sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

                svc_process(rqstp);

                /* Unlock export hash tables */
                exp_readunlock();
                update_thread_usage(atomic_read(&nfsd_busy));
                atomic_dec(&nfsd_busy);
        }

        if (err != -EINTR) {
                printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
        } else {
                unsigned int signo;

                for (signo = 1; signo <= _NSIG; signo++)
                        if (sigismember(&current->pending.signal, signo) &&
                            !sigismember(&current->blocked, signo))
                                break;
                killsig = signo;
        }
        /* Clear signals before calling svc_exit_thread() */
        flush_signals(current);

        lock_kernel();

        nfsdstats.th_cnt--;

 out:
        /* Release the thread */
        svc_exit_thread(rqstp);

        /* Release module */
        unlock_kernel();
        module_put_and_exit(0);
}
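
/*
 * Map errors that a given protocol version cannot express onto ones it
 * can: NFSERR_JUKEBOX does not exist in NFSv2, so the request is
 * dropped and the client will retransmit; wrongsec is NFSv4-only, so
 * pre-v4 clients see EACCES instead.
 */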
static __be32 map_new_errors(u32 vers, __be32 nfserr)
{
        if (nfserr == nfserr_jukebox && vers == 2)
                return nfserr_dropit;
        if (nfserr == nfserr_wrongsec && vers < 4)
                return nfserr_acces;
        return nfserr;
}
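
/*
 * nfsd_dispatch() is the common RPC dispatch routine for all NFS
 * versions: consult the duplicate-reply cache, decode the arguments,
 * call the per-procedure handler, encode the result, and store the
 * reply back in the cache.  It returns 1 when a reply should be sent
 * and 0 when the request is dropped.
 */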
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
        struct svc_procedure    *proc;
        kxdrproc_t              xdr;
        __be32                  nfserr;
        __be32                  *nfserrp;

        dprintk("nfsd_dispatch: vers %d proc %d\n",
                                rqstp->rq_vers, rqstp->rq_proc);
        proc = rqstp->rq_procinfo;

        /* Check whether we have this call in the cache. */
        switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
        case RC_INTR:
        case RC_DROPIT:
                return 0;
        case RC_REPLY:
                return 1;
        case RC_DOIT:;
                /* do it */
        }

        /* Decode arguments */
        xdr = proc->pc_decode;
        if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
                        rqstp->rq_argp)) {
                dprintk("nfsd: failed to decode arguments!\n");
                nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
                *statp = rpc_garbage_args;
                return 1;
        }

        /* need to grab the location to store the status, as
         * nfsv4 does some encoding while processing
         */
        nfserrp = rqstp->rq_res.head[0].iov_base
                + rqstp->rq_res.head[0].iov_len;
        rqstp->rq_res.head[0].iov_len += sizeof(__be32);

        /* Now call the procedure handler, and encode NFS status. */
        nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
        nfserr = map_new_errors(rqstp->rq_vers, nfserr);
        if (nfserr == nfserr_jukebox && rqstp->rq_vers == 2)
                nfserr = nfserr_dropit;
        if (nfserr == nfserr_dropit) {
                dprintk("nfsd: Dropping request due to malloc failure!\n");
                nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
                return 0;
        }

        if (rqstp->rq_proc != 0)
                *nfserrp++ = nfserr;

        /* Encode result.
         * For NFSv2, additional info is never returned in case of an error.
         */
        if (!(nfserr && rqstp->rq_vers == 2)) {
                xdr = proc->pc_encode;
                if (xdr && !xdr(rqstp, nfserrp,
                                rqstp->rq_resp)) {
                        /* Failed to encode result. Release cache entry */
                        dprintk("nfsd: failed to encode result!\n");
                        nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
                        *statp = rpc_system_err;
                        return 1;
                }
        }

        /* Store reply in cache. */
        nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
        return 1;
}