/*
 * linux/fs/nfsd/nfssvc.c
 *
 * Central processing for nfsd.
 *
 * Authors:	Olaf Kirch (okir@monad.swb.de)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/nfs.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/freezer.h>
#include <linux/fs_struct.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/cache.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/stats.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/syscall.h>
#include <linux/lockd/bind.h>
#include <linux/nfsacl.h>
#define NFSDDBG_FACILITY	NFSDDBG_SVC

/* these signals will be delivered to an nfsd thread
 * when handling a request
 */
#define ALLOWED_SIGS	(sigmask(SIGKILL))
/* these signals will be delivered to an nfsd thread
 * when not handling a request, i.e. when waiting
 */
#define SHUTDOWN_SIGS	(sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))
/* if the last thread dies with SIGHUP, then the exports table is
 * left unchanged ( like 2.4-{0-9} ).  Any other signal will clear
 * the exports table (like 2.2).
 */
#define	SIG_NOCLEAN	SIGHUP
extern struct svc_program	nfsd_program;
static void			nfsd(struct svc_rqst *rqstp);
struct timeval			nfssvc_boot;
struct svc_serv			*nfsd_serv;
static atomic_t			nfsd_busy;
static unsigned long		nfsd_last_call;
static DEFINE_SPINLOCK(nfsd_call_lock);
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat	nfsd_acl_svcstats;
static struct svc_version *	nfsd_acl_version[] = {
	[2] = &nfsd_acl_version2,
	[3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS	2
#define NFSD_ACL_NRVERS		ARRAY_SIZE(nfsd_acl_version)
static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];

static struct svc_program	nfsd_acl_program = {
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_versions,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_stats		= &nfsd_acl_svcstats,
	.pg_authenticate	= &svc_set_client,
};

static struct svc_stat	nfsd_acl_svcstats = {
	.program	= &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
static struct svc_version *	nfsd_version[] = {
	[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
	[3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS		2
#define NFSD_NRVERS		ARRAY_SIZE(nfsd_version)
static struct svc_version *nfsd_versions[NFSD_NRVERS];
struct svc_program		nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	.pg_next		= &nfsd_acl_program,
#endif
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_versions,	/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_stats		= &nfsd_svcstats,	/* server statistics */
	.pg_authenticate	= &svc_set_client,	/* export authentication */
};
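
/*
 * nfsd_vers() is the switch point for the version tables above: NFSD_SET and
 * NFSD_CLEAR enable or disable a version in the table that nfsd_program
 * serves, NFSD_TEST reports whether a version is currently enabled, and
 * NFSD_AVAIL reports whether it was compiled in at all.
 */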
int nfsd_vers(int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
		return -1;
	switch(change) {
	case NFSD_SET:
		nfsd_versions[vers] = nfsd_version[vers];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = nfsd_acl_version[vers];
#endif
		break;
	case NFSD_CLEAR:
		nfsd_versions[vers] = NULL;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = NULL;
#endif
		break;
	case NFSD_TEST:
		return nfsd_versions[vers] != NULL;
	case NFSD_AVAIL:
		return nfsd_version[vers] != NULL;
	}
	return 0;
}
/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192
int nfsd_nrthreads(void)
{
	if (nfsd_serv == NULL)
		return 0;
	else
		return nfsd_serv->sv_nrthreads;
}
static int	killsig;	/* signal that was used to kill last nfsd */
static void nfsd_last_thread(struct svc_serv *serv)
{
	/* When last nfsd thread exits we need to do some clean-up */
	struct svc_xprt *xprt;
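
	/* nfsd_init_socks() below does one lockd_up() per permanent socket
	 * it creates, so take lockd down once per entry on sv_permsocks. */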
	list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list)
		lockd_down();
	nfsd_serv = NULL;
	nfsd_racache_shutdown();
	nfs4_state_shutdown();

	printk(KERN_WARNING "nfsd: last server has exited\n");
	if (killsig != SIG_NOCLEAN) {
		printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
		nfsd_export_flush();
	}
}
void nfsd_reset_versions(void)
{
	int found_one = 0;
	int i;

	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
		if (nfsd_program.pg_vers[i])
			found_one = 1;
	}

	if (!found_one) {
		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
			nfsd_program.pg_vers[i] = nfsd_version[i];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
			nfsd_acl_program.pg_vers[i] =
				nfsd_acl_version[i];
#endif
	}
}
int nfsd_create_serv(void)
{
	int err = 0;

	lock_kernel();
	if (nfsd_serv) {
		svc_get(nfsd_serv);
		unlock_kernel();
		return 0;
	}
	if (nfsd_max_blksize == 0) {
		/* choose a suitable default */
		struct sysinfo i;
		si_meminfo(&i);
		/* Aim for 1/4096 of memory per thread
		 * This gives 1MB on 4Gig machines
		 * But only uses 32K on 128M machines.
		 * Bottom out at 8K on 32M and smaller.
		 * Of course, this is only a default.
		 */
		nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
		i.totalram <<= PAGE_SHIFT - 12;
		while (nfsd_max_blksize > i.totalram &&
		       nfsd_max_blksize >= 8*1024*2)
			nfsd_max_blksize /= 2;
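		/*
		 * Illustrative arithmetic (assuming 4K pages, so that
		 * PAGE_SHIFT - 12 == 0 and i.totalram counts 4K units):
		 * a 128MB machine has totalram of about 32768, so the loop
		 * halves the 1MB NFSSVC_MAXBLKSIZE down to 32K; a 32MB
		 * machine hits the 8*1024*2 floor and ends up at 8K.
		 */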
	}

	atomic_set(&nfsd_busy, 0);
	nfsd_serv = svc_create_pooled(&nfsd_program,
				      nfsd_max_blksize,
				      nfsd_last_thread,
				      nfsd, SIG_NOCLEAN, THIS_MODULE);
	if (nfsd_serv == NULL)
		err = -ENOMEM;
	unlock_kernel();
	do_gettimeofday(&nfssvc_boot);		/* record boot time */
	return err;
}
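
/*
 * Create the default UDP (and, when CONFIG_NFSD_TCP is set, TCP) listeners
 * on the given port.  lockd is brought up alongside each transport; the
 * matching lockd_down() calls are made from nfsd_last_thread() above.
 */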
static int nfsd_init_socks(int port)
{
	int error;

	if (!list_empty(&nfsd_serv->sv_permsocks))
		return 0;

	error = lockd_up(IPPROTO_UDP);
	if (error >= 0) {
		error = svc_create_xprt(nfsd_serv, "udp", port,
					SVC_SOCK_DEFAULTS);
		if (error < 0)
			lockd_down();
	}
	if (error < 0)
		return error;

#ifdef CONFIG_NFSD_TCP
	error = lockd_up(IPPROTO_TCP);
	if (error >= 0) {
		error = svc_create_xprt(nfsd_serv, "tcp", port,
					SVC_SOCK_DEFAULTS);
		if (error < 0)
			lockd_down();
	}
	if (error < 0)
		return error;
#endif
	return 0;
}
int nfsd_nrpools(void)
{
	if (nfsd_serv == NULL)
		return 0;
	else
		return nfsd_serv->sv_nrpools;
}
int nfsd_get_nrthreads(int n, int *nthreads)
{
	int i = 0;

	if (nfsd_serv != NULL) {
		for (i = 0; i < nfsd_serv->sv_nrpools && i < n; i++)
			nthreads[i] = nfsd_serv->sv_pools[i].sp_nrthreads;
	}

	return 0;
}
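
/*
 * Apply the per-pool thread counts requested through the pool_threads
 * interface: clamp the total to NFSD_MAXSERVS, keep at least one thread in
 * pool 0, and hand the final numbers to svc_set_num_threads() per pool.
 */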
int nfsd_set_nrthreads(int n, int *nthreads)
{
	int i = 0;
	int tot = 0;
	int err = 0;

	if (nfsd_serv == NULL || n <= 0)
		return 0;

	if (n > nfsd_serv->sv_nrpools)
		n = nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		if (nthreads[i] > NFSD_MAXSERVS)
			nthreads[i] = NFSD_MAXSERVS;
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/*
	 * There must always be a thread in pool 0; the admin
	 * can't shut down NFS completely using pool_threads.
	 */
	if (nthreads[0] == 0)
		nthreads[0] = 1;
	/* apply the new numbers */
	lock_kernel();
	svc_get(nfsd_serv);
	for (i = 0; i < n; i++) {
		err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i],
					  nthreads[i]);
		if (err)
			break;
	}
	svc_destroy(nfsd_serv);
	unlock_kernel();

	return err;
}
int
nfsd_svc(unsigned short port, int nrservs)
{
	int	error;

	lock_kernel();
	dprintk("nfsd: creating service\n");
	error = -EINVAL;
	if (nrservs <= 0)
		nrservs = 0;
	if (nrservs > NFSD_MAXSERVS)
		nrservs = NFSD_MAXSERVS;

	/* Readahead param cache - will no-op if it already exists */
	error = nfsd_racache_init(2*nrservs);
	if (error < 0)
		goto out;
	nfs4_state_start();

	nfsd_reset_versions();

	error = nfsd_create_serv();
	if (error)
		goto out;

	error = nfsd_init_socks(port);
	if (error)
		goto failure;

	error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
 failure:
	svc_destroy(nfsd_serv);		/* Release server */
 out:
	unlock_kernel();
	return error;
}
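
/*
 * Track how busy the server is: record, in jiffies, how long we spend with
 * each fraction (decile) of threads busy, accumulating the time into
 * nfsdstats.th_usage[] and counting fully-busy intervals in th_fullcnt.
 */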
static inline void
update_thread_usage(int busy_threads)
{
	unsigned long prev_call;
	unsigned long diff;
	int decile;

	spin_lock(&nfsd_call_lock);
	prev_call = nfsd_last_call;
	nfsd_last_call = jiffies;
	decile = busy_threads*10/nfsdstats.th_cnt;
	if (decile > 0 && decile <= 10) {
		diff = nfsd_last_call - prev_call;
		if ((nfsdstats.th_usage[decile-1] += diff) >= NFSD_USAGE_WRAP)
			nfsdstats.th_usage[decile-1] -= NFSD_USAGE_WRAP;
		if (decile == 10)
			nfsdstats.th_fullcnt++;
	}
	spin_unlock(&nfsd_call_lock);
}
/*
 * This is the NFS server kernel thread
 */
static void
nfsd(struct svc_rqst *rqstp)
{
	struct fs_struct *fsp;
	int		err;
	sigset_t shutdown_mask, allowed_mask;

	/* Lock module and set up kernel thread */
	lock_kernel();
	daemonize("nfsd");

	/* After daemonize() this kernel thread shares current->fs
	 * with the init process. We need to create files with a
	 * umask of 0 instead of init's umask. */
	fsp = copy_fs_struct(current->fs);
	if (!fsp) {
		printk("Unable to start nfsd thread: out of memory\n");
		goto out;
	}
	exit_fs(current);
	current->fs = fsp;
	current->fs->umask = 0;

	siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
	siginitsetinv(&allowed_mask, ALLOWED_SIGS);

	nfsdstats.th_cnt++;

	rqstp->rq_task = current;

	unlock_kernel();

	/*
	 * We want less throttling in balance_dirty_pages() so that nfs to
	 * localhost doesn't cause nfsd to lock up due to all the client's
	 * dirty pages.
	 */
	current->flags |= PF_LESS_THROTTLE;
	set_freezable();
	/*
	 * The main request loop
	 */
	for (;;) {
		/* Block all but the shutdown signals */
		sigprocmask(SIG_SETMASK, &shutdown_mask, NULL);

		/*
		 * Find a socket with data available and call its
		 * recvfrom routine.
		 */
		while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
			;
		if (err < 0)
			break;
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_inc(&nfsd_busy);

		/* Lock the export hash tables for reading. */
		exp_readlock();

		/* Process request with signals blocked. */
		sigprocmask(SIG_SETMASK, &allowed_mask, NULL);

		svc_process(rqstp);

		/* Unlock export hash tables */
		exp_readunlock();
		update_thread_usage(atomic_read(&nfsd_busy));
		atomic_dec(&nfsd_busy);
	}
	if (err != -EINTR) {
		printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
	} else {
		unsigned int	signo;

		for (signo = 1; signo <= _NSIG; signo++)
			if (sigismember(&current->pending.signal, signo) &&
			    !sigismember(&current->blocked, signo))
				break;
		killsig = signo;
	}
	/* Clear signals before calling svc_exit_thread() */
	flush_signals(current);

	lock_kernel();

	nfsdstats.th_cnt--;

 out:
	/* Release the thread */
	svc_exit_thread(rqstp);

	/* Release module */
	unlock_kernel();
	module_put_and_exit(0);
}
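
/*
 * Map errors a procedure may return onto something the client's protocol
 * version can understand: NFSv2 has no equivalent of nfserr_jukebox, so the
 * request is dropped instead, and nfserr_wrongsec only exists in NFSv4, so
 * older versions see nfserr_acces.
 */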
static __be32 map_new_errors(u32 vers, __be32 nfserr)
{
	if (nfserr == nfserr_jukebox && vers == 2)
		return nfserr_dropit;
	if (nfserr == nfserr_wrongsec && vers < 4)
		return nfserr_acces;
	return nfserr;
}
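
/*
 * Common dispatch for all NFS versions: answer from the duplicate reply
 * cache when possible, otherwise decode the arguments, run the procedure,
 * encode the reply, and leave a copy in the cache.
 */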
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	struct svc_procedure	*proc;
	kxdrproc_t		xdr;
	__be32			nfserr;
	__be32			*nfserrp;

	dprintk("nfsd_dispatch: vers %d proc %d\n",
				rqstp->rq_vers, rqstp->rq_proc);
	proc = rqstp->rq_procinfo;

	/* Check whether we have this call in the cache. */
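	/* RC_INTR and RC_DROPIT mean no reply should be sent; RC_REPLY means
	 * nfsd_cache_lookup() has already filled in the cached reply, so we
	 * only need to return 1; RC_DOIT falls through to normal processing. */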
	switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
	case RC_INTR:
	case RC_DROPIT:
		return 0;
	case RC_REPLY:
		return 1;
	case RC_DOIT:;
		/* do it */
	}
	/* Decode arguments */
	xdr = proc->pc_decode;
	if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
			rqstp->rq_argp)) {
		dprintk("nfsd: failed to decode arguments!\n");
		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
		*statp = rpc_garbage_args;
		return 1;
	}

	/* need to grab the location to store the status, as
	 * nfsv4 does some encoding while processing
	 */
	nfserrp = rqstp->rq_res.head[0].iov_base
		+ rqstp->rq_res.head[0].iov_len;
	rqstp->rq_res.head[0].iov_len += sizeof(__be32);
	/* Now call the procedure handler, and encode NFS status. */
	nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
	nfserr = map_new_errors(rqstp->rq_vers, nfserr);
	if (nfserr == nfserr_dropit) {
		dprintk("nfsd: Dropping request; may be revisited later\n");
		nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
		return 0;
	}

	if (rqstp->rq_proc != 0)
		*nfserrp++ = nfserr;
	/* Encode result.
	 * For NFSv2, additional info is never returned in case of an error.
	 */
	if (!(nfserr && rqstp->rq_vers == 2)) {
		xdr = proc->pc_encode;
		if (xdr && !xdr(rqstp, nfserrp,
				rqstp->rq_resp)) {
			/* Failed to encode result. Release cache entry */
			dprintk("nfsd: failed to encode result!\n");
			nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
			*statp = rpc_system_err;
			return 1;
		}
	}

	/* Store reply in cache. */
	nfsd_cache_update(rqstp, proc->pc_cachetype, statp + 1);
	return 1;
}