/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2004
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct * oplockThread; /* remove sparse warning */
struct task_struct * oplockThread = NULL;
extern struct task_struct * dnotifyThread; /* remove sparse warning */
struct task_struct * dnotifyThread = NULL;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");
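/* The four tunables above are module parameters and can be overridden when
 * the module is loaded, for example (values shown are only illustrative):
 *
 *	modprobe cifs CIFSMaxBufSize=65536 cifs_max_pending=128
 *
 * Out-of-range values are clamped later, in cifs_init_request_bufs() and
 * init_cifs(). */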
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern kmem_cache_t *cifs_oplock_cachep;
static int
cifs_read_super(struct super_block *sb, void *data,
                const char *devname, int silent)
{
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
        int rc = 0;

        sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
        sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL)
                return -ENOMEM;

        rc = cifs_mount(sb, cifs_sb, data, devname);

        if (rc) {
                if (!silent)
                        cERROR(1,
                               ("cifs_mount failed w/return code = %d", rc));
                goto out_mount_failed;
        }

        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
/*      if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
                sb->s_blocksize =
                    cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
        sb->s_qcop = &cifs_quotactl_ops;
#endif
        sb->s_blocksize = CIFS_MAX_MSGSIZE;
        sb->s_blocksize_bits = 14;      /* default 2**14 = CIFS_MAX_MSGSIZE */
        inode = iget(sb, ROOT_I);

        if (!inode) {
                rc = -ENOMEM;
                goto out_no_root;
        }

        sb->s_root = d_alloc_root(inode);

        if (!sb->s_root) {
                rc = -ENOMEM;
                goto out_no_root;
        }

        return 0;

out_no_root:
        cERROR(1, ("cifs_read_super: get root inode failed"));
        if (inode)
                iput(inode);

out_mount_failed:
        if (cifs_sb) {
                if (cifs_sb->local_nls)
                        unload_nls(cifs_sb->local_nls);
                kfree(cifs_sb);
        }
        return rc;
}
static void
cifs_put_super(struct super_block *sb)
{
        int rc = 0;
        struct cifs_sb_info *cifs_sb;

        cFYI(1, ("In cifs_put_super"));
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL) {
                cFYI(1, ("Empty cifs superblock info passed to unmount"));
                return;
        }
        rc = cifs_umount(sb, cifs_sb);
        if (rc) {
                cERROR(1, ("cifs_umount failed with return code %d", rc));
        }
        unload_nls(cifs_sb->local_nls);
        kfree(cifs_sb);
        return;
}
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        int xid;
        int rc = -EOPNOTSUPP;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;

        xid = GetXid();

        cifs_sb = CIFS_SB(sb);
        pTcon = cifs_sb->tcon;

        buf->f_type = CIFS_MAGIC_NUMBER;

        /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
        buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
                                      presumably be total path, but note
                                      that some servers (including Samba 3)
                                      have a shorter maximum path */
        buf->f_files = 0;       /* undefined */
        buf->f_ffree = 0;       /* unlimited */

/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
        if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
                        le64_to_cpu(pTcon->fsUnixInfo.Capability)))
                rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

        /* Only need to call the old QFSInfo if failed
           on newer one */
        if (rc)
                rc = CIFSSMBQFSInfo(xid, pTcon, buf);

        /* Old Windows servers do not support level 103, retry with level
           one if old server failed the previous call */
        if (rc)
                rc = SMBOldQFSInfo(xid, pTcon, buf);
        /*
           int f_type;
           __fsid_t f_fsid;
           int f_namelen;  */
        /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
        FreeXid(xid);
        return 0;               /* always return success? what if volume is no
                                   longer available? */
}
static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
{
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(inode->i_sb);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
                return 0;
        } else /* file mode might have been restricted at mount time
                  on the client (above and beyond ACL on servers) for
                  servers which do not support setting and viewing mode bits,
                  so allowing client to check permissions is useful */
                return generic_permission(inode, mask, NULL);
}
static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
        struct cifsInodeInfo *cifs_inode;
        cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
        if (!cifs_inode)
                return NULL;
        cifs_inode->cifsAttrs = 0x20;   /* default */
        atomic_set(&cifs_inode->inUse, 0);
        cifs_inode->time = 0;
        /* Until the file is open and we have gotten oplock
           info back from the server, can not assume caching of
           file data or metadata */
        cifs_inode->clientCanCacheRead = FALSE;
        cifs_inode->clientCanCacheAll = FALSE;
        cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
        cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
        INIT_LIST_HEAD(&cifs_inode->openFileList);
        return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
        kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(m->mnt_sb);

        if (cifs_sb) {
                if (cifs_sb->tcon) {
                        seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
                        if (cifs_sb->tcon->ses) {
                                if (cifs_sb->tcon->ses->userName)
                                        seq_printf(s, ",username=%s",
                                                   cifs_sb->tcon->ses->userName);
                                if (cifs_sb->tcon->ses->domainName)
                                        seq_printf(s, ",domain=%s",
                                                   cifs_sb->tcon->ses->domainName);
                        }
                }
                seq_printf(s, ",rsize=%d", cifs_sb->rsize);
                seq_printf(s, ",wsize=%d", cifs_sb->wsize);
        }
        return 0;
}
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
                struct fs_disk_quota * pdquota)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb)
                pTcon = cifs_sb->tcon;
        else
                return -EIO;

        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
        } else {
                rc = -EIO;
        }

        FreeXid(xid);
        return rc;
}

int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
                struct fs_disk_quota * pdquota)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb)
                pTcon = cifs_sb->tcon;
        else
                return -EIO;

        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
        } else {
                rc = -EIO;
        }

        FreeXid(xid);
        return rc;
}

int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb)
                pTcon = cifs_sb->tcon;
        else
                return -EIO;

        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
        } else {
                rc = -EIO;
        }

        FreeXid(xid);
        return rc;
}

int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb) {
                pTcon = cifs_sb->tcon;
        } else {
                return -EIO;
        }
        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("pqstats %p", qstats));
        } else {
                rc = -EIO;
        }

        FreeXid(xid);
        return rc;
}
static struct quotactl_ops cifs_quotactl_ops = {
        .set_xquota     = cifs_xquota_set,
        .get_xquota     = cifs_xquota_get,
        .set_xstate     = cifs_xstate_set,
        .get_xstate     = cifs_xstate_get,
};
#endif
static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
{
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo * tcon;

        if (!(flags & MNT_FORCE))
                return;
        cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
        if (cifs_sb == NULL)
                return;

        tcon = cifs_sb->tcon;
        if (tcon == NULL)
                return;
        down(&tcon->tconSem);
        if (atomic_read(&tcon->useCount) == 1)
                tcon->tidStatus = CifsExiting;
        up(&tcon->tconSem);

        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
        /* cancel_notify_requests(tcon); */
        if (tcon->ses && tcon->ses->server) {
                cFYI(1, ("wake up tasks now - umount begin not complete"));
                wake_up_all(&tcon->ses->server->request_q);
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1); /* yield */
                /* we have to kick the requests once more */
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1);
        }
/* BB FIXME - finish add checks for tidStatus BB */

        return;
}

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
        *flags |= MS_NODIRATIME;
        return 0;
}
struct super_operations cifs_super_ops = {
        .read_inode = cifs_read_inode,
        .put_super = cifs_put_super,
        .statfs = cifs_statfs,
        .alloc_inode = cifs_alloc_inode,
        .destroy_inode = cifs_destroy_inode,
/*      .drop_inode     = generic_delete_inode,
        .delete_inode   = cifs_delete_inode, */ /* Do not need the above two
   functions unless later we add lazy close of inodes or unless the kernel
   forgets to call us with the same number of releases (closes) as opens */
        .show_options = cifs_show_options,
        .umount_begin = cifs_umount_begin,
        .remount_fs = cifs_remount,
};
static int
cifs_get_sb(struct file_system_type *fs_type,
            int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        int rc;
        struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

        cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

        if (IS_ERR(sb))
                return PTR_ERR(sb);

        sb->s_flags = flags;

        rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
        if (rc) {
                up_write(&sb->s_umount);
                deactivate_super(sb);
                return rc;
        }
        sb->s_flags |= MS_ACTIVE;
        return simple_set_mnt(mnt, sb);
}
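/* Write path note: cifs_file_aio_write() below lets the generic code copy
 * data into the page cache and then kicks off writeback immediately unless
 * this client holds an exclusive oplock (clientCanCacheAll), since cached
 * dirty data could otherwise go stale relative to other clients. */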
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
        ssize_t written;

        written = generic_file_aio_write(iocb, iov, nr_segs, pos);
        if (!CIFS_I(inode)->clientCanCacheAll)
                filemap_fdatawrite(inode->i_mapping);
        return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
        /* origin == SEEK_END => we must revalidate the cached file length */
        if (origin == 2) {
                int retval = cifs_revalidate(file->f_dentry);
                if (retval < 0)
                        return (loff_t)retval;
        }
        return remote_llseek(file, offset, origin);
}

static struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .name = "cifs",
        .get_sb = cifs_get_sb,
        .kill_sb = kill_anon_super,
        /*  .fs_flags */
};
struct inode_operations cifs_dir_inode_ops = {
        .create = cifs_create,
        .lookup = cifs_lookup,
        .getattr = cifs_getattr,
        .unlink = cifs_unlink,
        .link = cifs_hardlink,
        .mkdir = cifs_mkdir,
        .rmdir = cifs_rmdir,
        .rename = cifs_rename,
        .permission = cifs_permission,
/*      revalidate: cifs_revalidate, */
        .setattr = cifs_setattr,
        .symlink = cifs_symlink,
        .mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_file_inode_ops = {
/*      revalidate: cifs_revalidate, */
        .setattr = cifs_setattr,
        .getattr = cifs_getattr, /* do we need this anymore? */
        .rename = cifs_rename,
        .permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};

struct inode_operations cifs_symlink_inode_ops = {
        .readlink = generic_readlink,
        .follow_link = cifs_follow_link,
        .put_link = cifs_put_link,
        .permission = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate: cifs_revalidate,
           setattr:    cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};
const struct file_operations cifs_file_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .mmap = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
        .ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_nobrl_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .mmap = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
        .ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
        .readdir = cifs_readdir,
        .release = cifs_closedir,
        .read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
        .ioctl = cifs_ioctl,
};
static void
cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
{
        struct cifsInodeInfo *cifsi = inode;

        if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                inode_init_once(&cifsi->vfs_inode);
                INIT_LIST_HEAD(&cifsi->lockList);
        }
}

static int
cifs_init_inodecache(void)
{
        cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
                                              sizeof (struct cifsInodeInfo),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                                  SLAB_MEM_SPREAD),
                                              cifs_init_once, NULL);
        if (cifs_inode_cachep == NULL)
                return -ENOMEM;

        return 0;
}

static void
cifs_destroy_inodecache(void)
{
        kmem_cache_destroy(cifs_inode_cachep);
}
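/* SMB request buffers come from two slab caches: a large one sized
 * CIFSMaxBufSize + MAX_CIFS_HDR_SIZE (path based and large requests) and a
 * small one of MAX_CIFS_SMALL_BUFFER_SIZE (most other requests).  Each cache
 * is fronted by a mempool (cifs_req_poolp / cifs_sm_req_poolp) so that a
 * minimum number of buffers remains allocatable under memory pressure. */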
static int
cifs_init_request_bufs(void)
{
        if (CIFSMaxBufSize < 8192) {
        /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
           Unicode path name has to fit in any SMB/CIFS path based frames */
                CIFSMaxBufSize = 8192;
        } else if (CIFSMaxBufSize > 1024*127) {
                CIFSMaxBufSize = 1024 * 127;
        } else {
                CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */
        }
/*      cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
        cifs_req_cachep = kmem_cache_create("cifs_request",
                                            CIFSMaxBufSize +
                                            MAX_CIFS_HDR_SIZE, 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_req_cachep == NULL)
                return -ENOMEM;

        if (cifs_min_rcv < 1)
                cifs_min_rcv = 1;
        else if (cifs_min_rcv > 64) {
                cifs_min_rcv = 64;
                cERROR(1, ("cifs_min_rcv set to maximum (64)"));
        }

        cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
                                                  cifs_req_cachep);

        if (cifs_req_poolp == NULL) {
                kmem_cache_destroy(cifs_req_cachep);
                return -ENOMEM;
        }
        /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
           almost all handle based requests (but not write response, nor is it
           sufficient for path based requests).  A smaller size would have
           been more efficient (compacting multiple slab items on one 4k page)
           for the case in which debug was on, but this larger size allows
           more SMBs to use small buffer alloc and is still much more
           efficient to alloc 1 per page off the slab compared to 17K (5page)
           alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
                        MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
                        NULL, NULL);
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                return -ENOMEM;
        }

        if (cifs_min_small < 2)
                cifs_min_small = 2;
        else if (cifs_min_small > 256) {
                cifs_min_small = 256;
                cFYI(1, ("cifs_min_small set to maximum (256)"));
        }

        cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
                                                     cifs_sm_req_cachep);

        if (cifs_sm_req_poolp == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                kmem_cache_destroy(cifs_sm_req_cachep);
                return -ENOMEM;
        }

        return 0;
}

static void
cifs_destroy_request_bufs(void)
{
        mempool_destroy(cifs_req_poolp);
        kmem_cache_destroy(cifs_req_cachep);
        mempool_destroy(cifs_sm_req_poolp);
        kmem_cache_destroy(cifs_sm_req_cachep);
}
static int
cifs_init_mids(void)
{
        cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
                                            sizeof (struct mid_q_entry), 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_mid_cachep == NULL)
                return -ENOMEM;

        /* 3 is a reasonable minimum number of simultaneous operations */
        cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
        if (cifs_mid_poolp == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);
                return -ENOMEM;
        }

        cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
                                               sizeof (struct oplock_q_entry), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_oplock_cachep == NULL) {
                mempool_destroy(cifs_mid_poolp);
                kmem_cache_destroy(cifs_mid_cachep);
                return -ENOMEM;
        }

        return 0;
}

static void
cifs_destroy_mids(void)
{
        mempool_destroy(cifs_mid_poolp);
        kmem_cache_destroy(cifs_mid_cachep);
        kmem_cache_destroy(cifs_oplock_cachep);
}
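/* The cifsoplockd kthread below services queued oplock breaks: it flushes
 * dirty pages of the affected inode, invalidates cached data when read
 * caching has been lost, and then acknowledges the break to the server via a
 * LOCKING_ANDX oplock release (skipped if the session needs reconnect). */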
static int cifs_oplock_thread(void * dummyarg)
{
        struct oplock_q_entry * oplock_item;
        struct cifsTconInfo *pTcon;
        struct inode * inode;
        __u16 netfid;
        int rc;

        do {
                if (try_to_freeze())
                        continue;

                spin_lock(&GlobalMid_Lock);
                if (list_empty(&GlobalOplock_Q)) {
                        spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(39*HZ);
                } else {
                        oplock_item = list_entry(GlobalOplock_Q.next,
                                struct oplock_q_entry, qhead);
                        if (oplock_item) {
                                cFYI(1, ("found oplock item to write out"));
                                pTcon = oplock_item->tcon;
                                inode = oplock_item->pinode;
                                netfid = oplock_item->netfid;
                                spin_unlock(&GlobalMid_Lock);
                                DeleteOplockQEntry(oplock_item);
                                /* can not grab inode sem here since it would
                                   deadlock when oplock received on delete
                                   since vfs_unlink holds the i_mutex across
                                   the call */
                                /* mutex_lock(&inode->i_mutex);*/
                                if (S_ISREG(inode->i_mode)) {
                                        rc = filemap_fdatawrite(inode->i_mapping);
                                        if (CIFS_I(inode)->clientCanCacheRead == 0) {
                                                filemap_fdatawait(inode->i_mapping);
                                                invalidate_remote_inode(inode);
                                        }
                                } else
                                        rc = 0;
                                /* mutex_unlock(&inode->i_mutex);*/
                                if (rc)
                                        CIFS_I(inode)->write_behind_rc = rc;
                                cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

                                /* releasing a stale oplock after recent reconnection
                                   of smb session using a now incorrect file
                                   handle is not a data integrity issue but do
                                   not bother sending an oplock release if session
                                   to server still is disconnected since oplock
                                   already released by the server in that case */
                                if (pTcon->tidStatus != CifsNeedReconnect) {
                                        rc = CIFSSMBLock(0, pTcon, netfid,
                                                0 /* len */ , 0 /* offset */, 0,
                                                0, LOCKING_ANDX_OPLOCK_RELEASE,
                                                0 /* wait flag */);
                                        cFYI(1, ("Oplock release rc = %d ", rc));
                                }
                        } else
                                spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(1);  /* yield in case q were corrupt */
                }
        } while (!kthread_should_stop());

        return 0;
}
static int cifs_dnotify_thread(void * dummyarg)
{
        struct list_head *tmp;
        struct cifsSesInfo *ses;

        do {
                if (try_to_freeze())
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(15*HZ);
                read_lock(&GlobalSMBSeslock);
                /* check if any stuck requests that need
                   to be woken up and wakeq so the
                   thread can wake up and error out */
                list_for_each(tmp, &GlobalSMBSessionList) {
                        ses = list_entry(tmp, struct cifsSesInfo,
                                cifsSessionList);
                        if (ses && ses->server &&
                            atomic_read(&ses->server->inFlight))
                                wake_up_all(&ses->server->response_q);
                }
                read_unlock(&GlobalSMBSeslock);
        } while (!kthread_should_stop());

        return 0;
}
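/* Module init order: proc entries, global lists and counters, the inode,
 * mid and request buffer caches, filesystem registration, and finally the
 * oplock and dnotify kthreads.  Any failure unwinds the steps already
 * completed, in reverse order, via the out_* labels in init_cifs(). */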
static int __init
init_cifs(void)
{
        int rc = 0;
#ifdef CONFIG_PROC_FS
        cifs_proc_init();
#endif
        INIT_LIST_HEAD(&GlobalServerList);      /* BB not implemented yet */
        INIT_LIST_HEAD(&GlobalSMBSessionList);
        INIT_LIST_HEAD(&GlobalTreeConnectionList);
        INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
        INIT_LIST_HEAD(&GlobalDnotifyReqList);
        INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
        atomic_set(&sesInfoAllocCount, 0);
        atomic_set(&tconInfoAllocCount, 0);
        atomic_set(&tcpSesAllocCount, 0);
        atomic_set(&tcpSesReconnectCount, 0);
        atomic_set(&tconInfoReconnectCount, 0);

        atomic_set(&bufAllocCount, 0);
        atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
        atomic_set(&totBufAllocCount, 0);
        atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

        atomic_set(&midCount, 0);
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
        GlobalMaxActiveXid = 0;
        rwlock_init(&GlobalSMBSeslock);
        spin_lock_init(&GlobalMid_Lock);

        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cFYI(1, ("cifs_max_pending set to min of 2"));
        } else if (cifs_max_pending > 256) {
                cifs_max_pending = 256;
                cFYI(1, ("cifs_max_pending set to max of 256"));
        }

        rc = cifs_init_inodecache();
        if (rc)
                goto out_clean_proc;

        rc = cifs_init_mids();
        if (rc)
                goto out_destroy_inodecache;

        rc = cifs_init_request_bufs();
        if (rc)
                goto out_destroy_mids;

        rc = register_filesystem(&cifs_fs_type);
        if (rc)
                goto out_destroy_request_bufs;

        oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
        if (IS_ERR(oplockThread)) {
                rc = PTR_ERR(oplockThread);
                cERROR(1, ("error %d create oplock thread", rc));
                goto out_unregister_filesystem;
        }

        dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
        if (IS_ERR(dnotifyThread)) {
                rc = PTR_ERR(dnotifyThread);
                cERROR(1, ("error %d create dnotify thread", rc));
                goto out_stop_oplock_thread;
        }

        return 0;

 out_stop_oplock_thread:
        kthread_stop(oplockThread);
 out_unregister_filesystem:
        unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
        cifs_destroy_request_bufs();
 out_destroy_mids:
        cifs_destroy_mids();
 out_destroy_inodecache:
        cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
        cifs_proc_clean();
#endif
        return rc;
}
static void __exit
exit_cifs(void)
{
        cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
        cifs_proc_clean();
#endif
        unregister_filesystem(&cifs_fs_type);
        cifs_destroy_inodecache();
        cifs_destroy_mids();
        cifs_destroy_request_bufs();
        kthread_stop(oplockThread);
        kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");  /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)