/*
 *   Copyright (C) International Business Machines Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/* Note that BB means BUGBUG (ie something to fix eventually) */

#include <linux/module.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#define DECLARE_GLOBALS_HERE
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
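/* Explanatory note (added): 0xFF534D42 spells out the SMB signature bytes
   0xFF 'S' 'M' 'B' (0xFF 0x53 0x4D 0x42) as a single hex constant; it is
   reused below as the superblock magic and the statfs f_type value. */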
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */

#ifdef CONFIG_CIFS_EXPERIMENTAL
extern struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct *dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;

unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
cifs_read_super(struct super_block *sb, void *data,
                const char *devname, int silent)
        struct cifs_sb_info *cifs_sb;

        /* BB should we make this contingent on mount parm? */
        sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
        sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        cifs_sb = CIFS_SB(sb);

        rc = cifs_mount(sb, cifs_sb, data, devname);
                        ("cifs_mount failed w/return code = %d", rc));
                goto out_mount_failed;
        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
#ifdef CONFIG_CIFS_EXPERIMENTAL
        if (experimEnabled != 0)
                sb->s_export_op = &cifs_export_ops;
#endif /* EXPERIMENTAL */
/*      if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
            sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
        sb->s_qcop = &cifs_quotactl_ops;
#endif /* QUOTA */
        sb->s_blocksize = CIFS_MAX_MSGSIZE;
        sb->s_blocksize_bits = 14;      /* default 2**14 = CIFS_MAX_MSGSIZE */
        inode = iget(sb, ROOT_I);

        sb->s_root = d_alloc_root(inode);

        cERROR(1, ("cifs_read_super: get root inode failed"));

        if (cifs_sb->local_nls)
                unload_nls(cifs_sb->local_nls);
cifs_put_super(struct super_block *sb)
        struct cifs_sb_info *cifs_sb;

        cFYI(1, ("In cifs_put_super"));
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL) {
                cFYI(1, ("Empty cifs superblock info passed to unmount"));

        rc = cifs_umount(sb, cifs_sb);
                cERROR(1, ("cifs_umount failed with return code %d", rc));
        unload_nls(cifs_sb->local_nls);
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct super_block *sb = dentry->d_sb;
        int rc = -EOPNOTSUPP;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;

        cifs_sb = CIFS_SB(sb);
        pTcon = cifs_sb->tcon;

        buf->f_type = CIFS_MAGIC_NUMBER;

        /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
        buf->f_namelen = PATH_MAX;      /* PATH_MAX may be too long - it would
                                           presumably be total path, but note
                                           that some servers (including Samba 3)
                                           have a shorter maximum path */
        buf->f_files = 0;       /* undefined */
        buf->f_ffree = 0;       /* unlimited */

        /* BB we could add a second check for a QFS Unix capability bit */
        /* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
        if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
            le64_to_cpu(pTcon->fsUnixInfo.Capability)))
                rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

        /* Only need to call the old QFSInfo if failed
                if (pTcon->ses->capabilities & CAP_NT_SMBS)
                        rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

        /* Some old Windows servers also do not support level 103, retry with
           older level one if old server failed the previous call or we
           bypassed it because we detected that this was an older LANMAN sess */
                rc = SMBOldQFSInfo(xid, pTcon, buf);

        /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
        return 0;               /* always return success? what if volume is no
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(inode->i_sb);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {

        } else /* file mode might have been restricted at mount time
                  on the client (above and beyond ACL on servers) for
                  servers which do not support setting and viewing mode bits,
                  so allowing client to check permissions is useful */
                return generic_permission(inode, mask, NULL);
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
        struct cifsInodeInfo *cifs_inode;
        cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);

        cifs_inode->cifsAttrs = 0x20;   /* default */
        atomic_set(&cifs_inode->inUse, 0);
        cifs_inode->time = 0;
        /* Until the file is open and we have gotten oplock
           info back from the server, can not assume caching of
           file data or metadata */
        cifs_inode->clientCanCacheRead = FALSE;
        cifs_inode->clientCanCacheAll = FALSE;
        cifs_inode->vfs_inode.i_blkbits = 14;   /* 2**14 = CIFS_MAX_MSGSIZE */

        /* Can not set i_flags here - they get immediately overwritten
           to zero by the VFS */
/*      cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
        INIT_LIST_HEAD(&cifs_inode->openFileList);
        return &cifs_inode->vfs_inode;

cifs_destroy_inode(struct inode *inode)
        kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
cifs_show_options(struct seq_file *s, struct vfsmount *m)
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(m->mnt_sb);

                        /* BB add prepath to mount options displayed */
                        seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
                        if (cifs_sb->tcon->ses) {
                                if (cifs_sb->tcon->ses->userName)
                                        seq_printf(s, ",username=%s",
                                                   cifs_sb->tcon->ses->userName);
                                if (cifs_sb->tcon->ses->domainName)
                                        seq_printf(s, ",domain=%s",
                                                   cifs_sb->tcon->ses->domainName);

        seq_printf(s, ",rsize=%d", cifs_sb->rsize);
        seq_printf(s, ",wsize=%d", cifs_sb->wsize);
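/* Illustrative only (values made up, not from the original source): with the
   seq_printf calls above, a cifs mount would appear in /proc/mounts roughly as

       //server/share /mnt/cifs cifs rw,unc=\\server\share,username=guest,rsize=16384,wsize=57344 0 0

   where unc/username/domain come from the tcon and session, and rsize/wsize
   from the superblock info. */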
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

                cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;
356 cFYI(1,("set type: 0x%x id: %d",quota_type
,qid
));
int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

                cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

                cFYI(1, ("pqstats %p", qstats));
static struct quotactl_ops cifs_quotactl_ops = {
        .set_xquota     = cifs_xquota_set,
        .get_xquota     = cifs_xquota_get,
        .set_xstate     = cifs_xstate_set,
        .get_xstate     = cifs_xstate_get,
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *tcon;

        if (!(flags & MNT_FORCE))

        cifs_sb = CIFS_SB(vfsmnt->mnt_sb);

        tcon = cifs_sb->tcon;

        down(&tcon->tconSem);
        if (atomic_read(&tcon->useCount) == 1)
                tcon->tidStatus = CifsExiting;

        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
        /* cancel_notify_requests(tcon); */
        if (tcon->ses && tcon->ses->server)
                cFYI(1, ("wake up tasks now - umount begin not complete"));
                wake_up_all(&tcon->ses->server->request_q);
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1); /* yield */
                /* we have to kick the requests once more */
                wake_up_all(&tcon->ses->server->response_q);

        /* BB FIXME - finish add checks for tidStatus BB */
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)

static int cifs_remount(struct super_block *sb, int *flags, char *data)
        *flags |= MS_NODIRATIME;

static const struct super_operations cifs_super_ops = {
        .read_inode     = cifs_read_inode,
        .put_super      = cifs_put_super,
        .statfs         = cifs_statfs,
        .alloc_inode    = cifs_alloc_inode,
        .destroy_inode  = cifs_destroy_inode,
/*      .drop_inode     = generic_delete_inode,
        .delete_inode   = cifs_delete_inode, */ /* Do not need the above two functions
           unless later we add lazy close of inodes or unless the kernel forgets to call
           us with the same number of releases (closes) as opens */
        .show_options   = cifs_show_options,
        .umount_begin   = cifs_umount_begin,
        .remount_fs     = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
        .show_stats     = cifs_show_stats,
cifs_get_sb(struct file_system_type *fs_type,
            int flags, const char *dev_name, void *data, struct vfsmount *mnt)
        struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

        cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

        rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
                up_write(&sb->s_umount);
                deactivate_super(sb);

        sb->s_flags |= MS_ACTIVE;
        return simple_set_mnt(mnt, sb);
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;

        written = generic_file_aio_write(iocb, iov, nr_segs, pos);
        if (!CIFS_I(inode)->clientCanCacheAll)
                filemap_fdatawrite(inode->i_mapping);
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
        /* origin == SEEK_END => we must revalidate the cached file length */
        if (origin == SEEK_END) {

                /* some applications poll for the file length in this strange
                   way so we must seek to end on non-oplocked files by
                   setting the revalidate time to zero */
                if (file->f_path.dentry->d_inode)
                        CIFS_I(file->f_path.dentry->d_inode)->time = 0;

                retval = cifs_revalidate(file->f_path.dentry);
                        return (loff_t)retval;

        return remote_llseek(file, offset, origin);
static struct file_system_type cifs_fs_type = {
        .owner          = THIS_MODULE,
        .get_sb         = cifs_get_sb,
        .kill_sb        = kill_anon_super,
const struct inode_operations cifs_dir_inode_ops = {
        .create         = cifs_create,
        .lookup         = cifs_lookup,
        .getattr        = cifs_getattr,
        .unlink         = cifs_unlink,
        .link           = cifs_hardlink,
        .rename         = cifs_rename,
        .permission     = cifs_permission,
/*      revalidate:     cifs_revalidate, */
        .setattr        = cifs_setattr,
        .symlink        = cifs_symlink,
#ifdef CONFIG_CIFS_XATTR
        .setxattr       = cifs_setxattr,
        .getxattr       = cifs_getxattr,
        .listxattr      = cifs_listxattr,
        .removexattr    = cifs_removexattr,
const struct inode_operations cifs_file_inode_ops = {
/*      revalidate:     cifs_revalidate, */
        .setattr        = cifs_setattr,
        .getattr        = cifs_getattr, /* do we need this anymore? */
        .rename         = cifs_rename,
        .permission     = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
        .setxattr       = cifs_setxattr,
        .getxattr       = cifs_getxattr,
        .listxattr      = cifs_listxattr,
        .removexattr    = cifs_removexattr,
const struct inode_operations cifs_symlink_inode_ops = {
        .readlink       = generic_readlink,
        .follow_link    = cifs_follow_link,
        .put_link       = cifs_put_link,
        .permission     = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate:  cifs_revalidate,
           setattr:     cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
        .setxattr       = cifs_setxattr,
        .getxattr       = cifs_getxattr,
        .listxattr      = cifs_listxattr,
        .removexattr    = cifs_removexattr,
const struct file_operations cifs_file_ops = {
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = cifs_file_aio_write,
        .release        = cifs_close,
        .mmap           = cifs_file_mmap,
        .sendfile       = generic_file_sendfile,
        .llseek         = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify     = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_direct_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read           = cifs_user_read,
        .write          = cifs_user_write,
        .release        = cifs_close,
        .sendfile       = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
        .llseek         = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify     = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_nobrl_ops = {
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = cifs_file_aio_write,
        .release        = cifs_close,
        .mmap           = cifs_file_mmap,
        .sendfile       = generic_file_sendfile,
        .llseek         = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify     = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_direct_nobrl_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read           = cifs_user_read,
        .write          = cifs_user_write,
        .release        = cifs_close,
        .sendfile       = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
        .llseek         = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify     = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_dir_ops = {
        .readdir        = cifs_readdir,
        .release        = cifs_closedir,
        .read           = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify     = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
        struct cifsInodeInfo *cifsi = inode;

        if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                inode_init_once(&cifsi->vfs_inode);
                INIT_LIST_HEAD(&cifsi->lockList);
cifs_init_inodecache(void)
        cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
                                              sizeof (struct cifsInodeInfo),
                                              0, (SLAB_RECLAIM_ACCOUNT |
                                              cifs_init_once, NULL);
        if (cifs_inode_cachep == NULL)

cifs_destroy_inodecache(void)
        kmem_cache_destroy(cifs_inode_cachep);
cifs_init_request_bufs(void)
        if (CIFSMaxBufSize < 8192) {
        /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
           Unicode path name has to fit in any SMB/CIFS path based frames */
                CIFSMaxBufSize = 8192;
        } else if (CIFSMaxBufSize > 1024*127) {
                CIFSMaxBufSize = 1024 * 127;

        CIFSMaxBufSize &= 0x1FE00;      /* Round size to even 512 byte mult */
/*      cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
        cifs_req_cachep = kmem_cache_create("cifs_request",
                                            MAX_CIFS_HDR_SIZE, 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_req_cachep == NULL)

        else if (cifs_min_rcv > 64) {
                cERROR(1, ("cifs_min_rcv set to maximum (64)"));

        cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,

        if (cifs_req_poolp == NULL) {
                kmem_cache_destroy(cifs_req_cachep);
        /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
           almost all handle based requests (but not write response, nor is it
           sufficient for path based requests).  A smaller size would have
           been more efficient (compacting multiple slab items on one 4k page)
           for the case in which debug was on, but this larger size allows
           more SMBs to use small buffer alloc and is still much more
           efficient to alloc 1 per page off the slab compared to 17K (5page)
           alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
                                               MAX_CIFS_SMALL_BUFFER_SIZE, 0,
                                               SLAB_HWCACHE_ALIGN,
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);

        if (cifs_min_small < 2)

        else if (cifs_min_small > 256) {
                cifs_min_small = 256;
                cFYI(1, ("cifs_min_small set to maximum (256)"));

        cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,

        if (cifs_sm_req_poolp == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                kmem_cache_destroy(cifs_sm_req_cachep);
cifs_destroy_request_bufs(void)
        mempool_destroy(cifs_req_poolp);
        kmem_cache_destroy(cifs_req_cachep);
        mempool_destroy(cifs_sm_req_poolp);
        kmem_cache_destroy(cifs_sm_req_cachep);
        cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
                                            sizeof (struct mid_q_entry), 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_mid_cachep == NULL)

        /* 3 is a reasonable minimum number of simultaneous operations */
        cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
        if (cifs_mid_poolp == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);

        cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
                                               sizeof (struct oplock_q_entry), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_oplock_cachep == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);
                mempool_destroy(cifs_mid_poolp);

cifs_destroy_mids(void)
        mempool_destroy(cifs_mid_poolp);
        kmem_cache_destroy(cifs_mid_cachep);
        kmem_cache_destroy(cifs_oplock_cachep);
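/*
 * Overview comment (added): cifs_oplock_thread below is a kernel thread
 * started from init_cifs via kthread_run.  It polls GlobalOplock_Q (sleeping
 * up to 39 seconds when the queue is empty); for each queued entry it flushes
 * the inode's dirty pages and, when read caching is no longer permitted,
 * waits for writeback and invalidates the page cache, then acknowledges the
 * oplock break to the server with LOCKING_ANDX_OPLOCK_RELEASE unless the tcon
 * is marked CifsNeedReconnect.
 */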
static int cifs_oplock_thread(void *dummyarg)
        struct oplock_q_entry *oplock_item;
        struct cifsTconInfo *pTcon;
        struct inode *inode;

                spin_lock(&GlobalMid_Lock);
                if (list_empty(&GlobalOplock_Q)) {
                        spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(39*HZ);

                        oplock_item = list_entry(GlobalOplock_Q.next,
                                                 struct oplock_q_entry, qhead);

                        cFYI(1, ("found oplock item to write out"));
                        pTcon = oplock_item->tcon;
                        inode = oplock_item->pinode;
                        netfid = oplock_item->netfid;
                        spin_unlock(&GlobalMid_Lock);
                        DeleteOplockQEntry(oplock_item);
                        /* can not grab inode sem here since it would
                           deadlock when oplock received on delete
                           since vfs_unlink holds the i_mutex across
                        /* mutex_lock(&inode->i_mutex); */
                        if (S_ISREG(inode->i_mode)) {
                                rc = filemap_fdatawrite(inode->i_mapping);
                                if (CIFS_I(inode)->clientCanCacheRead == 0) {
                                        filemap_fdatawait(inode->i_mapping);
                                        invalidate_remote_inode(inode);

                        /* mutex_unlock(&inode->i_mutex); */

                                CIFS_I(inode)->write_behind_rc = rc;
                        cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

                        /* releasing a stale oplock after recent reconnection
                           of smb session using a now incorrect file
                           handle is not a data integrity issue but do
                           not bother sending an oplock release if session
                           to server still is disconnected since oplock
                           already released by the server in that case */
                        if (pTcon->tidStatus != CifsNeedReconnect) {
                                rc = CIFSSMBLock(0, pTcon, netfid,
                                                 0 /* len */ , 0 /* offset */, 0,
                                                 0, LOCKING_ANDX_OPLOCK_RELEASE,
                                cFYI(1, ("Oplock release rc = %d ", rc));

                        spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(1);  /* yield in case q were corrupt */

        } while (!kthread_should_stop());
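/*
 * Overview comment (added): cifs_dnotify_thread below is also started from
 * init_cifs.  Every 15 seconds it walks GlobalSMBSessionList under
 * GlobalSMBSeslock and, for any server that still has requests marked in
 * flight, wakes all waiters on that server's response_q so stuck requests
 * can wake up and error out.
 */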
static int cifs_dnotify_thread(void *dummyarg)
        struct list_head *tmp;
        struct cifsSesInfo *ses;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(15*HZ);
                read_lock(&GlobalSMBSeslock);
                /* check if any stuck requests that need
                   to be woken up and wakeq so the
                   thread can wake up and error out */
                list_for_each(tmp, &GlobalSMBSessionList) {
                        ses = list_entry(tmp, struct cifsSesInfo,
                        if (ses && ses->server &&
                            atomic_read(&ses->server->inFlight))
                                wake_up_all(&ses->server->response_q);

                read_unlock(&GlobalSMBSeslock);
        } while (!kthread_should_stop());
#ifdef CONFIG_PROC_FS

/*      INIT_LIST_HEAD(&GlobalServerList); */   /* BB not implemented yet */
        INIT_LIST_HEAD(&GlobalSMBSessionList);
        INIT_LIST_HEAD(&GlobalTreeConnectionList);
        INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
        INIT_LIST_HEAD(&GlobalDnotifyReqList);
        INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);

        /*
         * Initialize Global counters
         */
        atomic_set(&sesInfoAllocCount, 0);
        atomic_set(&tconInfoAllocCount, 0);
        atomic_set(&tcpSesAllocCount, 0);
        atomic_set(&tcpSesReconnectCount, 0);
        atomic_set(&tconInfoReconnectCount, 0);

        atomic_set(&bufAllocCount, 0);
        atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
        atomic_set(&totBufAllocCount, 0);
        atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

        atomic_set(&midCount, 0);
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
        GlobalMaxActiveXid = 0;
        memset(Local_System_Name, 0, 15);
        rwlock_init(&GlobalSMBSeslock);
        spin_lock_init(&GlobalMid_Lock);
        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cFYI(1, ("cifs_max_pending set to min of 2"));
        } else if (cifs_max_pending > 256) {
                cifs_max_pending = 256;
                cFYI(1, ("cifs_max_pending set to max of 256"));

        rc = cifs_init_inodecache();

        rc = cifs_init_mids();
                goto out_destroy_inodecache;

        rc = cifs_init_request_bufs();
                goto out_destroy_mids;

        rc = register_filesystem(&cifs_fs_type);
                goto out_destroy_request_bufs;

        oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
        if (IS_ERR(oplockThread)) {
                rc = PTR_ERR(oplockThread);
                cERROR(1, ("error %d create oplock thread", rc));
                goto out_unregister_filesystem;

        dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
        if (IS_ERR(dnotifyThread)) {
                rc = PTR_ERR(dnotifyThread);
                cERROR(1, ("error %d create dnotify thread", rc));
                goto out_stop_oplock_thread;

 out_stop_oplock_thread:
        kthread_stop(oplockThread);
 out_unregister_filesystem:
        unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
        cifs_destroy_request_bufs();

        cifs_destroy_mids();
 out_destroy_inodecache:
        cifs_destroy_inodecache();
#ifdef CONFIG_PROC_FS

        cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS

        unregister_filesystem(&cifs_fs_type);
        cifs_destroy_inodecache();
        cifs_destroy_mids();
        cifs_destroy_request_bufs();
        kthread_stop(oplockThread);
        kthread_stop(dnotifyThread);

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");  /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)