/*
 *   Copyright (C) International Business Machines Corp., 2002,2004
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>

#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
extern struct task_struct *dnotifyThread; /* remove sparse warning */
struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
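/*
 * cifs_read_super - fill in the superblock for a new cifs mount: allocate the
 * per-superblock cifs_sb_info, hand off to cifs_mount() to establish the
 * session and tree connection, then set the magic number, operations and
 * block size and instantiate the root dentry.
 */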
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

	rc = cifs_mount(sb, cifs_sb, data, devname);
	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize =
		cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);
	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}
static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc)
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}
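/*
 * cifs_statfs - report filesystem statistics for a cifs mount.  It tries the
 * newest query-FS-info level the server advertises and falls back to older
 * levels (see the comments in the body) when a call is unsupported or fails.
 */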
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be total path, but note
				      that some servers (including Samba 3)
				      have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
			le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed on newer one */
	if (rc)
		if (pTcon->ses->capabilities & CAP_NT_SMBS)
			rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

	/* Some old Windows servers also do not support level 103, retry with
	   older level one if old server failed the previous call or we
	   bypassed it because we detected that this was an older LANMAN sess */
	if (rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);

	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;	/* always return success? what if volume is no
			   longer available? */
}
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}
static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
		}
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}
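/*
 * Illustrative example (not taken from the original source): with the
 * options printed above, a cifs entry in /proc/mounts looks roughly like
 *
 *	//server/share /mnt cifs rw,unc=\\server\share,username=guest,rsize=16384,wsize=16384 0 0
 *
 * The unc, username, domain, rsize and wsize fields come from this function;
 * the device, mount point and generic flags are added by the VFS.
 */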
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb == NULL)
		return -EIO;
	pTcon = cifs_sb->tcon;
	if (pTcon == NULL)
		return -EIO;

	cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb == NULL)
		return -EIO;
	pTcon = cifs_sb->tcon;
	if (pTcon == NULL)
		return -EIO;

	cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb == NULL)
		return -EIO;
	pTcon = cifs_sb->tcon;
	if (pTcon == NULL)
		return -EIO;

	cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb == NULL)
		return -EIO;
	pTcon = cifs_sb->tcon;
	if (pTcon == NULL)
		return -EIO;

	cFYI(1, ("pqstats %p", qstats));
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif
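/*
 * cifs_umount_begin - called for forced unmount ("umount -f"): if this is the
 * last user of the tree connection, mark it as exiting and wake any tasks
 * blocked on the server request/response queues so they can error out instead
 * of hanging.
 */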
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;

	if (!(flags & MNT_FORCE))
		return;
	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
/* BB FIXME - finish add checks for tidStatus BB */

	return;
}
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}
static const struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode	= generic_delete_inode,
	.delete_inode	= cifs_delete_inode, */ /* Do not need the above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}
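/*
 * cifs_file_aio_write - write through the page cache, then start writeback
 * immediately unless we hold an exclusive oplock (clientCanCacheAll), since
 * without one other clients may read the file before our data reaches the
 * server.
 */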
static ssize_t
cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		    unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval;

		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		if (file->f_path.dentry->d_inode)
			CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(file, offset, origin);
}
static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
};
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
	/* revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
const struct inode_operations cifs_file_inode_ops = {
	/* revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
static void
cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&cifsi->vfs_inode);
		INIT_LIST_HEAD(&cifsi->lockList);
	}
}
static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT |
						  SLAB_MEM_SPREAD),
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}
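/*
 * cifs_oplock_thread - background worker that services queued oplock breaks:
 * it flushes dirty pages for the affected inode, drops cached data when read
 * caching is no longer allowed, and then acknowledges the break by sending
 * LOCKING_ANDX_OPLOCK_RELEASE (skipped if the session needs to reconnect,
 * since the server has already released the oplock in that case).
 */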
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc;

	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1); /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
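/*
 * cifs_dnotify_thread - wakes up roughly every 15 seconds and kicks the
 * response queue of any session with requests in flight, so that callers
 * stuck waiting on a dead server can notice and error out.
 */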
static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		if (try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 cifsSessionList);
			if (ses && ses->server &&
			    atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}
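/*
 * init_cifs - module initialization: set up the global lists, counters and
 * locks, clamp cifs_max_pending to a sane range, create the inode/mid/request
 * caches, register the filesystem and start the oplock and dnotify worker
 * threads.  Each step is unwound in reverse order if a later step fails.
 */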
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
/*	INIT_LIST_HEAD(&GlobalServerList);*/	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_filesystem;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)