/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>

#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */

#ifdef CONFIG_CIFS_EXPERIMENTAL
extern struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
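/*
 * Fill in the superblock for a new cifs mount: set mount flags, allocate
 * the cifs_sb_info, connect to the server via cifs_mount(), then look up
 * the root inode and attach the root dentry.
 */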
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (experimEnabled != 0)
		sb->s_export_op = &cifs_export_ops;
#endif /* EXPERIMENTAL */
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}
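/*
 * Tear down a cifs superblock: disconnect from the server via cifs_umount(),
 * then release the nls table and free the cifs_sb_info.
 */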
static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc) {
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	}
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}
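/*
 * Report filesystem statistics. Try the POSIX QFS info level first when the
 * server advertises the Unix extensions, then fall back to the NT level and
 * finally the old LANMAN level for older servers.
 */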
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX;	/* PATH_MAX may be too long - it would
					   presumably be total path, but note
					   that some servers (including Samba 3)
					   have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
	    le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on newer one */
	if (rc)
		if (pTcon->ses->capabilities & CAP_NT_SMBS)
			rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

	/* Some old Windows servers also do not support level 103, retry with
	   older level one if old server failed the previous call or we
	   bypassed it because we detected that this was an older LANMAN sess */
	if (rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);
	/*
	   int f_type;
	   __fsid_t f_fsid;
	   int f_namelen;
	*/
	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;		/* always return success? what if volume is no
				   longer available? */
}
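/*
 * With the noperm mount option, permission checking is left entirely to the
 * server; otherwise fall through to generic_permission() so mode bits
 * restricted on the client are honored as well.
 */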
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		return 0;
	} else /* file mode might have been restricted at mount time
		  on the client (above and beyond ACL on servers) for
		  servers which do not support setting and viewing mode bits,
		  so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
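/*
 * Allocate a cifsInodeInfo from the inode slab cache, with client-side
 * caching disabled until the server grants an oplock on open.
 */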
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */

	/* Can not set i_flags here - they get immediately overwritten
	   to zero by the VFS */
/*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
/* BB add prepath to mount options displayed */
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
						   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
						   cifs_sb->tcon->ses->domainName);
			}
		}
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
			seq_printf(s, ",posixpaths");
		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
		   !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
			seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
		if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
		   !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
			seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		return -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}
	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}
static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif
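/*
 * Called for forced unmounts (umount -f): mark the tcon as exiting when this
 * is its last user and wake any threads blocked on the server request and
 * response queues so they can error out instead of waiting indefinitely.
 */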
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;

	if (!(flags & MNT_FORCE))
		return;
	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
/* BB FIXME - finish add checks for tidStatus BB */

	return;
}
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

static const struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode = generic_delete_inode,
	.delete_inode = cifs_delete_inode, */ /* Do not need the above two functions
	unless later we add lazy close of inodes or unless the kernel forgets to call
	us with the same number of releases (closes) as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}
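/*
 * Write through the page cache, then kick off writeback immediately unless
 * the server has granted an oplock that allows caching writes
 * (clientCanCacheAll).
 */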
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}
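/*
 * llseek with revalidation: SEEK_END clears the inode's revalidation
 * timestamp and calls cifs_revalidate() so the cached file length is
 * refreshed from the server before seeking to end.
 */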
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval;

		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(file, offset, origin);
}
static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};

const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_file_inode_ops = {
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.sendfile = generic_file_sendfile,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
	.ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.ioctl = cifs_ioctl,
};
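/*
 * Slab constructor for cifsInodeInfo objects: initialize the embedded VFS
 * inode and the per-inode byte-range lock list.
 */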
static void
cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	INIT_LIST_HEAD(&cifsi->lockList);
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD),
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}
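/*
 * Create the large and small SMB request buffer caches and back each with a
 * mempool so a minimum number of buffers is always available; the module
 * parameters are clamped to their documented ranges first.
 */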
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests). A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
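/*
 * Create the cache and mempool for mids (multiplex ids), which track
 * outstanding SMB requests, plus the cache for queued oplock break entries.
 */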
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		mempool_destroy(cifs_mid_poolp);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}
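/*
 * Kernel thread that services queued oplock breaks: flush (and, if read
 * caching is no longer allowed, invalidate) cached data for the inode, then
 * send the oplock release to the server unless the session needs reconnect.
 */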
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc;

	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex); */
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex); */
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

				/* releasing a stale oplock after recent reconnection
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d ", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
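/*
 * Kernel thread that periodically wakes threads blocked on each server's
 * response queue so stuck requests can notice errors and bail out.
 */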
static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		if (try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 cifsSessionList);
			if (ses && ses->server &&
			    atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}
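/*
 * Module init: set up global lists, counters and caches, register the
 * filesystem, and start the oplock and dnotify worker threads; on failure
 * unwind in reverse order.
 */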
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
/*	INIT_LIST_HEAD(&GlobalServerList); */	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_filesystem;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
static void __exit
exit_cifs(void)
{
	cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)