/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
				   "Default: 50 Range: 2 to 256");
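/*
 * The module parameters above can be tuned at load time, for example
 * (illustrative invocation, assuming the filesystem is built as a module):
 *
 *	modprobe cifs CIFSMaxBufSize=65536 cifs_max_pending=128
 *
 * Out-of-range values are clamped in cifs_init_request_bufs() and
 * init_cifs() below.
 */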
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
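/*
 * Fill in a freshly allocated superblock for a cifs mount: allocate the
 * per-superblock cifs_sb_info, perform the protocol-level mount, then
 * instantiate the root inode and root dentry.
 */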
static int
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL)
		return -ENOMEM;

	rc = cifs_mount(sb, cifs_sb, data, devname);

	if (rc) {
		if (!silent)
			cERROR(1,
			       ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	}

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    sb->s_blocksize =
		cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
#endif
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);

	if (!inode) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, ("export ops supported"));
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* EXPERIMENTAL */

	return 0;

out_no_root:
	cERROR(1, ("cifs_read_super: get root inode failed"));
	if (inode)
		iput(inode);

out_mount_failed:
	if (cifs_sb) {
		if (cifs_sb->local_nls)
			unload_nls(cifs_sb->local_nls);
		kfree(cifs_sb);
	}
	return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
	int rc = 0;
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
		return;
	}
	rc = cifs_umount(sb, cifs_sb);
	if (rc)
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	unload_nls(cifs_sb->local_nls);
	kfree(cifs_sb);
	return;
}
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	int xid;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	xid = GetXid();

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be total path, but note
				      that some servers (including Samba 3)
				      have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
	    le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed
	   on newer one */
	if (rc)
		if (pTcon->ses->capabilities & CAP_NT_SMBS)
			rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

	/* Some old Windows servers also do not support level 103, retry with
	   older level one if old server failed the previous call or we
	   bypassed it because we detected that this was an older LANMAN sess */
	if (rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);
	/* int f_type;
	   __fsid_t f_fsid;
	   int f_namelen; */
	/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	FreeXid(xid);
	return 0;		/* always return success? what if volume is no
				   longer available? */
}
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		return 0;
	else /* file mode might have been restricted at mount time
		on the client (above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
}
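/* SLAB caches and mempools backing cifs inode, request and mid allocations;
   they are created from init_cifs() via the cifs_init_*() helpers below. */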
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	cifs_inode->write_behind_rc = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */

	/* Can not set i_flags here - they get immediately overwritten
	   to zero by the VFS */
/*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	if (cifs_sb) {
		if (cifs_sb->tcon) {
/* BB add prepath to mount options displayed */
			seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
			if (cifs_sb->tcon->ses) {
				if (cifs_sb->tcon->ses->userName)
					seq_printf(s, ",username=%s",
					   cifs_sb->tcon->ses->userName);
				if (cifs_sb->tcon->ses->domainName)
					seq_printf(s, ",domain=%s",
					   cifs_sb->tcon->ses->domainName);
			}
			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
			   !(cifs_sb->tcon->unix_ext))
				seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
			if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
			   !(cifs_sb->tcon->unix_ext))
				seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
		}
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
			seq_printf(s, ",posixpaths");
		seq_printf(s, ",rsize=%d", cifs_sb->rsize);
		seq_printf(s, ",wsize=%d", cifs_sb->wsize);
	}
	return 0;
}
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb) {
		pTcon = cifs_sb->tcon;
	} else {
		return -EIO;
	}

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else {
		rc = -EIO;
	}

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota = cifs_xquota_set,
	.get_xquota = cifs_xquota_get,
	.set_xstate = cifs_xstate_set,
	.get_xstate = cifs_xstate_get,
};
#endif
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;

	if (!(flags & MNT_FORCE))
		return;
	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb->tcon;
	if (tcon == NULL)
		return;
	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;
	up(&tcon->tconSem);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
/* BB FIXME - finish add checks for tidStatus BB */

	return;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_NODIRATIME;
	return 0;
}

static const struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode	= generic_delete_inode,
	.delete_inode	= cifs_delete_inode, */	/* Do not need above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
static int
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int rc;
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	if (IS_ERR(sb))
		return PTR_ERR(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		up_write(&sb->s_umount);
		deactivate_super(sb);
		return rc;
	}
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
}

static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
	ssize_t written;

	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		int retval;

		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
		if (retval < 0)
			return (loff_t)retval;
	}
	return remote_llseek(file, offset, origin);
}
static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
	/*  .fs_flags */
};
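/*
 * Illustrative usage from userspace (assumes a mount.cifs helper is
 * installed): once this filesystem type is registered in init_cifs(),
 * a share can typically be mounted with e.g.
 *
 *	mount -t cifs //server/share /mnt -o username=guest
 */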
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate:cifs_revalidate,   */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_file_inode_ops = {
/*	revalidate:cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
#endif
};
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
	.ioctl	= cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
	.ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.ioctl  = cifs_ioctl,
};
static void
cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	INIT_LIST_HEAD(&cifsi->lockList);
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD),
					      cifs_init_once, NULL);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}

static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00;	/* Round size to even 512 byte mult*/
	}
/*	cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					sizeof(struct oplock_q_entry), 0,
					SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}
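/*
 * Kernel thread servicing queued oplock breaks: for each queued entry it
 * writes back (and, when read caching is lost, waits for and invalidates)
 * the inode's pages, then acknowledges the break to the server with a
 * LOCKING_ANDX oplock release.
 */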
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16  netfid;
	int rc, waitrc = 0;

	set_freezable();
	do {
		if (try_to_freeze())
			continue;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex);*/
				if (S_ISREG(inode->i_mode)) {
					rc =
					   filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead
									== 0) {
						waitrc = filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
					if (rc == 0)
						rc = waitrc;
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d",
					inode, rc));

				/* releasing stale oplock after recent reconnect
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if session
				   to server still is disconnected since oplock
				   already released by the server in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
						0 /* len */ , 0 /* offset */, 0,
						0, LOCKING_ANDX_OPLOCK_RELEASE,
						0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}
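/*
 * Kernel thread that periodically (every 15 seconds) wakes waiters on each
 * SMB session's response queue so that stuck directory-notify requests can
 * notice and error out.
 */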
static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
		if (try_to_freeze())
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
				cifsSessionList);
			if (ses && ses->server &&
			     atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}
static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
/*	INIT_LIST_HEAD(&GlobalServerList);*/	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_unregister_filesystem;
#endif
	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_key_type;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_key_type:
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:
#endif
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
static void __exit
exit_cifs(void)
{
	cFYI(0, ("exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)