slab allocators: Remove SLAB_DEBUG_INITIAL flag
/*
 *   fs/cifs/cifsfs.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>

#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */

#ifdef CONFIG_CIFS_EXPERIMENTAL
extern struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;

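/*
 * cifs_read_super() - fill in a superblock for a new cifs mount: mark it
 * noatime/nodiratime, allocate the per-superblock cifs_sb_info, call
 * cifs_mount() to establish the session and tree connection, then
 * instantiate the root inode and dentry.  On failure the nls table and
 * cifs_sb are released via the out_* labels below.
 */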
static int
cifs_read_super(struct super_block *sb, void *data,
                const char *devname, int silent)
{
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
        int rc = 0;

        /* BB should we make this contingent on mount parm? */
        sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
        sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL)
                return -ENOMEM;

        rc = cifs_mount(sb, cifs_sb, data, devname);

        if (rc) {
                if (!silent)
                        cERROR(1,
                               ("cifs_mount failed w/return code = %d", rc));
                goto out_mount_failed;
        }

        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
#ifdef CONFIG_CIFS_EXPERIMENTAL
        if (experimEnabled != 0)
                sb->s_export_op = &cifs_export_ops;
#endif /* EXPERIMENTAL */
/*      if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
            sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
        sb->s_qcop = &cifs_quotactl_ops;
#endif
        sb->s_blocksize = CIFS_MAX_MSGSIZE;
        sb->s_blocksize_bits = 14;      /* default 2**14 = CIFS_MAX_MSGSIZE */
        inode = iget(sb, ROOT_I);

        if (!inode) {
                rc = -ENOMEM;
                goto out_no_root;
        }

        sb->s_root = d_alloc_root(inode);

        if (!sb->s_root) {
                rc = -ENOMEM;
                goto out_no_root;
        }

        return 0;

out_no_root:
        cERROR(1, ("cifs_read_super: get root inode failed"));
        if (inode)
                iput(inode);

out_mount_failed:
        if (cifs_sb) {
                if (cifs_sb->local_nls)
                        unload_nls(cifs_sb->local_nls);
                kfree(cifs_sb);
        }
        return rc;
}

static void
cifs_put_super(struct super_block *sb)
{
        int rc = 0;
        struct cifs_sb_info *cifs_sb;

        cFYI(1, ("In cifs_put_super"));
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL) {
                cFYI(1, ("Empty cifs superblock info passed to unmount"));
                return;
        }
        rc = cifs_umount(sb, cifs_sb);
        if (rc) {
                cERROR(1, ("cifs_umount failed with return code %d", rc));
        }
        unload_nls(cifs_sb->local_nls);
        kfree(cifs_sb);
        return;
}

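/*
 * cifs_statfs() - fill in filesystem statistics.  Try the POSIX QFS info
 * level first when the server advertises the CIFS Unix extensions, fall
 * back to the newer QFSInfo call (not supported by OS/2), and finally to
 * the legacy pre-NT info level for very old servers.
 */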
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        int xid;
        int rc = -EOPNOTSUPP;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;

        xid = GetXid();

        cifs_sb = CIFS_SB(sb);
        pTcon = cifs_sb->tcon;

        buf->f_type = CIFS_MAGIC_NUMBER;

        /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
        buf->f_namelen = PATH_MAX;      /* PATH_MAX may be too long - it would
                                           presumably be total path, but note
                                           that some servers (including Samba 3)
                                           have a shorter maximum path */
        buf->f_files = 0;       /* undefined */
        buf->f_ffree = 0;       /* unlimited */

/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
        if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
                        le64_to_cpu(pTcon->fsUnixInfo.Capability)))
                rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

        /* Only need to call the old QFSInfo if failed
           on newer one */
        if (rc)
                if (pTcon->ses->capabilities & CAP_NT_SMBS)
                        rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

        /* Some old Windows servers also do not support level 103, retry with
           older level one if old server failed the previous call or we
           bypassed it because we detected that this was an older LANMAN sess */
        if (rc)
                rc = SMBOldQFSInfo(xid, pTcon, buf);
        /*
           int f_type;
           __fsid_t f_fsid;
           int f_namelen; */
        /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
        FreeXid(xid);
        return 0;               /* always return success? what if volume is no
                                   longer available? */
}

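/*
 * With the "noperm" mount option (CIFS_MOUNT_NO_PERM) permission checking is
 * left entirely to the server; otherwise the client also applies the mode
 * bits cached on the inode via generic_permission().
 */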
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(inode->i_sb);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
                return 0;
        } else /* file mode might have been restricted at mount time
                  on the client (above and beyond ACL on servers) for
                  servers which do not support setting and viewing mode bits,
                  so allowing client to check permissions is useful */
                return generic_permission(inode, mask, NULL);
}

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

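/*
 * Per-inode CIFS state lives in cifsInodeInfo.  Caching of file data and
 * metadata is only allowed once the server has granted an oplock, so the
 * clientCanCache* flags start out false for a freshly allocated inode.
 */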
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
        struct cifsInodeInfo *cifs_inode;
        cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
        if (!cifs_inode)
                return NULL;
        cifs_inode->cifsAttrs = 0x20;   /* default */
        atomic_set(&cifs_inode->inUse, 0);
        cifs_inode->time = 0;
        /* Until the file is open and we have gotten oplock
           info back from the server, can not assume caching of
           file data or metadata */
        cifs_inode->clientCanCacheRead = FALSE;
        cifs_inode->clientCanCacheAll = FALSE;
        cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */

        /* Can not set i_flags here - they get immediately overwritten
           to zero by the VFS */
/*      cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
        INIT_LIST_HEAD(&cifs_inode->openFileList);
        return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
        kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(m->mnt_sb);

        if (cifs_sb) {
                if (cifs_sb->tcon) {
/* BB add prepath to mount options displayed */
                        seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
                        if (cifs_sb->tcon->ses) {
                                if (cifs_sb->tcon->ses->userName)
                                        seq_printf(s, ",username=%s",
                                           cifs_sb->tcon->ses->userName);
                                if (cifs_sb->tcon->ses->domainName)
                                        seq_printf(s, ",domain=%s",
                                           cifs_sb->tcon->ses->domainName);
                        }
                }
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
                        seq_printf(s, ",posixpaths");
                if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
                   !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
                        seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
                if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
                   !(cifs_sb->tcon->ses->capabilities & CAP_UNIX))
                        seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
                seq_printf(s, ",rsize=%d", cifs_sb->rsize);
                seq_printf(s, ",wsize=%d", cifs_sb->wsize);
        }
        return 0;
}

#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
                struct fs_disk_quota *pdquota)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb)
                pTcon = cifs_sb->tcon;
        else
                return -EIO;

        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
        } else {
                return -EIO;
        }

        FreeXid(xid);
        return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
                struct fs_disk_quota *pdquota)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb)
                pTcon = cifs_sb->tcon;
        else
                return -EIO;

        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
        } else {
                rc = -EIO;
        }

        FreeXid(xid);
        return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb)
                pTcon = cifs_sb->tcon;
        else
                return -EIO;

        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
        } else {
                rc = -EIO;
        }

        FreeXid(xid);
        return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
        int xid;
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        if (cifs_sb) {
                pTcon = cifs_sb->tcon;
        } else {
                return -EIO;
        }
        xid = GetXid();
        if (pTcon) {
                cFYI(1, ("pqstats %p", qstats));
        } else {
                rc = -EIO;
        }

        FreeXid(xid);
        return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
        .set_xquota     = cifs_xquota_set,
        .get_xquota     = cifs_xquota_get,
        .set_xstate     = cifs_xstate_set,
        .get_xstate     = cifs_xstate_get,
};
#endif

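/*
 * Forced unmount (MNT_FORCE): mark the tree connection as exiting when we
 * hold the last reference, and wake up any threads blocked on the server's
 * request/response queues so they can error out promptly.
 */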
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
{
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *tcon;

        if (!(flags & MNT_FORCE))
                return;
        cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
        if (cifs_sb == NULL)
                return;

        tcon = cifs_sb->tcon;
        if (tcon == NULL)
                return;
        down(&tcon->tconSem);
        if (atomic_read(&tcon->useCount) == 1)
                tcon->tidStatus = CifsExiting;
        up(&tcon->tconSem);

        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
        /* cancel_notify_requests(tcon); */
        if (tcon->ses && tcon->ses->server) {
                cFYI(1, ("wake up tasks now - umount begin not complete"));
                wake_up_all(&tcon->ses->server->request_q);
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1); /* yield */
                /* we have to kick the requests once more */
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1);
        }
/* BB FIXME - finish add checks for tidStatus BB */

        return;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
        /* BB FIXME */
        return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
        *flags |= MS_NODIRATIME;
        return 0;
}

static const struct super_operations cifs_super_ops = {
        .read_inode = cifs_read_inode,
        .put_super = cifs_put_super,
        .statfs = cifs_statfs,
        .alloc_inode = cifs_alloc_inode,
        .destroy_inode = cifs_destroy_inode,
/*      .drop_inode     = generic_delete_inode,
        .delete_inode   = cifs_delete_inode, *//* Do not need the above two functions
   unless later we add lazy close of inodes or unless the kernel forgets to call
   us with the same number of releases (closes) as opens */
        .show_options = cifs_show_options,
        .umount_begin = cifs_umount_begin,
        .remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
        .show_stats = cifs_show_stats,
#endif
};

static int
cifs_get_sb(struct file_system_type *fs_type,
            int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        int rc;
        struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

        cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

        if (IS_ERR(sb))
                return PTR_ERR(sb);

        sb->s_flags = flags;

        rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
        if (rc) {
                up_write(&sb->s_umount);
                deactivate_super(sb);
                return rc;
        }
        sb->s_flags |= MS_ACTIVE;
        return simple_set_mnt(mnt, sb);
}

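/*
 * Write-behind caching is only safe while we hold an exclusive (batch)
 * oplock; if clientCanCacheAll is not set, push dirty pages to the server
 * as soon as the buffered write completes.
 */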
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        ssize_t written;

        written = generic_file_aio_write(iocb, iov, nr_segs, pos);
        if (!CIFS_I(inode)->clientCanCacheAll)
                filemap_fdatawrite(inode->i_mapping);
        return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
        /* origin == SEEK_END => we must revalidate the cached file length */
        if (origin == SEEK_END) {
                int retval;

                /* some applications poll for the file length in this strange
                   way so we must seek to end on non-oplocked files by
                   setting the revalidate time to zero */
                CIFS_I(file->f_path.dentry->d_inode)->time = 0;

                retval = cifs_revalidate(file->f_path.dentry);
                if (retval < 0)
                        return (loff_t)retval;
        }
        return remote_llseek(file, offset, origin);
}

static struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .name = "cifs",
        .get_sb = cifs_get_sb,
        .kill_sb = kill_anon_super,
        /*  .fs_flags */
};
const struct inode_operations cifs_dir_inode_ops = {
        .create = cifs_create,
        .lookup = cifs_lookup,
        .getattr = cifs_getattr,
        .unlink = cifs_unlink,
        .link = cifs_hardlink,
        .mkdir = cifs_mkdir,
        .rmdir = cifs_rmdir,
        .rename = cifs_rename,
        .permission = cifs_permission,
/*      revalidate:cifs_revalidate,   */
        .setattr = cifs_setattr,
        .symlink = cifs_symlink,
        .mknod   = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_file_inode_ops = {
/*      revalidate:cifs_revalidate, */
        .setattr = cifs_setattr,
        .getattr = cifs_getattr, /* do we need this anymore? */
        .rename = cifs_rename,
        .permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_symlink_inode_ops = {
        .readlink = generic_readlink,
        .follow_link = cifs_follow_link,
        .put_link = cifs_put_link,
        .permission = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate: cifs_revalidate,
           setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};

const struct file_operations cifs_file_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .mmap  = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};
const struct file_operations cifs_file_nobrl_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .mmap  = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
        .ioctl  = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
        .readdir = cifs_readdir,
        .release = cifs_closedir,
        .read    = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
        .ioctl  = cifs_ioctl,
};

static void
cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
{
        struct cifsInodeInfo *cifsi = inode;

        if (flags & SLAB_CTOR_CONSTRUCTOR) {
                inode_init_once(&cifsi->vfs_inode);
                INIT_LIST_HEAD(&cifsi->lockList);
        }
}

static int
cifs_init_inodecache(void)
{
        cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
                                              sizeof (struct cifsInodeInfo),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                                  SLAB_MEM_SPREAD),
                                              cifs_init_once, NULL);
        if (cifs_inode_cachep == NULL)
                return -ENOMEM;

        return 0;
}

static void
cifs_destroy_inodecache(void)
{
        kmem_cache_destroy(cifs_inode_cachep);
}

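/*
 * Request buffers come from two slab caches, each backed by a mempool so
 * that a minimum number of buffers is always available: a large cache of
 * CIFSMaxBufSize + MAX_CIFS_HDR_SIZE bytes (CIFSMaxBufSize is clamped to
 * the 8192..130048 range and rounded to a multiple of 512) and a small
 * cache of MAX_CIFS_SMALL_BUFFER_SIZE bytes for the common short requests.
 */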
static int
cifs_init_request_bufs(void)
{
        if (CIFSMaxBufSize < 8192) {
        /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
           Unicode path name has to fit in any SMB/CIFS path based frames */
                CIFSMaxBufSize = 8192;
        } else if (CIFSMaxBufSize > 1024*127) {
                CIFSMaxBufSize = 1024 * 127;
        } else {
                CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
        }
/*      cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
        cifs_req_cachep = kmem_cache_create("cifs_request",
                                            CIFSMaxBufSize +
                                            MAX_CIFS_HDR_SIZE, 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_req_cachep == NULL)
                return -ENOMEM;

        if (cifs_min_rcv < 1)
                cifs_min_rcv = 1;
        else if (cifs_min_rcv > 64) {
                cifs_min_rcv = 64;
                cERROR(1, ("cifs_min_rcv set to maximum (64)"));
        }

        cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
                                                  cifs_req_cachep);

        if (cifs_req_poolp == NULL) {
                kmem_cache_destroy(cifs_req_cachep);
                return -ENOMEM;
        }
        /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
           almost all handle based requests (but not write response, nor is it
           sufficient for path based requests).  A smaller size would have
           been more efficient (compacting multiple slab items on one 4k page)
           for the case in which debug was on, but this larger size allows
           more SMBs to use small buffer alloc and is still much more
           efficient to alloc 1 per page off the slab compared to 17K (5page)
           alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
                        MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
                        NULL, NULL);
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                return -ENOMEM;
        }

        if (cifs_min_small < 2)
                cifs_min_small = 2;
        else if (cifs_min_small > 256) {
                cifs_min_small = 256;
                cFYI(1, ("cifs_min_small set to maximum (256)"));
        }

        cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
                                                     cifs_sm_req_cachep);

        if (cifs_sm_req_poolp == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                kmem_cache_destroy(cifs_sm_req_cachep);
                return -ENOMEM;
        }

        return 0;
}

static void
cifs_destroy_request_bufs(void)
{
        mempool_destroy(cifs_req_poolp);
        kmem_cache_destroy(cifs_req_cachep);
        mempool_destroy(cifs_sm_req_poolp);
        kmem_cache_destroy(cifs_sm_req_cachep);
}

static int
cifs_init_mids(void)
{
        cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
                                            sizeof (struct mid_q_entry), 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_mid_cachep == NULL)
                return -ENOMEM;

        /* 3 is a reasonable minimum number of simultaneous operations */
        cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
        if (cifs_mid_poolp == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);
                return -ENOMEM;
        }

        cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
                                        sizeof (struct oplock_q_entry), 0,
                                        SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_oplock_cachep == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);
                mempool_destroy(cifs_mid_poolp);
                return -ENOMEM;
        }

        return 0;
}

static void
cifs_destroy_mids(void)
{
        mempool_destroy(cifs_mid_poolp);
        kmem_cache_destroy(cifs_mid_cachep);
        kmem_cache_destroy(cifs_oplock_cachep);
}

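/*
 * cifs_oplock_thread() - kernel thread that services oplock breaks queued
 * on GlobalOplock_Q: it flushes (and, when read caching is lost, waits for
 * and invalidates) the affected inode's pages, then acknowledges the break
 * with a LOCKING_ANDX oplock release unless the session needs reconnect.
 */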
static int cifs_oplock_thread(void *dummyarg)
{
        struct oplock_q_entry *oplock_item;
        struct cifsTconInfo *pTcon;
        struct inode *inode;
        __u16 netfid;
        int rc;

        do {
                if (try_to_freeze())
                        continue;

                spin_lock(&GlobalMid_Lock);
                if (list_empty(&GlobalOplock_Q)) {
                        spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(39*HZ);
                } else {
                        oplock_item = list_entry(GlobalOplock_Q.next,
                                struct oplock_q_entry, qhead);
                        if (oplock_item) {
                                cFYI(1, ("found oplock item to write out"));
                                pTcon = oplock_item->tcon;
                                inode = oplock_item->pinode;
                                netfid = oplock_item->netfid;
                                spin_unlock(&GlobalMid_Lock);
                                DeleteOplockQEntry(oplock_item);
                                /* can not grab inode sem here since it would
                                   deadlock when oplock received on delete
                                   since vfs_unlink holds the i_mutex across
                                   the call */
                                /* mutex_lock(&inode->i_mutex);*/
                                if (S_ISREG(inode->i_mode)) {
                                        rc = filemap_fdatawrite(inode->i_mapping);
                                        if (CIFS_I(inode)->clientCanCacheRead == 0) {
                                                filemap_fdatawait(inode->i_mapping);
                                                invalidate_remote_inode(inode);
                                        }
                                } else
                                        rc = 0;
                                /* mutex_unlock(&inode->i_mutex);*/
                                if (rc)
                                        CIFS_I(inode)->write_behind_rc = rc;
                                cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

                                /* releasing a stale oplock after recent reconnection
                                   of smb session using a now incorrect file
                                   handle is not a data integrity issue but do
                                   not bother sending an oplock release if session
                                   to server still is disconnected since oplock
                                   already released by the server in that case */
                                if (pTcon->tidStatus != CifsNeedReconnect) {
                                        rc = CIFSSMBLock(0, pTcon, netfid,
                                                0 /* len */ , 0 /* offset */, 0,
                                                0, LOCKING_ANDX_OPLOCK_RELEASE,
                                                0 /* wait flag */);
                                        cFYI(1, ("Oplock release rc = %d ", rc));
                                }
                        } else
                                spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(1);  /* yield in case q were corrupt */
                }
        } while (!kthread_should_stop());

        return 0;
}

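/*
 * cifs_dnotify_thread() - wakes up threads blocked on a server response
 * queue every 15 seconds so that requests stuck on a dead session can
 * notice and error out.
 */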
static int cifs_dnotify_thread(void *dummyarg)
{
        struct list_head *tmp;
        struct cifsSesInfo *ses;

        do {
                if (try_to_freeze())
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(15*HZ);
                read_lock(&GlobalSMBSeslock);
                /* check if any stuck requests that need
                   to be woken up and wakeq so the
                   thread can wake up and error out */
                list_for_each(tmp, &GlobalSMBSessionList) {
                        ses = list_entry(tmp, struct cifsSesInfo,
                                cifsSessionList);
                        if (ses && ses->server &&
                             atomic_read(&ses->server->inFlight))
                                wake_up_all(&ses->server->response_q);
                }
                read_unlock(&GlobalSMBSeslock);
        } while (!kthread_should_stop());

        return 0;
}

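/*
 * Module init: set up the global lists, counters and locks, clamp
 * cifs_max_pending to the 2..256 range, create the inode/mid/request
 * caches, register the filesystem and start the oplock and dnotify
 * kernel threads.  Each failure point unwinds the earlier steps via
 * the out_* labels.
 */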
static int __init
init_cifs(void)
{
        int rc = 0;
#ifdef CONFIG_PROC_FS
        cifs_proc_init();
#endif
/*      INIT_LIST_HEAD(&GlobalServerList);*/    /* BB not implemented yet */
        INIT_LIST_HEAD(&GlobalSMBSessionList);
        INIT_LIST_HEAD(&GlobalTreeConnectionList);
        INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
        INIT_LIST_HEAD(&GlobalDnotifyReqList);
        INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
        atomic_set(&sesInfoAllocCount, 0);
        atomic_set(&tconInfoAllocCount, 0);
        atomic_set(&tcpSesAllocCount, 0);
        atomic_set(&tcpSesReconnectCount, 0);
        atomic_set(&tconInfoReconnectCount, 0);

        atomic_set(&bufAllocCount, 0);
        atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
        atomic_set(&totBufAllocCount, 0);
        atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

        atomic_set(&midCount, 0);
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
        GlobalMaxActiveXid = 0;
        memset(Local_System_Name, 0, 15);
        rwlock_init(&GlobalSMBSeslock);
        spin_lock_init(&GlobalMid_Lock);

        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cFYI(1, ("cifs_max_pending set to min of 2"));
        } else if (cifs_max_pending > 256) {
                cifs_max_pending = 256;
                cFYI(1, ("cifs_max_pending set to max of 256"));
        }

        rc = cifs_init_inodecache();
        if (rc)
                goto out_clean_proc;

        rc = cifs_init_mids();
        if (rc)
                goto out_destroy_inodecache;

        rc = cifs_init_request_bufs();
        if (rc)
                goto out_destroy_mids;

        rc = register_filesystem(&cifs_fs_type);
        if (rc)
                goto out_destroy_request_bufs;

        oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
        if (IS_ERR(oplockThread)) {
                rc = PTR_ERR(oplockThread);
                cERROR(1, ("error %d create oplock thread", rc));
                goto out_unregister_filesystem;
        }

        dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
        if (IS_ERR(dnotifyThread)) {
                rc = PTR_ERR(dnotifyThread);
                cERROR(1, ("error %d create dnotify thread", rc));
                goto out_stop_oplock_thread;
        }

        return 0;

 out_stop_oplock_thread:
        kthread_stop(oplockThread);
 out_unregister_filesystem:
        unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
        cifs_destroy_request_bufs();
 out_destroy_mids:
        cifs_destroy_mids();
 out_destroy_inodecache:
        cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
        cifs_proc_clean();
#endif
        return rc;
}

static void __exit
exit_cifs(void)
{
        cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
        cifs_proc_clean();
#endif
        unregister_filesystem(&cifs_fs_type);
        cifs_destroy_inodecache();
        cifs_destroy_mids();
        cifs_destroy_request_bufs();
        kthread_stop(oplockThread);
        kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");  /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)