/*
 *  Copyright (C) 1995, 1996, 1997 by Paal-Kr. Engstad and Volker Lendecke
 *  Copyright (C) 1997 by Volker Lendecke
 *
 *  Please add a note about your changes to smbfs in the ChangeLog file.
 */
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/net.h>
#include <linux/aio.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/smbno.h>
#include <linux/smb_fs.h>

#include "smb_debug.h"
#include "proto.h"
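
/*
 * Flush a file's data out to the server (->fsync).
 */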
static int
smb_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct smb_sb_info *server = server_from_dentry(dentry);
	int result;

	VERBOSE("sync file %s/%s\n", DENTRY_PATH(dentry));

	/*
	 * The VFS will writepage() all dirty pages for us, but we
	 * should send a SMBflush to the server, letting it know that
	 * we want things synchronized with actual storage.
	 *
	 * Note: this function requires all pages to have been written already
	 *       (should be ok with writepage_sync)
	 */
	result = smb_proc_flush(server, SMB_I(dentry->d_inode)->fileid);
	return result;
}
/*
 * Read a page synchronously.
 */
static int
smb_readpage_sync(struct dentry *dentry, struct page *page)
{
	char *buffer = kmap(page);
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	struct smb_sb_info *server = server_from_dentry(dentry);
	unsigned int rsize = smb_get_rsize(server);
	int count = PAGE_SIZE;
	int result;

	VERBOSE("file %s/%s, count=%d@%Ld, rsize=%d\n",
		DENTRY_PATH(dentry), count, offset, rsize);

	result = smb_open(dentry, SMB_O_RDONLY);
	if (result < 0)
		goto io_error;

	do {
		if (count < rsize)
			rsize = count;

		result = server->ops->read(dentry->d_inode, offset, rsize, buffer);
		if (result < 0)
			goto io_error;

		count -= result;
		offset += result;
		buffer += result;
		dentry->d_inode->i_atime =
			current_fs_time(dentry->d_inode->i_sb);
		if (result < rsize)
			break;
	} while (count);

	memset(buffer, 0, count);
	flush_dcache_page(page);
	SetPageUptodate(page);
	result = 0;

io_error:
	kunmap(page);
	unlock_page(page);
	return result;
}
/*
 * We are called with the page locked and we unlock it when done.
 */
static int
smb_readpage(struct file *file, struct page *page)
{
	int error;
	struct dentry *dentry = file->f_path.dentry;

	page_cache_get(page);
	error = smb_readpage_sync(dentry, page);
	page_cache_release(page);
	return error;
}
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int
smb_writepage_sync(struct inode *inode, struct page *page,
		   unsigned long pageoffset, unsigned int count)
{
	loff_t offset;
	char *buffer = kmap(page) + pageoffset;
	struct smb_sb_info *server = server_from_inode(inode);
	unsigned int wsize = smb_get_wsize(server);
	int ret = 0;

	offset = ((loff_t)page->index << PAGE_CACHE_SHIFT) + pageoffset;
	VERBOSE("file ino=%ld, fileid=%d, count=%d@%Ld, wsize=%d\n",
		inode->i_ino, SMB_I(inode)->fileid, count, offset, wsize);

	do {
		int write_ret;

		if (count < wsize)
			wsize = count;

		write_ret = server->ops->write(inode, offset, wsize, buffer);
		if (write_ret < 0) {
			PARANOIA("failed write, wsize=%d, write_ret=%d\n",
				 wsize, write_ret);
			ret = write_ret;
			break;
		}
		/* N.B. what if result < wsize?? */
#ifdef SMBFS_PARANOIA
		if (write_ret < wsize)
			PARANOIA("short write, wsize=%d, write_ret=%d\n",
				 wsize, write_ret);
#endif
		buffer += wsize;
		offset += wsize;
		count -= wsize;

		/*
		 * Update the inode now rather than waiting for a refresh.
		 */
		inode->i_mtime = inode->i_atime = current_fs_time(inode->i_sb);
		SMB_I(inode)->flags |= SMB_F_LOCALWRITE;
		if (offset > inode->i_size)
			inode->i_size = offset;
	} while (count);

	kunmap(page);
	return ret;
}
/*
 * Write a page to the server. This will be used for NFS swapping only
 * (for now), and we currently do this synchronously only.
 *
 * We are called with the page locked and we unlock it when done.
 */
static int
smb_writepage(struct page *page, struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	int err;

	BUG_ON(!mapping);
	inode = mapping->host;
	BUG_ON(!inode);

	end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	/* easy case */
	if (page->index < end_index)
		goto do_it;

	/* things got complicated... */
	offset = inode->i_size & (PAGE_CACHE_SIZE-1);
	/* OK, are we completely out? */
	if (page->index >= end_index+1 || !offset)
		return 0; /* truncated - don't care */
do_it:
	page_cache_get(page);
	err = smb_writepage_sync(inode, page, 0, offset);
	SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);
	return err;
}
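
/*
 * Write back a byte range of a page; used by smb_commit_write to push
 * data copied in by the generic write path out to the server.
 */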
static int
smb_updatepage(struct file *file, struct page *page, unsigned long offset,
	       unsigned int count)
{
	struct dentry *dentry = file->f_path.dentry;

	DEBUG1("(%s/%s %d@%lld)\n", DENTRY_PATH(dentry), count,
	       ((unsigned long long)page->index << PAGE_CACHE_SHIFT) + offset);

	return smb_writepage_sync(dentry->d_inode, page, offset, count);
}
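
/*
 * Read from a file through the page cache. Revalidate the inode first
 * so cached attributes are current, then use the generic read path.
 */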
static ssize_t
smb_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct dentry *dentry = file->f_path.dentry;
	ssize_t status;

	VERBOSE("file %s/%s, count=%lu@%lu\n", DENTRY_PATH(dentry),
		(unsigned long) iocb->ki_left, (unsigned long) pos);

	status = smb_revalidate_inode(dentry);
	if (status) {
		PARANOIA("%s/%s validation failed, error=%Zd\n",
			 DENTRY_PATH(dentry), status);
		goto out;
	}

	VERBOSE("before read, size=%ld, flags=%x, atime=%ld\n",
		(long)dentry->d_inode->i_size,
		dentry->d_inode->i_flags, dentry->d_inode->i_atime);

	status = generic_file_aio_read(iocb, iov, nr_segs, pos);
out:
	return status;
}
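
/*
 * Memory-map a file. The inode is revalidated before handing off to
 * generic_file_mmap().
 */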
static int
smb_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dentry *dentry = file->f_path.dentry;
	int status;

	VERBOSE("file %s/%s, address %lu - %lu\n",
		DENTRY_PATH(dentry), vma->vm_start, vma->vm_end);

	status = smb_revalidate_inode(dentry);
	if (status) {
		PARANOIA("%s/%s validation failed, error=%d\n",
			 DENTRY_PATH(dentry), status);
		goto out;
	}
	status = generic_file_mmap(file, vma);
out:
	return status;
}
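
/*
 * Splice file data into a pipe, revalidating the inode before calling
 * the generic splice implementation.
 */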
static ssize_t
smb_file_splice_read(struct file *file, loff_t *ppos,
		     struct pipe_inode_info *pipe, size_t count,
		     unsigned int flags)
{
	struct dentry *dentry = file->f_path.dentry;
	ssize_t status;

	VERBOSE("file %s/%s, pos=%Ld, count=%d\n",
		DENTRY_PATH(dentry), *ppos, count);

	status = smb_revalidate_inode(dentry);
	if (status) {
		PARANOIA("%s/%s validation failed, error=%Zd\n",
			 DENTRY_PATH(dentry), status);
		goto out;
	}
	status = generic_file_splice_read(file, ppos, pipe, count, flags);
out:
	return status;
}
/*
 * This does the "real" work of the write. The generic routine has
 * allocated the page, locked it, and done all the page alignment
 * calculations etc. Now we should just copy the data from user
 * space and write it back to the real medium.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until it is done with the page.
 */
static int smb_prepare_write(struct file *file, struct page *page,
			     unsigned offset, unsigned to)
{
	return 0;
}
static int smb_commit_write(struct file *file, struct page *page,
			    unsigned offset, unsigned to)
{
	int status;

	lock_kernel();
	status = smb_updatepage(file, page, offset, to - offset);
	unlock_kernel();
	return status;
}
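
/*
 * Address space operations: all reads and writes go through the page
 * cache and are pushed to the server synchronously.
 */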
const struct address_space_operations smb_file_aops = {
	.readpage	= smb_readpage,
	.writepage	= smb_writepage,
	.prepare_write	= smb_prepare_write,
	.commit_write	= smb_commit_write
};
/*
 * Write to a file (through the page cache).
 */
static ssize_t
smb_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct dentry *dentry = file->f_path.dentry;
	ssize_t result;

	VERBOSE("file %s/%s, count=%lu@%lu\n",
		DENTRY_PATH(dentry),
		(unsigned long) iocb->ki_left, (unsigned long) pos);

	result = smb_revalidate_inode(dentry);
	if (result) {
		PARANOIA("%s/%s validation failed, error=%Zd\n",
			 DENTRY_PATH(dentry), result);
		goto out;
	}

	result = smb_open(dentry, SMB_O_WRONLY);
	if (result)
		goto out;

	if (iocb->ki_left > 0) {
		result = generic_file_aio_write(iocb, iov, nr_segs, pos);
		VERBOSE("pos=%ld, size=%ld, mtime=%ld, atime=%ld\n",
			(long) file->f_pos, (long) dentry->d_inode->i_size,
			dentry->d_inode->i_mtime, dentry->d_inode->i_atime);
	}
out:
	return result;
}
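
/*
 * Open the file on the server and count the opener so the server-side
 * handle is kept until the last local user releases it.
 */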
static int
smb_file_open(struct inode *inode, struct file *file)
{
	int result;
	struct dentry *dentry = file->f_path.dentry;
	int smb_mode = (file->f_mode & O_ACCMODE) - 1;

	lock_kernel();
	result = smb_open(dentry, smb_mode);
	if (result)
		goto out;
	SMB_I(inode)->openers++;
out:
	unlock_kernel();
	return result;
}
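
/*
 * Release a file: on the last close, flush dirty pages and close the
 * file on the server.
 */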
static int
smb_file_release(struct inode *inode, struct file *file)
{
	lock_kernel();
	if (!--SMB_I(inode)->openers) {
		/* We must flush any dirty pages now as we won't be able to
		   write anything after close. mmap can trigger this.
		   "openers" should perhaps include mmap'ers ... */
		filemap_write_and_wait(inode->i_mapping);
		smb_close(inode);
	}
	unlock_kernel();
	return 0;
}
/*
 * Check whether the required access is compatible with
 * an inode's permission. SMB doesn't recognize superuser
 * privileges, so we need our own check for this.
 */
static int
smb_file_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	int mode = inode->i_mode;
	int error = 0;

	VERBOSE("mode=%x, mask=%x\n", mode, mask);

	/* Look at user permissions */
	mode >>= 6;
	if ((mode & 7 & mask) != mask)
		error = -EACCES;
	return error;
}
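
/*
 * File operations for regular smbfs files.
 */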
const struct file_operations smb_file_operations =
{
	.llseek		= remote_llseek,
	.read		= do_sync_read,
	.aio_read	= smb_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= smb_file_aio_write,
	.mmap		= smb_file_mmap,
	.open		= smb_file_open,
	.release	= smb_file_release,
	.fsync		= smb_fsync,
	.splice_read	= smb_file_splice_read,
};
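
/*
 * Inode operations for regular smbfs files.
 */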
const struct inode_operations smb_file_inode_operations =
{
	.permission	= smb_file_permission,
	.getattr	= smb_getattr,
	.setattr	= smb_notify_change,
};