/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/locks.h>
#include <linux/fcntl.h>

#include <asm/uaccess.h>
extern int * blk_size[];
extern int * blksize_size[];
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
#define NBUF 64	/* buffer heads handled per batch below */
ssize_t block_write(struct file * filp, const char * buf,
		    size_t count, loff_t *ppos)
{
	struct inode * inode = filp->f_dentry->d_inode;
	ssize_t blocksize, blocksize_bits, i, buffercount, write_error;
	ssize_t block, blocks;
	loff_t offset;
	ssize_t chars;
	ssize_t written = 0;
	struct buffer_head * bhlist[NBUF];
	size_t size;
	kdev_t dev;
	struct buffer_head * bh, *bufferlist[NBUF];
	register char * p;
	write_error = buffercount = 0;
	dev = inode->i_rdev;
	if ( is_read_only( inode->i_rdev ))
		return -EPERM;
	blocksize = BLOCK_SIZE;
	if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
		blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];

	/* blocksize is a power of two; derive the shift count used below */
	i = blocksize;
	blocksize_bits = 0;
	while (i != 1) {
		blocksize_bits++;
		i >>= 1;
	}
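	/* Split the byte position *ppos into a block number plus an offset
	   within that block; size is the device capacity in blocks. */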
	block = *ppos >> blocksize_bits;
	offset = *ppos & (blocksize-1);

	if (blk_size[MAJOR(dev)])
		size = ((loff_t) blk_size[MAJOR(dev)][MINOR(dev)] << BLOCK_SIZE_BITS) >> blocksize_bits;
	else
		size = INT_MAX;
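	/*
	 * Write loop: each pass fills at most one block.  Partial blocks are
	 * read in first (read-modify-write).  With O_SYNC the dirty buffers
	 * are collected in bufferlist[], up to NBUF deep, and flushed with
	 * ll_rw_block(WRITE, ...) further down.
	 */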
	while (count > 0) {
		if (block >= size)
			return written ? written : -ENOSPC;
		chars = blocksize - offset;
		if (chars > count)
			chars = count;
#if 0
		/* get the buffer head */
		{
			struct buffer_head * (*fn)(kdev_t, int, int) = getblk;
			if (chars != blocksize)
				fn = bread;
			bh = fn(dev, block, blocksize);
		}
#else
		bh = getblk(dev, block, blocksize);

		if (chars != blocksize && !buffer_uptodate(bh)) {
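			/*
			 * Partial-block write into a buffer that is not up to
			 * date: the rest of the block must come off the disk
			 * first, either directly or via read-ahead.
			 */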
			if (!filp->f_reada ||
			    !read_ahead[MAJOR(dev)]) {
				/* We do this to force the read of a single buffer */
				brelse(bh);
				bh = bread(dev, block, blocksize);
			} else {
				/* Read-ahead before write */
				blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9) / 2;
				if (block + blocks > size)
					blocks = size - block;
				if (blocks > NBUF)
					blocks = NBUF;
				bhlist[0] = bh;
				for (i = 1; i < blocks; i++) {
					bhlist[i] = getblk(dev, block+i, blocksize);
					if (!bhlist[i]) {
						while (i >= 0)
							brelse(bhlist[i--]);
						return written ? written : -EIO;
					}
				}
				ll_rw_block(READ, blocks, bhlist);
				for (i = 1; i < blocks; i++)
					brelse(bhlist[i]);
				wait_on_buffer(bh);
			}
		}
#endif
		block++;
		if (!bh)
			return written ? written : -EIO;
		p = offset + bh->b_data;
		offset = 0;
		*ppos += chars;
		written += chars;
		count -= chars;
		copy_from_user(p, buf, chars);
		p += chars;
		buf += chars;
		mark_buffer_uptodate(bh, 1);
		mark_buffer_dirty(bh, 0);
		if (filp->f_flags & O_SYNC)
			bufferlist[buffercount++] = bh;
		else
			brelse(bh);
		if (buffercount == NBUF) {
			ll_rw_block(WRITE, buffercount, bufferlist);
			for (i = 0; i < buffercount; i++) {
				wait_on_buffer(bufferlist[i]);
				if (!buffer_uptodate(bufferlist[i]))
					write_error = 1;
				brelse(bufferlist[i]);
			}
			buffercount = 0;
		}
		if (write_error)
			break;
	}
	if (buffercount) {
		ll_rw_block(WRITE, buffercount, bufferlist);
		for (i = 0; i < buffercount; i++) {
			wait_on_buffer(bufferlist[i]);
			if (!buffer_uptodate(bufferlist[i]))
				write_error = 1;
			brelse(bufferlist[i]);
		}
	}
	filp->f_reada = 1;
	if (write_error)
		return -EIO;
	return written;
}
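/*
 * Raw block-device read.  Buffers are requested in batches of up to NBUF
 * (widened to the per-major read_ahead window when f_reada is set), and
 * completed buffers are copied to user space while later I/O is still in
 * flight.
 */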
ssize_t block_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
{
	struct inode * inode = filp->f_dentry->d_inode;
	size_t block;
	loff_t offset;
	ssize_t blocksize;
	ssize_t blocksize_bits, i;
	size_t blocks, rblocks, left;
	int bhrequest, uptodate;
	struct buffer_head ** bhb, ** bhe;
	struct buffer_head * buflist[NBUF];
	struct buffer_head * bhreq[NBUF];
	unsigned int chars;
	loff_t size;
	kdev_t dev;
	ssize_t read;
	dev = inode->i_rdev;
	blocksize = BLOCK_SIZE;
	if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
		blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];
	i = blocksize;
	blocksize_bits = 0;
	while (i != 1) {
		blocksize_bits++;
		i >>= 1;
	}
	offset = *ppos;
	if (blk_size[MAJOR(dev)])
		size = (loff_t) blk_size[MAJOR(dev)][MINOR(dev)] << BLOCK_SIZE_BITS;
	else
		size = INT_MAX;

	if (offset > size)
		left = 0;
	/* size - offset might not fit into left, so check explicitly. */
	else if (size - offset > INT_MAX)
		left = INT_MAX;
	else
		left = size - offset;
	if (left > count)
		left = count;
	if (left <= 0)
		return 0;
	read = 0;
	block = offset >> blocksize_bits;
	offset &= blocksize-1;
	size >>= blocksize_bits;
	rblocks = blocks = (left + offset + blocksize - 1) >> blocksize_bits;
	bhb = bhe = buflist;
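	/*
	 * rblocks is the number of blocks the caller actually needs; blocks
	 * may be widened below to cover the device read-ahead window, and any
	 * surplus buffers are released at the end of the function.
	 */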
	if (filp->f_reada) {
		if (blocks < read_ahead[MAJOR(dev)] / (blocksize >> 9))
			blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9);
		if (rblocks > blocks)
			blocks = rblocks;
	}
	if (block + blocks > size) {
		blocks = size - block;
		if (blocks == 0)
			return 0;
	}
	/* We do this in a two stage process.  We first try to request
	   as many blocks as we can, then we wait for the first one to
	   complete, and then we try to wrap up as many as are actually
	   done.  This routine is rather generic, in that it can be used
	   in a filesystem by substituting the appropriate function in
	   for getblk.

	   This routine is optimized to make maximum use of the various
	   buffers and caches. */
	do {
		bhrequest = 0;
		uptodate = 1;
		while (blocks) {
			--blocks;
			*bhb = getblk(dev, block++, blocksize);
			if (*bhb && !buffer_uptodate(*bhb)) {
				uptodate = 0;
				bhreq[bhrequest++] = *bhb;
			}

			if (++bhb == &buflist[NBUF])
				bhb = buflist;
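			/* buflist[] is used as a ring of NBUF buffer heads:
			   bhb is where new requests go in, bhe is where
			   completed buffers are taken out. */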
			/* If the block we have on hand is uptodate, go ahead
			   and complete processing. */
			if (uptodate)
				break;
			if (bhb == bhe)
				break;
		}
		/* Now request them all */
		if (bhrequest)
			ll_rw_block(READ, bhrequest, bhreq);
		do { /* Finish off all I/O that has actually completed */
			if (*bhe) {
				wait_on_buffer(*bhe);
				if (!buffer_uptodate(*bhe)) {	/* read error? */
					brelse(*bhe);
					if (++bhe == &buflist[NBUF])
						bhe = buflist;
					left = 0;
					break;
				}
			}
			if (left < blocksize - offset)
				chars = left;
			else
				chars = blocksize - offset;
			*ppos += chars;
			left -= chars;
			read += chars;
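			/* A missing buffer head (getblk() failed) is handled
			   below by returning zeroes for that stretch. */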
			if (*bhe) {
				copy_to_user(buf, offset + (*bhe)->b_data, chars);
				brelse(*bhe);
				buf += chars;
			} else {
				while (chars-- > 0)
					put_user(0, buf++);
			}
			offset = 0;
			if (++bhe == &buflist[NBUF])
				bhe = buflist;
		} while (left > 0 && bhe != bhb && (!*bhe || !buffer_locked(*bhe)));
	} while (left > 0);
	/* Release the read-ahead blocks */
	while (bhe != bhb) {
		brelse(*bhe);
		if (++bhe == &buflist[NBUF])
			bhe = buflist;
	}
	if (!read)
		return -EIO;
	filp->f_reada = 1;
	return read;
}
/*
 *	Filp may be NULL when we are called by an msync of a vma
 *	since the vma has no handle.
 */
int block_fsync(struct file *filp, struct dentry *dentry)
{
	return fsync_dev(dentry->d_inode->i_rdev);
}
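/*
 * Illustrative sketch only (kept out of the build with #if 0): handlers like
 * block_read/block_write/block_fsync are normally exposed through a
 * struct file_operations that the block-device layer installs for raw block
 * devices.  The positional slot order below assumes the 2.2-era layout
 * (llseek, read, write, readdir, poll, ioctl, mmap, open, flush, release,
 * fsync, ...); entries left NULL fall back to the kernel defaults.
 */
#if 0
static struct file_operations example_blk_fops = {
	NULL,			/* llseek - default */
	block_read,		/* read */
	block_write,		/* write */
	NULL,			/* readdir */
	NULL,			/* poll */
	NULL,			/* ioctl */
	NULL,			/* mmap */
	NULL,			/* open */
	NULL,			/* flush */
	NULL,			/* release */
	block_fsync		/* fsync */
};
#endif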