/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/locks.h>
#include <linux/fcntl.h>

#include <asm/uaccess.h>

extern int *blk_size[];
extern int *blksize_size[];

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
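/*
 * blk_size[major][minor] holds a device's size in 1KB (BLOCK_SIZE)
 * units and blksize_size[major][minor] its soft block size in bytes,
 * which is why the size computations below shift by BLOCK_SIZE_BITS;
 * MAX_BUF_PER_PAGE bounds how many 512-byte buffers fit in one page.
 */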
ssize_t block_write(struct file * filp, const char * buf,
                    size_t count, loff_t *ppos)
{
    struct inode * inode = filp->f_dentry->d_inode;
    ssize_t blocksize, blocksize_bits, i, buffercount, write_error;
    ssize_t block, blocks;
    struct buffer_head * bhlist[NBUF];
    struct buffer_head * bh, *bufferlist[NBUF];
    /* ... */

    write_error = buffercount = 0;
    if ( is_read_only( inode->i_rdev ))
        /* ... */

    blocksize = BLOCK_SIZE;
    if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
        blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];
    /* ... */
    block = *ppos >> blocksize_bits;
    offset = *ppos & (blocksize-1);
    /* ... */
    if (blk_size[MAJOR(dev)])
        size = ((loff_t) blk_size[MAJOR(dev)][MINOR(dev)]
                << BLOCK_SIZE_BITS) >> blocksize_bits;
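    /*
     * Illustrative arithmetic (numbers not from the original source):
     * a device with blk_size[MAJOR][MINOR] = 1048576 (1 GiB expressed
     * in 1KB units) and a 4096-byte soft blocksize gives
     * (1048576 << 10) >> 12 = 262144 device blocks.
     */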
    /* ... */
        return written ? written : -ENOSPC;
    chars = blocksize - offset;
    /* get the buffer head */
    /* ... */
    struct buffer_head * (*fn)(kdev_t, int, int) = getblk;
    if (chars != blocksize)
        /* ... */
    bh = fn(dev, block, blocksize);
    /* ... */
    bh = getblk(dev, block, blocksize);
    /* ... */
    if (chars != blocksize && !buffer_uptodate(bh)) {
        /* ... */
            !read_ahead[MAJOR(dev)]) {
            /* We do this to force the read of a single buffer */
            /* ... */
            bh = bread(dev,block,blocksize);
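            /*
             * A partial-block write has to see the block's existing
             * contents before copy_from_user() below overwrites part
             * of it, which is why a not-up-to-date buffer is read
             * (either alone or together with read-ahead) whenever
             * chars != blocksize.
             */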
            /* ... */
            /* Read-ahead before write */
            blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9) / 2;
            if (block + blocks > size) blocks = size - block;
            if (blocks > NBUF) blocks=NBUF;
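            /*
             * read_ahead[] is kept in 512-byte sectors, so dividing by
             * (blocksize >> 9) converts it to device blocks; half of
             * that window is used here and clamped so it neither runs
             * past the end of the device nor overflows the NBUF-entry
             * bhlist[] array.
             */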
            /* ... */
            for(i=1; i<blocks; i++){
                bhlist[i] = getblk (dev, block+i, blocksize);
                /* ... */
                    while(i >= 0) brelse(bhlist[i--]);
                    return written ? written : -EIO;
                /* ... */
            }
            ll_rw_block(READ, blocks, bhlist);
            for(i=1; i<blocks; i++) brelse(bhlist[i]);
    /* ... */
        return written ? written : -EIO;
    p = offset + bh->b_data;
    /* ... */
    copy_from_user(p,buf,chars);
    /* ... */
    mark_buffer_uptodate(bh, 1);
    mark_buffer_dirty(bh, 0);
    if (filp->f_flags & O_SYNC)
        bufferlist[buffercount++] = bh;
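    /*
     * O_SYNC writes are collected in bufferlist[]; whenever NBUF of
     * them have accumulated (and once more after the main loop) they
     * are submitted with ll_rw_block(WRITE, ...) and waited on, so a
     * synchronous writer does not return before its buffers have
     * finished their I/O.
     */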
    if (buffercount == NBUF){
        ll_rw_block(WRITE, buffercount, bufferlist);
        for(i=0; i<buffercount; i++){
            wait_on_buffer(bufferlist[i]);
            if (!buffer_uptodate(bufferlist[i]))
                /* ... */
            brelse(bufferlist[i]);
    /* ... */
    ll_rw_block(WRITE, buffercount, bufferlist);
    for(i=0; i<buffercount; i++){
        wait_on_buffer(bufferlist[i]);
        if (!buffer_uptodate(bufferlist[i]))
            /* ... */
        brelse(bufferlist[i]);
    /* ... */

ssize_t block_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
{
    struct inode * inode = filp->f_dentry->d_inode;
    /* ... */
    ssize_t blocksize_bits, i;
    size_t blocks, rblocks, left;
    int bhrequest, uptodate;
    struct buffer_head ** bhb, ** bhe;
    struct buffer_head * buflist[NBUF];
    struct buffer_head * bhreq[NBUF];
    /* ... */
    blocksize = BLOCK_SIZE;
    if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
        blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];
    /* ... */
    if (blk_size[MAJOR(dev)])
        size = (loff_t) blk_size[MAJOR(dev)][MINOR(dev)] << BLOCK_SIZE_BITS;
    /* ... */
    /* size - offset might not fit into left, so check explicitly. */
    else if (size - offset > INT_MAX)
        /* ... */
    else
        left = size - offset;
    /* ... */
    block = offset >> blocksize_bits;
    offset &= blocksize-1;
    size >>= blocksize_bits;
    rblocks = blocks = (left + offset + blocksize - 1) >> blocksize_bits;
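    /*
     * Illustrative example (values not from the original source): with
     * a 1024-byte blocksize, left = 3000 bytes and an in-block offset
     * of 200, (3000 + 200 + 1023) >> 10 = 4, i.e. the request spans
     * four device blocks.
     */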
    /* ... */
    if (blocks < read_ahead[MAJOR(dev)] / (blocksize >> 9))
        blocks = read_ahead[MAJOR(dev)] / (blocksize >> 9);
    if (rblocks > blocks)
        /* ... */
    if (block + blocks > size) {
        blocks = size - block;
    /* We do this in a two stage process. We first try to request
       as many blocks as we can, then we wait for the first one to
       complete, and then we try to wrap up as many as are actually
       done. This routine is rather generic, in that it can be used
       in a filesystem by substituting the appropriate function in
       for getblk().

       This routine is optimized to make maximum use of the various
       buffers and caches. */
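    /*
     * Concretely, in the loops below: buflist[] is used as a ring of
     * NBUF buffer heads, bhb is the submission pointer that fills it
     * via getblk(), bhe is the completion pointer that drains it, and
     * bhreq[] collects the buffers that are not already up to date so
     * they can be submitted in a single ll_rw_block(READ, ...) call.
     */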
    /* ... */
        *bhb = getblk(dev, block++, blocksize);
        if (*bhb && !buffer_uptodate(*bhb)) {
            /* ... */
            bhreq[bhrequest++] = *bhb;
        }
        /* ... */
        if (++bhb == &buflist[NBUF])
            /* ... */

        /* If the block we have on hand is uptodate, go ahead
           and complete processing. */
        /* ... */

        /* Now request them all */
        /* ... */
            ll_rw_block(READ, bhrequest, bhreq);
        do { /* Finish off all I/O that has actually completed */
            /* ... */
            wait_on_buffer(*bhe);
            if (!buffer_uptodate(*bhe)) {    /* read error? */
                /* ... */
                if (++bhe == &buflist[NBUF])
                    /* ... */
            }
            /* ... */
            if (left < blocksize - offset)
                /* ... */
            else
                chars = blocksize - offset;
            /* ... */
            copy_to_user(buf,offset+(*bhe)->b_data,chars);
            /* ... */
            if (++bhe == &buflist[NBUF])
                /* ... */
        } while (left > 0 && bhe != bhb && (!*bhe || !buffer_locked(*bhe)));

        if (bhe == bhb && !blocks)
            /* ... */
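        /*
         * The while condition above stops the drain loop as soon as it
         * reaches a buffer whose I/O is still in flight (locked), so
         * only the first outstanding buffer is actually waited on per
         * pass; the check against bhe == bhb && !blocks detects that
         * everything submitted has been consumed and nothing is left
         * to request.
         */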

    /* Release the read-ahead blocks */
    /* ... */
        if (++bhe == &buflist[NBUF])
            /* ... */
    /* ... */

/*
 * Filp may be NULL when we are called by an msync of a vma
 * since the vma has no handle.
 */
int block_fsync(struct file *filp, struct dentry *dentry)
{
    return fsync_dev(dentry->d_inode->i_rdev);
}
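
/*
 * A minimal sketch, not part of the original file: routines like the
 * ones above are normally exposed through a struct file_operations so
 * the VFS can dispatch read(), write() and fsync() on a block device
 * to them. The initializer below is illustrative only; the name
 * example_blk_fops is made up, designated initializers are used to
 * avoid depending on field order, and all other operations are left
 * at their defaults.
 */
static struct file_operations example_blk_fops = {
    .read  = block_read,   /* copy data out of the buffer cache */
    .write = block_write,  /* write through the buffer cache */
    .fsync = block_fsync,  /* flush all dirty buffers for the device */
};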