MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / fs / affs / file.c
blob9786e07cbef3cff828064950317c6a2298c3d6b8
1 /*
2 * linux/fs/affs/file.c
4 * (c) 1996 Hans-Joachim Widmaier - Rewritten
6 * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
8 * (C) 1992 Eric Youngdale Modified for ISO 9660 filesystem.
10 * (C) 1991 Linus Torvalds - minix filesystem
12 * affs regular file handling primitives
15 #include <asm/div64.h>
16 #include <asm/uaccess.h>
17 #include <asm/system.h>
18 #include <linux/time.h>
19 #include <linux/affs_fs.h>
20 #include <linux/fcntl.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/slab.h>
24 #include <linux/stat.h>
25 #include <linux/smp_lock.h>
26 #include <linux/dirent.h>
27 #include <linux/fs.h>
28 #include <linux/amigaffs.h>
29 #include <linux/mm.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/buffer_head.h>
34 #if PAGE_SIZE < 4096
35 #error PAGE_SIZE must be at least 4096
36 #endif
38 static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
39 static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
40 static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
41 static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
42 static ssize_t affs_file_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos);
43 static int affs_file_open(struct inode *inode, struct file *filp);
44 static int affs_file_release(struct inode *inode, struct file *filp);
/*
 * File operations for regular AFFS files: generic VFS helpers for
 * llseek/read/mmap/sendfile, plus an AFFS write wrapper that updates
 * the inode timestamps, and open/release hooks that maintain the
 * per-inode open count (used to free preallocated blocks on last close).
 */
46 struct file_operations affs_file_operations = {
47 .llseek = generic_file_llseek,
48 .read = generic_file_read,
49 .write = affs_file_write,
50 .mmap = generic_file_mmap,
51 .open = affs_file_open,
52 .release = affs_file_release,
53 .fsync = file_fsync,
54 .sendfile = generic_file_sendfile,
/* Inode operations for regular files: AFFS-specific truncate and
 * attribute-change handling. */
57 struct inode_operations affs_file_inode_operations = {
58 .truncate = affs_truncate,
59 .setattr = affs_notify_change,
62 static int
63 affs_file_open(struct inode *inode, struct file *filp)
65 if (atomic_read(&filp->f_count) != 1)
66 return 0;
67 pr_debug("AFFS: open(%d)\n", AFFS_I(inode)->i_opencnt);
68 AFFS_I(inode)->i_opencnt++;
69 return 0;
72 static int
73 affs_file_release(struct inode *inode, struct file *filp)
75 if (atomic_read(&filp->f_count) != 0)
76 return 0;
77 pr_debug("AFFS: release(%d)\n", AFFS_I(inode)->i_opencnt);
78 AFFS_I(inode)->i_opencnt--;
79 if (!AFFS_I(inode)->i_opencnt)
80 affs_free_prealloc(inode);
82 return 0;
/*
 * Grow the per-inode extended-block cache so that linear-cache index
 * @lc_idx becomes valid.
 *
 * The cache lives in one zeroed page split in two halves: the first
 * half is the "linear" cache i_lc (every 2^i_lc_shift'th extension key),
 * the second half is the associative cache i_ac.  When the file has
 * more extension blocks than the linear cache can index, the cache is
 * re-scaled: i_lc_shift grows and existing entries are compacted.
 *
 * Returns 0 on success, -ENOMEM if the cache page cannot be allocated,
 * -EIO if an extension block cannot be read while walking the chain.
 */
85 static int
86 affs_grow_extcache(struct inode *inode, u32 lc_idx)
88 struct super_block *sb = inode->i_sb;
89 struct buffer_head *bh;
90 u32 lc_max;
91 int i, j, key;
/* lazily allocate the shared cache page on first use */
93 if (!AFFS_I(inode)->i_lc) {
94 char *ptr = (char *)get_zeroed_page(GFP_NOFS);
95 if (!ptr)
96 return -ENOMEM;
97 AFFS_I(inode)->i_lc = (u32 *)ptr;
98 AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
101 lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;
103 if (AFFS_I(inode)->i_extcnt > lc_max) {
104 u32 lc_shift, lc_mask, tmp, off;
106 /* need to recalculate linear cache, start from old size */
107 lc_shift = AFFS_I(inode)->i_lc_shift;
108 tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
109 for (; tmp; tmp >>= 1)
110 lc_shift++;
111 lc_mask = (1 << lc_shift) - 1;
113 /* fix idx and old size to new shift */
114 lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
115 AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
117 /* first shrink old cache to make more space */
118 off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
119 for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
120 AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];
122 AFFS_I(inode)->i_lc_shift = lc_shift;
123 AFFS_I(inode)->i_lc_mask = lc_mask;
126 /* fill cache to the needed index */
127 i = AFFS_I(inode)->i_lc_size;
128 AFFS_I(inode)->i_lc_size = lc_idx + 1;
129 for (; i <= lc_idx; i++) {
/* entry 0 is the file header block itself */
130 if (!i) {
131 AFFS_I(inode)->i_lc[0] = inode->i_ino;
132 continue;
/* follow the on-disk extension chain 2^lc_shift steps from the
 * previous cached key to find the next one */
134 key = AFFS_I(inode)->i_lc[i - 1];
135 j = AFFS_I(inode)->i_lc_mask + 1;
136 // unlock cache
137 for (; j > 0; j--) {
138 bh = affs_bread(sb, key);
139 if (!bh)
140 goto err;
141 key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
142 affs_brelse(bh);
144 // lock cache
145 AFFS_I(inode)->i_lc[i] = key;
148 return 0;
150 err:
151 // lock cache
152 return -EIO;
/*
 * Allocate and initialize a new extension (T_LIST) block for @inode and
 * chain it behind the extension block @bh.
 *
 * The new block is zero-filled, given its header/tail fields and a valid
 * checksum, then linked from @bh's tail extension pointer; @bh's checksum
 * is adjusted incrementally (add new key, remove old — old should be 0).
 * The inode's extension-block count is bumped and the inode marked dirty.
 *
 * Returns the new buffer (referenced) or ERR_PTR(-ENOSPC/-EIO).
 */
155 static struct buffer_head *
156 affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
158 struct super_block *sb = inode->i_sb;
159 struct buffer_head *new_bh;
160 u32 blocknr, tmp;
/* allocate near the previous extension block for locality */
162 blocknr = affs_alloc_block(inode, bh->b_blocknr);
163 if (!blocknr)
164 return ERR_PTR(-ENOSPC);
166 new_bh = affs_getzeroblk(sb, blocknr);
167 if (!new_bh) {
/* give the block back: nothing on disk references it yet */
168 affs_free_block(sb, blocknr);
169 return ERR_PTR(-EIO);
172 AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
173 AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
174 AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
175 AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
176 affs_fix_checksum(sb, new_bh);
178 mark_buffer_dirty_inode(new_bh, inode);
/* link the new block from the previous extension block's tail */
180 tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
181 if (tmp)
182 affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
183 AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
184 affs_adjust_checksum(bh, blocknr - tmp);
185 mark_buffer_dirty_inode(bh, inode);
187 AFFS_I(inode)->i_extcnt++;
188 mark_inode_dirty(inode);
190 return new_bh;
193 static inline struct buffer_head *
194 affs_get_extblock(struct inode *inode, u32 ext)
196 /* inline the simplest case: same extended block as last time */
197 struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
198 if (ext == AFFS_I(inode)->i_ext_last)
199 atomic_inc(&bh->b_count);
200 else
201 /* we have to do more (not inlined) */
202 bh = affs_get_extblock_slow(inode, ext);
204 return bh;
/*
 * Slow path of affs_get_extblock(): resolve extension block @ext when it
 * is not the one cached in i_ext_bh.
 *
 * Lookup strategy, cheapest first:
 *   1. @ext is the successor of the cached block: follow its tail pointer
 *      (allocating a brand-new extension block if @ext == i_extcnt).
 *   2. @ext == 0: the file header block itself (key = i_ino).
 *   3. @ext is past the end of the file: allocate after block @ext - 1.
 *   4. Otherwise consult the linear cache (exact hit on every n'th key),
 *      then the associative cache, then walk the on-disk extension chain
 *      from the nearest known ancestor.
 *
 * On success the result also replaces i_ext_bh/i_ext_last (the one-entry
 * MRU cache) and is returned with an extra reference.  Errors are
 * returned as ERR_PTR; a failed affs_bread yields -EIO.
 */
207 static struct buffer_head *
208 affs_get_extblock_slow(struct inode *inode, u32 ext)
210 struct super_block *sb = inode->i_sb;
211 struct buffer_head *bh;
212 u32 ext_key;
213 u32 lc_idx, lc_off, ac_idx;
214 u32 tmp, idx;
216 if (ext == AFFS_I(inode)->i_ext_last + 1) {
217 /* read the next extended block from the current one */
218 bh = AFFS_I(inode)->i_ext_bh;
219 ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
220 if (ext < AFFS_I(inode)->i_extcnt)
221 goto read_ext;
222 if (ext > AFFS_I(inode)->i_extcnt)
223 BUG();
/* ext == i_extcnt: grow the file by one extension block */
224 bh = affs_alloc_extblock(inode, bh, ext);
225 if (IS_ERR(bh))
226 return bh;
227 goto store_ext;
230 if (ext == 0) {
231 /* we seek back to the file header block */
232 ext_key = inode->i_ino;
233 goto read_ext;
236 if (ext >= AFFS_I(inode)->i_extcnt) {
237 struct buffer_head *prev_bh;
239 /* allocate a new extended block */
240 if (ext > AFFS_I(inode)->i_extcnt)
241 BUG();
243 /* get previous extended block */
244 prev_bh = affs_get_extblock(inode, ext - 1);
245 if (IS_ERR(prev_bh))
246 return prev_bh;
247 bh = affs_alloc_extblock(inode, prev_bh, ext);
248 affs_brelse(prev_bh);
249 if (IS_ERR(bh))
250 return bh;
251 goto store_ext;
254 again:
255 /* check if there is an extended cache and whether it's large enough */
256 lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
257 lc_off = ext & AFFS_I(inode)->i_lc_mask;
259 if (lc_idx >= AFFS_I(inode)->i_lc_size) {
260 int err;
262 err = affs_grow_extcache(inode, lc_idx);
263 if (err)
264 return ERR_PTR(err);
265 goto again;
268 /* every n'th key we find in the linear cache */
269 if (!lc_off) {
270 ext_key = AFFS_I(inode)->i_lc[lc_idx];
271 goto read_ext;
274 /* maybe it's still in the associative cache */
275 ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
276 if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
277 ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
278 goto read_ext;
281 /* try to find one of the previous extended blocks */
282 tmp = ext;
283 idx = ac_idx;
284 while (--tmp, --lc_off > 0) {
285 idx = (idx - 1) & AFFS_AC_MASK;
286 if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
287 ext_key = AFFS_I(inode)->i_ac[idx].key;
288 goto find_ext;
292 /* fall back to the linear cache */
293 ext_key = AFFS_I(inode)->i_lc[lc_idx];
294 find_ext:
295 /* read all extended blocks until we find the one we need */
296 //unlock cache
297 do {
298 bh = affs_bread(sb, ext_key);
299 if (!bh)
300 goto err_bread;
301 ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
302 affs_brelse(bh);
303 tmp++;
304 } while (tmp < ext);
305 //lock cache
307 /* store it in the associative cache */
308 // recalculate ac_idx?
309 AFFS_I(inode)->i_ac[ac_idx].ext = ext;
310 AFFS_I(inode)->i_ac[ac_idx].key = ext_key;
312 read_ext:
313 /* finally read the right extended block */
314 //unlock cache
315 bh = affs_bread(sb, ext_key);
316 if (!bh)
317 goto err_bread;
318 //lock cache
320 store_ext:
321 /* release old cached extended block and store the new one */
322 affs_brelse(AFFS_I(inode)->i_ext_bh);
323 AFFS_I(inode)->i_ext_last = ext;
324 AFFS_I(inode)->i_ext_bh = bh;
325 atomic_inc(&bh->b_count);
327 return bh;
329 err_bread:
/* NOTE(review): bh is NULL or already released here; affs_brelse(NULL)
 * is presumably a no-op — verify against amigaffs.h */
330 affs_brelse(bh);
331 return ERR_PTR(-EIO);
/*
 * get_block callback for the normal (FFS) layout: map logical @block of
 * @inode to a device block in @bh_result, optionally allocating it.
 *
 * Only allows extending the file by exactly one block at a time
 * (block == i_blkcnt with @create); a request beyond that, or a read of
 * an unallocated block, fails via err_big.  For existing blocks @create
 * is cleared so no allocation happens.  The extension-block cache is
 * protected by affs_lock_ext() for the whole mapping/allocation.
 *
 * Returns 0 on success, -EIO for bad requests, -ENOSPC when allocation
 * fails, or the affs_get_extblock() error.
 */
334 static int
335 affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
337 struct super_block *sb = inode->i_sb;
338 struct buffer_head *ext_bh;
339 u32 ext;
341 pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
/* block numbers are stored on disk as 32-bit keys */
344 if (block > (sector_t)0x7fffffffUL)
345 BUG();
347 if (block >= AFFS_I(inode)->i_blkcnt) {
348 if (block > AFFS_I(inode)->i_blkcnt || !create)
349 goto err_big;
350 } else
351 create = 0;
353 //lock cache
354 affs_lock_ext(inode);
/* split the logical block into (extension block, slot) */
356 ext = (u32)block / AFFS_SB(sb)->s_hashsize;
357 block -= ext * AFFS_SB(sb)->s_hashsize;
358 ext_bh = affs_get_extblock(inode, ext);
359 if (IS_ERR(ext_bh))
360 goto err_ext;
361 map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));
363 if (create) {
364 u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
365 if (!blocknr)
366 goto err_alloc;
367 set_buffer_new(bh_result);
368 AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
369 AFFS_I(inode)->i_blkcnt++;
371 /* store new block */
372 if (bh_result->b_blocknr)
373 affs_warning(sb, "get_block", "block already set (%x)", bh_result->b_blocknr);
374 AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
375 AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
/* checksum delta: new key replaces old key, plus block_count bump */
376 affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
377 bh_result->b_blocknr = blocknr;
379 if (!block) {
380 /* insert first block into header block */
381 u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
382 if (tmp)
383 affs_warning(sb, "get_block", "first block already set (%d)", tmp);
384 AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
385 affs_adjust_checksum(ext_bh, blocknr - tmp);
389 affs_brelse(ext_bh);
390 //unlock cache
391 affs_unlock_ext(inode);
392 return 0;
394 err_big:
/* reached before the ext lock is taken, so no unlock here */
395 affs_error(inode->i_sb,"get_block","strange block request %d", block);
396 return -EIO;
397 err_ext:
398 // unlock cache
399 affs_unlock_ext(inode);
400 return PTR_ERR(ext_bh);
401 err_alloc:
402 brelse(ext_bh);
403 clear_buffer_mapped(bh_result);
404 bh_result->b_bdev = NULL;
405 // unlock cache
406 affs_unlock_ext(inode);
407 return -ENOSPC;
410 static int affs_writepage(struct page *page, struct writeback_control *wbc)
412 return block_write_full_page(page, affs_get_block, wbc);
414 static int affs_readpage(struct file *file, struct page *page)
416 return block_read_full_page(page, affs_get_block);
418 static int affs_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
420 return cont_prepare_write(page, from, to, affs_get_block,
421 &AFFS_I(page->mapping->host)->mmu_private);
423 static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
425 return generic_block_bmap(mapping,block,affs_get_block);
/* Address-space operations for the normal (FFS) block layout, built on
 * the generic buffer-head helpers. */
427 struct address_space_operations affs_aops = {
428 .readpage = affs_readpage,
429 .writepage = affs_writepage,
430 .sync_page = block_sync_page,
431 .prepare_write = affs_prepare_write,
432 .commit_write = generic_commit_write,
433 .bmap = _affs_bmap
436 static inline struct buffer_head *
437 affs_bread_ino(struct inode *inode, int block, int create)
439 struct buffer_head *bh, tmp_bh;
440 int err;
442 tmp_bh.b_state = 0;
443 err = affs_get_block(inode, block, &tmp_bh, create);
444 if (!err) {
445 bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
446 if (bh) {
447 bh->b_state |= tmp_bh.b_state;
448 return bh;
450 err = -EIO;
452 return ERR_PTR(err);
455 static inline struct buffer_head *
456 affs_getzeroblk_ino(struct inode *inode, int block)
458 struct buffer_head *bh, tmp_bh;
459 int err;
461 tmp_bh.b_state = 0;
462 err = affs_get_block(inode, block, &tmp_bh, 1);
463 if (!err) {
464 bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
465 if (bh) {
466 bh->b_state |= tmp_bh.b_state;
467 return bh;
469 err = -EIO;
471 return ERR_PTR(err);
474 static inline struct buffer_head *
475 affs_getemptyblk_ino(struct inode *inode, int block)
477 struct buffer_head *bh, tmp_bh;
478 int err;
480 tmp_bh.b_state = 0;
481 err = affs_get_block(inode, block, &tmp_bh, 1);
482 if (!err) {
483 bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
484 if (bh) {
485 bh->b_state |= tmp_bh.b_state;
486 return bh;
488 err = -EIO;
490 return ERR_PTR(err);
493 static ssize_t
494 affs_file_write(struct file *file, const char __user *buf,
495 size_t count, loff_t *ppos)
497 ssize_t retval;
499 retval = generic_file_write (file, buf, count, ppos);
500 if (retval >0) {
501 struct inode *inode = file->f_dentry->d_inode;
502 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
503 mark_inode_dirty(inode);
505 return retval;
508 static int
509 affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
511 struct inode *inode = page->mapping->host;
512 struct super_block *sb = inode->i_sb;
513 struct buffer_head *bh;
514 char *data;
515 u32 bidx, boff, bsize;
516 u32 tmp;
518 pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
519 if (from > to || to > PAGE_CACHE_SIZE)
520 BUG();
521 kmap(page);
522 data = page_address(page);
523 bsize = AFFS_SB(sb)->s_data_blksize;
524 tmp = (page->index << PAGE_CACHE_SHIFT) + from;
525 bidx = tmp / bsize;
526 boff = tmp % bsize;
528 while (from < to) {
529 bh = affs_bread_ino(inode, bidx, 0);
530 if (IS_ERR(bh))
531 return PTR_ERR(bh);
532 tmp = min(bsize - boff, to - from);
533 if (from + tmp > to || tmp > bsize)
534 BUG();
535 memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
536 affs_brelse(bh);
537 bidx++;
538 from += tmp;
539 boff = 0;
541 flush_dcache_page(page);
542 kunmap(page);
543 return 0;
/*
 * Extend an OFS file's allocated data (mmu_private) up to @newsize by
 * zero-filling: first top up the final partial data block, then append
 * fully zeroed data blocks, linking each new block into the OFS data
 * chain (sequence numbers, next pointers, checksums).
 *
 * Returns 0 on success, or a negative errno if a data block could not
 * be read or allocated.  NOTE(review): on the error path i_size and
 * mmu_private are still advanced to @newsize even though the extension
 * is incomplete — mirrors the upstream code, but looks suspicious.
 */
546 static int
547 affs_extent_file_ofs(struct inode *inode, u32 newsize)
549 struct super_block *sb = inode->i_sb;
550 struct buffer_head *bh, *prev_bh;
551 u32 bidx, boff;
552 u32 size, bsize;
553 u32 tmp;
555 pr_debug("AFFS: extent_file(%u, %d)\n", (u32)inode->i_ino, newsize);
556 bsize = AFFS_SB(sb)->s_data_blksize;
557 bh = NULL;
558 size = AFFS_I(inode)->mmu_private;
559 bidx = size / bsize;
560 boff = size % bsize;
/* last block is partially used: zero its tail first */
561 if (boff) {
562 bh = affs_bread_ino(inode, bidx, 0);
563 if (IS_ERR(bh))
564 return PTR_ERR(bh);
565 tmp = min(bsize - boff, newsize - size);
566 if (boff + tmp > bsize || tmp > bsize)
567 BUG();
568 memset(AFFS_DATA(bh) + boff, 0, tmp);
569 AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
570 affs_fix_checksum(sb, bh);
571 mark_buffer_dirty_inode(bh, inode);
572 size += tmp;
573 bidx++;
/* otherwise fetch the previous block so new blocks can be chained */
574 } else if (bidx) {
575 bh = affs_bread_ino(inode, bidx - 1, 0);
576 if (IS_ERR(bh))
577 return PTR_ERR(bh);
/* append zeroed data blocks until newsize is covered */
580 while (size < newsize) {
581 prev_bh = bh;
582 bh = affs_getzeroblk_ino(inode, bidx);
583 if (IS_ERR(bh))
584 goto out;
585 tmp = min(bsize, newsize - size);
586 if (tmp > bsize)
587 BUG();
588 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
589 AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
590 AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
591 AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
592 affs_fix_checksum(sb, bh);
593 bh->b_state &= ~(1UL << BH_New);
594 mark_buffer_dirty_inode(bh, inode);
/* link the new block from its predecessor's OFS next pointer */
595 if (prev_bh) {
596 u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
597 if (tmp)
598 affs_warning(sb, "extent_file_ofs", "next block already set for %d (%d)", bidx, tmp);
599 AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
600 affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
601 mark_buffer_dirty_inode(prev_bh, inode);
602 affs_brelse(prev_bh);
604 size += bsize;
605 bidx++;
607 affs_brelse(bh);
608 inode->i_size = AFFS_I(inode)->mmu_private = newsize;
609 return 0;
611 out:
612 inode->i_size = AFFS_I(inode)->mmu_private = newsize;
613 return PTR_ERR(bh);
616 static int
617 affs_readpage_ofs(struct file *file, struct page *page)
619 struct inode *inode = page->mapping->host;
620 u32 to;
621 int err;
623 pr_debug("AFFS: read_page(%u, %ld)\n", (u32)inode->i_ino, page->index);
624 to = PAGE_CACHE_SIZE;
625 if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
626 to = inode->i_size & ~PAGE_CACHE_MASK;
627 memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
630 err = affs_do_readpage_ofs(file, page, 0, to);
631 if (!err)
632 SetPageUptodate(page);
633 unlock_page(page);
634 return err;
/*
 * prepare_write for the OFS layout: make page bytes outside [from, to)
 * valid before the caller copies user data in.
 *
 * If the write starts beyond the allocated size, the file is first
 * zero-extended up to the write position.  For a page that is not yet
 * up to date, the head ([0, from)) is read from disk, the tail past @to
 * is zeroed, and any on-disk data beyond @to within i_size is read back
 * so the later commit does not clobber it.
 */
637 static int affs_prepare_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
639 struct inode *inode = page->mapping->host;
640 u32 size, offset;
641 u32 tmp;
642 int err = 0;
644 pr_debug("AFFS: prepare_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
645 offset = page->index << PAGE_CACHE_SHIFT;
/* writing past the allocated area: zero-extend the file first */
646 if (offset + from > AFFS_I(inode)->mmu_private) {
647 err = affs_extent_file_ofs(inode, offset + from);
648 if (err)
649 return err;
651 size = inode->i_size;
653 if (PageUptodate(page))
654 return 0;
/* read the part of the page before the write region */
656 if (from) {
657 err = affs_do_readpage_ofs(file, page, 0, from);
658 if (err)
659 return err;
661 if (to < PAGE_CACHE_SIZE) {
662 char *kaddr = kmap_atomic(page, KM_USER0);
664 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
665 flush_dcache_page(page);
666 kunmap_atomic(kaddr, KM_USER0);
/* file data exists past the write region: read it back in */
667 if (size > offset + to) {
668 if (size < offset + PAGE_CACHE_SIZE)
669 tmp = size & ~PAGE_CACHE_MASK;
670 else
671 tmp = PAGE_CACHE_SIZE;
672 err = affs_do_readpage_ofs(file, page, to, tmp);
675 return err;
/*
 * commit_write for the OFS layout: copy page bytes [from, to) into the
 * on-disk OFS data blocks.
 *
 * Three phases: (1) finish a partially filled block at the start,
 * (2) stream whole blocks with affs_getemptyblk_ino (no read needed),
 * (3) write a trailing partial block.  Newly allocated blocks (BH_New)
 * get a fresh OFS data header and are linked from their predecessor;
 * checksums are maintained incrementally.  i_size/mmu_private grow if
 * the write extended the file.
 *
 * Returns the number of bytes written, or a negative errno if nothing
 * was written before a block lookup/allocation failed.
 */
678 static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned from, unsigned to)
680 struct inode *inode = page->mapping->host;
681 struct super_block *sb = inode->i_sb;
682 struct buffer_head *bh, *prev_bh;
683 char *data;
684 u32 bidx, boff, bsize;
685 u32 tmp;
686 int written;
688 pr_debug("AFFS: commit_write(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
689 bsize = AFFS_SB(sb)->s_data_blksize;
690 data = page_address(page);
692 bh = NULL;
693 written = 0;
/* first data block touched by the write, and offset within it */
694 tmp = (page->index << PAGE_CACHE_SHIFT) + from;
695 bidx = tmp / bsize;
696 boff = tmp % bsize;
/* phase 1: top up the partially used block at the start */
697 if (boff) {
698 bh = affs_bread_ino(inode, bidx, 0);
699 if (IS_ERR(bh))
700 return PTR_ERR(bh);
701 tmp = min(bsize - boff, to - from);
702 if (boff + tmp > bsize || tmp > bsize)
703 BUG();
704 memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
705 AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
706 affs_fix_checksum(sb, bh);
707 mark_buffer_dirty_inode(bh, inode);
708 written += tmp;
709 from += tmp;
710 bidx++;
/* block-aligned start: fetch the predecessor for chaining */
711 } else if (bidx) {
712 bh = affs_bread_ino(inode, bidx - 1, 0);
713 if (IS_ERR(bh))
714 return PTR_ERR(bh);
/* phase 2: write full blocks without reading them first */
716 while (from + bsize <= to) {
717 prev_bh = bh;
718 bh = affs_getemptyblk_ino(inode, bidx);
719 if (IS_ERR(bh))
720 goto out;
721 memcpy(AFFS_DATA(bh), data + from, bsize);
722 if (buffer_new(bh)) {
723 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
724 AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
725 AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
726 AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
727 AFFS_DATA_HEAD(bh)->next = 0;
728 bh->b_state &= ~(1UL << BH_New);
/* link new block from its predecessor's OFS next pointer */
729 if (prev_bh) {
730 u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
731 if (tmp)
732 affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
733 AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
734 affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
735 mark_buffer_dirty_inode(prev_bh, inode);
738 affs_brelse(prev_bh);
739 affs_fix_checksum(sb, bh);
740 mark_buffer_dirty_inode(bh, inode);
741 written += bsize;
742 from += bsize;
743 bidx++;
/* phase 3: trailing partial block */
745 if (from < to) {
746 prev_bh = bh;
747 bh = affs_bread_ino(inode, bidx, 1);
748 if (IS_ERR(bh))
749 goto out;
750 tmp = min(bsize, to - from);
751 if (tmp > bsize)
752 BUG();
753 memcpy(AFFS_DATA(bh), data + from, tmp);
754 if (buffer_new(bh)) {
755 AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
756 AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
757 AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
758 AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
759 AFFS_DATA_HEAD(bh)->next = 0;
760 bh->b_state &= ~(1UL << BH_New);
761 if (prev_bh) {
762 u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
763 if (tmp)
764 affs_warning(sb, "commit_write_ofs", "next block already set for %d (%d)", bidx, tmp);
765 AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
766 affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
767 mark_buffer_dirty_inode(prev_bh, inode);
/* existing block: only grow its recorded payload size */
769 } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
770 AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
771 affs_brelse(prev_bh);
772 affs_fix_checksum(sb, bh);
773 mark_buffer_dirty_inode(bh, inode);
774 written += tmp;
775 from += tmp;
776 bidx++;
778 SetPageUptodate(page);
780 done:
781 affs_brelse(bh);
/* grow i_size/mmu_private if the write extended the file */
782 tmp = (page->index << PAGE_CACHE_SHIFT) + from;
783 if (tmp > inode->i_size)
784 inode->i_size = AFFS_I(inode)->mmu_private = tmp;
786 return written;
788 out:
/* bh is an ERR_PTR here; report it only if nothing was written yet */
789 bh = prev_bh;
790 if (!written)
791 written = PTR_ERR(bh);
792 goto done;
/* Address-space operations for the old (OFS) layout, where data blocks
 * carry headers and cannot use the generic buffer-head helpers.
 * No writepage: OFS writes go through prepare/commit_write only. */
795 struct address_space_operations affs_aops_ofs = {
796 .readpage = affs_readpage_ofs,
797 //.writepage = affs_writepage_ofs,
798 //.sync_page = affs_sync_page_ofs,
799 .prepare_write = affs_prepare_write_ofs,
800 .commit_write = affs_commit_write_ofs
803 /* Free any preallocated blocks. */
805 void
806 affs_free_prealloc(struct inode *inode)
808 struct super_block *sb = inode->i_sb;
810 pr_debug("AFFS: free_prealloc(ino=%lu)\n", inode->i_ino);
812 while (AFFS_I(inode)->i_pa_cnt) {
813 AFFS_I(inode)->i_pa_cnt--;
814 affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
818 /* Truncate (or enlarge) a file to the requested size. */
820 void
821 affs_truncate(struct inode *inode)
823 struct super_block *sb = inode->i_sb;
824 u32 ext, ext_key;
825 u32 last_blk, blkcnt, blk;
826 u32 size;
827 struct buffer_head *ext_bh;
828 int i;
830 pr_debug("AFFS: truncate(inode=%d, oldsize=%u, newsize=%u)\n",
831 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);
833 last_blk = 0;
834 ext = 0;
835 if (inode->i_size) {
836 last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
837 ext = last_blk / AFFS_SB(sb)->s_hashsize;
840 if (inode->i_size > AFFS_I(inode)->mmu_private) {
841 struct address_space *mapping = inode->i_mapping;
842 struct page *page;
843 u32 size = inode->i_size - 1;
844 int res;
846 page = grab_cache_page(mapping, size >> PAGE_CACHE_SHIFT);
847 if (!page)
848 return;
849 size = (size & (PAGE_CACHE_SIZE - 1)) + 1;
850 res = mapping->a_ops->prepare_write(NULL, page, size, size);
851 if (!res)
852 res = mapping->a_ops->commit_write(NULL, page, size, size);
853 unlock_page(page);
854 page_cache_release(page);
855 mark_inode_dirty(inode);
856 return;
857 } else if (inode->i_size == AFFS_I(inode)->mmu_private)
858 return;
860 // lock cache
861 ext_bh = affs_get_extblock(inode, ext);
862 if (IS_ERR(ext_bh)) {
863 affs_warning(sb, "truncate", "unexpected read error for ext block %u (%d)",
864 ext, PTR_ERR(ext_bh));
865 return;
867 if (AFFS_I(inode)->i_lc) {
868 /* clear linear cache */
869 i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
870 if (AFFS_I(inode)->i_lc_size > i) {
871 AFFS_I(inode)->i_lc_size = i;
872 for (; i < AFFS_LC_SIZE; i++)
873 AFFS_I(inode)->i_lc[i] = 0;
875 /* clear associative cache */
876 for (i = 0; i < AFFS_AC_SIZE; i++)
877 if (AFFS_I(inode)->i_ac[i].ext >= ext)
878 AFFS_I(inode)->i_ac[i].ext = 0;
880 ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
882 blkcnt = AFFS_I(inode)->i_blkcnt;
883 i = 0;
884 blk = last_blk;
885 if (inode->i_size) {
886 i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
887 blk++;
888 } else
889 AFFS_HEAD(ext_bh)->first_data = 0;
890 size = AFFS_SB(sb)->s_hashsize;
891 if (size > blkcnt - blk + i)
892 size = blkcnt - blk + i;
893 for (; i < size; i++, blk++) {
894 affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
895 AFFS_BLOCK(sb, ext_bh, i) = 0;
897 AFFS_TAIL(sb, ext_bh)->extension = 0;
898 affs_fix_checksum(sb, ext_bh);
899 mark_buffer_dirty_inode(ext_bh, inode);
900 affs_brelse(ext_bh);
902 if (inode->i_size) {
903 AFFS_I(inode)->i_blkcnt = last_blk + 1;
904 AFFS_I(inode)->i_extcnt = ext + 1;
905 if (AFFS_SB(sb)->s_flags & SF_OFS) {
906 struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
907 u32 tmp;
908 if (IS_ERR(ext_bh)) {
909 affs_warning(sb, "truncate", "unexpected read error for last block %u (%d)",
910 ext, PTR_ERR(ext_bh));
911 return;
913 tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
914 AFFS_DATA_HEAD(bh)->next = 0;
915 affs_adjust_checksum(bh, -tmp);
916 affs_brelse(bh);
918 } else {
919 AFFS_I(inode)->i_blkcnt = 0;
920 AFFS_I(inode)->i_extcnt = 1;
922 AFFS_I(inode)->mmu_private = inode->i_size;
923 // unlock cache
925 while (ext_key) {
926 ext_bh = affs_bread(sb, ext_key);
927 size = AFFS_SB(sb)->s_hashsize;
928 if (size > blkcnt - blk)
929 size = blkcnt - blk;
930 for (i = 0; i < size; i++, blk++)
931 affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
932 affs_free_block(sb, ext_key);
933 ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
934 affs_brelse(ext_bh);
936 affs_free_prealloc(inode);