/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com)
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <scsi/scsi_device.h>

#include "exofs.h"
#define EXOFS_DBGMSG2(M...) do {} while (0)
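/* Cap on the number of pages per kmalloc'ed bio, chosen so that the bio
 * struct together with its inline bio_vec array fits in a single page.
 */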
enum { BIO_MAX_PAGES_KMALLOC =
	(PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
};
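/* page_collect accumulates contiguous pages into a single bio/io_state so
 * that they can be submitted to the OSD as one request.
 */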
struct page_collect {
	struct exofs_sb_info *sbi;
	struct request_queue *req_q;
	struct inode *inode;
	unsigned expected_pages;
	struct exofs_io_state *ios;

	struct bio *bio;
	unsigned nr_pages;
	unsigned long length;
	loff_t pg_first; /* keep 64bit also in 32-arches */
};
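/* Prepare a page_collect for up to expected_pages pages of the given inode;
 * nothing is allocated until pcol_try_alloc().
 */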
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
		       struct inode *inode)
{
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;

	pcol->sbi = sbi;
	/* Create master bios on first Q, later on cloning, each clone will be
	 * allocated on its destination Q
	 */
	pcol->req_q = osd_request_queue(sbi->s_ods[0]);
	pcol->inode = inode;
	pcol->expected_pages = expected_pages;

	pcol->ios = NULL;
	pcol->bio = NULL;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
}
static void _pcol_reset(struct page_collect *pcol)
{
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->bio = NULL;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->ios = NULL;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = BIO_MAX_PAGES_KMALLOC;
}
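/* Allocate the io_state (first time only) and a bio big enough for the pages
 * we expect; on bio allocation failure keep retrying with half the size until
 * a bio can be kmalloc'ed.
 */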
static int pcol_try_alloc(struct page_collect *pcol)
{
	int pages = min_t(unsigned, pcol->expected_pages,
			  BIO_MAX_PAGES_KMALLOC);

	if (!pcol->ios) { /* First time allocate io_state */
		int ret = exofs_get_io_state(pcol->sbi, &pcol->ios);

		if (ret)
			return ret;
	}

	for (; pages; pages >>= 1) {
		pcol->bio = bio_kmalloc(GFP_KERNEL, pages);
		if (likely(pcol->bio))
			return 0;
	}

	EXOFS_ERR("Failed to bio_kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}
static void pcol_free(struct page_collect *pcol)
{
	if (pcol->ios) {
		exofs_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}
}
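/* Append one page to the collection's bio; fails when the bio is full, in
 * which case the caller must split the request.
 */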
static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	int added_len = bio_add_pc_page(pcol->req_q, pcol->bio, page, len, 0);

	if (unlikely(len != added_len))
		return -ENOMEM;

	++pcol->nr_pages;
	pcol->length += len;
	return 0;
}
static int update_read_page(struct page *page, int ret)
{
	if (ret == 0) {
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
	} else if (ret == -EFAULT) {
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes.  This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked).
		 */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0; /* recovered error */
		EXOFS_DBGMSG("recovered read error\n");
	} else /* Error */
		SetPageError(page);

	return ret;
}
static void update_write_page(struct page *page, int ret)
{
	if (ret) {
		mapping_set_error(page->mapping, ret);
		SetPageError(page);
	}
	end_page_writeback(page);
}
/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol, bool do_unlock)
{
	struct bio_vec *bvec;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = exofs_check_io(pcol->ios, &resid);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
			      inode->i_ino, page->index,
			      page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		if (do_unlock)
			unlock_page(page);
		length += bvec->bv_len;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG("readpages_done END\n");
	return ret;
}
/* callback of async reads */
static void readpages_done(struct exofs_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	__readpages_done(pcol, true);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	struct bio_vec *bvec;
	int i;

	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
	pcol_free(pcol);
}
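/* Submit the collected pages as a single OSD read. Synchronous reads complete
 * inline via __readpages_done(); asynchronous reads complete later in the
 * readpages_done() callback.
 */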
static int read_exec(struct page_collect *pcol, bool is_sync)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct exofs_io_state *ios = pcol->ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->bio)
		return 0;

	/* see comment in _readpage() about sync reads */
	WARN_ON(is_sync && (pcol->nr_pages != 1));

	ios->bio = pcol->bio;
	ios->length = pcol->length;
	ios->offset = pcol->pg_first << PAGE_CACHE_SHIFT;

	if (is_sync) {
		exofs_oi_read(oi, pcol->ios);
		return __readpages_done(pcol, false);
	}

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;
	ret = exofs_oi_read(oi, ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	EXOFS_DBGMSG("read_exec obj=0x%llx start=0x%llx length=0x%lx\n",
		     ios->obj.id, _LLU(ios->offset), pcol->length);

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	if (!is_sync)
		_unlock_pcol_pages(pcol, ret, READ);
	kfree(pcol_copy);
	return ret;
}
/* readpage_strip is called either directly from readpage() or by the VFS from
 * within read_cache_pages(), to add one more page to be read. It will try to
 * collect as many contiguous pages as possible. If a discontinuity is
 * encountered, or it runs out of resources, it will submit the previous
 * segment and will start a new collection. Eventually caller must submit the
 * last segment if present.
 */
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx, 0x%lx) empty page,"
			     " splitting\n", inode->i_ino, page->index);

		return read_exec(pcol, false);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol, false);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->bio) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			      "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			      page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol, false);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol, false);
}
static int _readpage(struct page *page, bool is_sync)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	/* readpage_strip might call read_exec(,is_sync==false) at several
	 * places but not if we have a single page.
	 */
	ret = readpage_strip(&pcol, page);
	if (ret) {
		EXOFS_ERR("_readpage => %d\n", ret);
		return ret;
	}

	return read_exec(&pcol, is_sync);
}
/*
 * We don't need the file
 */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}
/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct exofs_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	struct bio_vec *bvec;
	int i;
	u64 resid;
	u64 good_bytes;
	u64 length = 0;
	int ret = exofs_check_io(ios, &resid);

	atomic_dec(&pcol->sbi->s_curr_pending);

	if (likely(!ret))
		good_bytes = pcol->length;
	else
		good_bytes = pcol->length - resid;

	EXOFS_DBGMSG("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	__bio_for_each_segment(bvec, pcol->bio, i, 0) {
		struct page *page = bvec->bv_page;
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
			      inode->i_ino, page->index, page_stat);

		length += bvec->bv_len;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG("writepages_done END\n");
}
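/* Submit the collected pages as a single OSD write. Completion and per-page
 * status updates are handled asynchronously in writepages_done().
 */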
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct exofs_io_state *ios = pcol->ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->bio)
		return 0;

	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */

	ios->bio = pcol_copy->bio;
	ios->offset = pcol_copy->pg_first << PAGE_CACHE_SHIFT;
	ios->length = pcol_copy->length;
	ios->done = writepages_done;
	ios->private = pcol_copy;

	ret = exofs_oi_write(oi, ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: exofs_oi_write() failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	EXOFS_DBGMSG("write_exec(0x%lx, 0x%llx) start=0x%llx length=0x%lx\n",
		     pcol->inode->i_ino, pcol->pg_first, _LLU(ios->offset),
		     pcol->length);
	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	kfree(pcol_copy);
	return ret;
}
/* writepage_strip is called either directly from writepage() or by the VFS
 * from within write_cache_pages(), to add one more page to be written to
 * storage. It will try to collect as many contiguous pages as possible. If a
 * discontinuity is encountered or it runs out of resources it will submit the
 * previous segment and will start a new collection.
 * Eventually caller must submit the last segment if present.
 */
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->bio) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		      inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG("Failed pcol_add_page "
			     "nr_pages=%u total_length=0x%lx\n",
			     pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		     "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		     mapping->host->i_ino, wbc->range_start, wbc->range_end,
		     mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);

	ret = writepage_strip(page, NULL, &pcol);
	if (ret) {
		EXOFS_ERR("exofs_writepage => %d\n", ret);
		return ret;
	}

	return write_exec(&pcol);
}
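/* If the write does not cover a whole page and the page is not already
 * uptodate, read it in first (read-modify-write).
 */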
int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			return ret;
		}

		page = *pagep;
	}

	/* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		ret = _readpage(page, true);
		if (ret) {
			/* SetPageError was done by _readpage. Is it ok? */
			unlock_page(page);
			EXOFS_DBGMSG("__readpage_filler failed\n");
		}
	}

	return ret;
}
static int exofs_write_begin_export(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}
static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* According to comment in simple_write_end i_mutex is held */
	loff_t i_size = inode->i_size;
	int ret;

	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (i_size != inode->i_size)
		mark_inode_dirty(inode);
	return ret;
}
const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= exofs_writepage,
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
};
/******************************************************************************
 * INODE OPERATIONS
 *****************************************************************************/

/*
 * Test whether an inode is a fast symlink.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
}
/*
 * get_block_t - Fill in a buffer_head
 * An OSD takes care of block allocation so we just fake an allocation by
 * putting in the inode's sector_t in the buffer_head.
 * TODO: What about the case of create==0 and @iblock does not exist in the
 * object?
 */
static int exofs_get_block(struct inode *inode, sector_t iblock,
		    struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);
	return 0;
}
const struct osd_attr g_attr_logical_length = ATTR_DEF(
	OSD_APAGE_OBJECT_INFORMATION, OSD_ATTR_OI_LOGICAL_LENGTH, 8);
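/* Update the timestamps, zero the tail of a partial last page and ask the OSD
 * to set the object's logical length to the in-core i_size.
 */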
static int _do_truncate(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t isize = i_size_read(inode);
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	nobh_truncate_page(inode->i_mapping, isize, exofs_get_block);

	ret = exofs_oi_truncate(oi, (u64)isize);
	EXOFS_DBGMSG("(0x%lx) size=0x%llx\n", inode->i_ino, isize);
	return ret;
}
/*
 * Truncate a file to the specified size - all we have to do is set the size
 * attribute.  We make sure the object exists first.
 */
void exofs_truncate(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	int ret;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
	      || S_ISLNK(inode->i_mode)))
		return;
	if (exofs_inode_is_fast_symlink(inode))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	/* if we are about to truncate an object, and it hasn't been
	 * created yet, wait
	 */
	if (unlikely(wait_obj_created(oi)))
		goto fail;

	ret = _do_truncate(inode);
	if (ret)
		goto fail;

out:
	mark_inode_dirty(inode);
	return;
fail:
	make_bad_inode(inode);
	goto out;
}
/*
 * Set inode attributes - just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, iattr);
	if (error)
		return error;

	error = inode_setattr(inode, iattr);
	return error;
}
/*
 * Read an inode from the OSD, and return it as is.  We also return the size
 * attribute in the 'obj_size' argument.
 */
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
		    struct exofs_fcb *inode, uint64_t *obj_size)
{
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct osd_attr attrs[2];
	struct exofs_io_state *ios;
	int ret;

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
		return ret;
	}

	ios->obj.id = exofs_oi_objno(oi);
	exofs_make_credential(oi->i_cred, &ios->obj);
	ios->cred = oi->i_cred;

	attrs[0] = g_attr_inode_data;
	attrs[1] = g_attr_logical_length;
	ios->in_attr = attrs;
	ios->in_attr_len = ARRAY_SIZE(attrs);

	ret = exofs_sbi_read(ios);
	if (ret)
		goto out;

	ret = extract_attr_from_ios(ios, &attrs[0]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
		goto out;
	}
	WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
	memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);

	ret = extract_attr_from_ios(ios, &attrs[1]);
	if (ret) {
		EXOFS_ERR("%s: extract_attr of logical_length failed\n",
			  __func__);
		goto out;
	}
	*obj_size = get_unaligned_be64(attrs[1].val_ptr);

out:
	exofs_put_io_state(ios);
	return ret;
}
static void __oi_init(struct exofs_i_info *oi)
{
	init_waitqueue_head(&oi->i_wq);
}
/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	uint64_t obj_size;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb, &obj_size);
	if (ret)
		goto bad_inode;

	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	inode->i_nlink = le16_to_cpu(fcb.i_links_count);
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	if ((inode->i_size != obj_size) &&
		(!exofs_inode_is_fast_symlink(inode))) {
		EXOFS_ERR("WARNING: Size of inode=%llu != object=%llu\n",
			  inode->i_size, _LLU(obj_size));
		/* FIXME: call exofs_inode_recovery() */
	}

	oi->i_dir_start_lookup = 0;

	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
				old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
				new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
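/* Wait until the object backing this inode has actually been created on the
 * OSD; returns -EIO if the inode went bad while waiting.
 */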
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
/*
 * Callback function from exofs_new_inode().  The important thing is that we
 * set the obj_created flag so that other methods know that the object exists
 * on the OSD.
 */
static void create_done(struct exofs_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = exofs_check_io(ios, NULL);
	exofs_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)), _LLU(sbi->s_pid));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
		make_bad_inode(inode);
	} else
		set_obj_created(oi);

	atomic_dec(&inode->i_count);
	wake_up(&oi->i_wq);
}
/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct exofs_sb_info *sbi;
	struct exofs_io_state *ios;
	int ret;

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	set_obj_2bcreated(oi);

	sbi = sb->s_fs_info;

	inode->i_uid = current->cred->fsuid;
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		inode->i_gid = current->cred->fsgid;
	}
	inode->i_mode = mode;

	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	mark_inode_dirty(inode);

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: exofs_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->obj.id = exofs_oi_objno(oi);
	exofs_make_credential(oi->i_cred, &ios->obj);

	/* increment the refcount so that the inode will still be around when
	 * we reach the callback
	 */
	atomic_inc(&inode->i_count);

	ios->done = create_done;
	ios->private = inode;
	ios->cred = oi->i_cred;
	ret = exofs_sbi_create(ios);
	if (ret) {
		atomic_dec(&inode->i_count);
		exofs_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}
/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;
	struct exofs_fcb	fcb;
};

/*
 * Callback function from exofs_update_inode().
 */
static void updatei_done(struct exofs_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	exofs_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}
/*
 * Write the inode to the OSD.  Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct exofs_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	fcb = &args->fcb;

	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev))
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
		else
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = exofs_oi_write(oi, ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	exofs_put_io_state(ios);
free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("ret=>%d\n", ret);
	return ret;
}
int exofs_write_inode(struct inode *inode, int wait)
{
	return exofs_update_inode(inode, wait);
}
/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct exofs_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	exofs_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}
/*
 * Called when the refcount of an inode reaches zero.  We remove the object
 * from the OSD here.  We make sure the object was created before we try and
 * delete it.
 */
void exofs_delete_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct exofs_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	mark_inode_dirty(inode);
	exofs_update_inode(inode, inode_needs_sync(inode));

	inode->i_size = 0;
	if (inode->i_blocks)
		exofs_truncate(inode);

	clear_inode(inode);

	ret = exofs_get_io_state(sbi, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: exofs_get_io_state failed\n", __func__);
		return;
	}

	/* if we are deleting an obj that hasn't been created yet, wait */
	if (!obj_created(oi)) {
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
	}

	ios->obj.id = exofs_oi_objno(oi);
	ios->done = delete_done;
	ios->private = sbi;
	ios->cred = oi->i_cred;
	ret = exofs_sbi_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: exofs_sbi_remove failed\n", __func__);
		exofs_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	clear_inode(inode);
}