/*
 * fs/logfs/dev_mtd.c - Device access methods for MTD
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/completion.h>
#include <linux/mount.h>
#include <linux/sched.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
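/*
 * Thin synchronous wrapper around mtd->read().  The MTD interface reports
 * the number of bytes actually transferred through retlen, so a short read
 * can be detected even when the call itself returns success.
 */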
static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	size_t retlen;
	int ret;

	ret = mtd->read(mtd, ofs, len, &retlen, buf);
	BUG_ON(ret == -EINVAL);
	if (ret)
		return ret;

	/* Not sure if we should loop instead. */
	if (retlen != len)
		return -EIO;

	return 0;
}
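/*
 * Writes must honour the device write granularity: the offset has to be
 * aligned to 1 << s_writeshift and a single write may not exceed one page.
 * The BUG_ON()s below encode these invariants instead of returning errors,
 * since violating them would indicate a logfs bug rather than bad input.
 */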
static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
{
	struct logfs_super *super = logfs_super(sb);
	struct mtd_info *mtd = super->s_mtd;
	size_t retlen;
	loff_t page_start, page_end;
	int ret;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
	BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
	BUG_ON(len > PAGE_CACHE_SIZE);
	page_start = ofs & PAGE_CACHE_MASK;
	page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
	ret = mtd->write(mtd, ofs, len, &retlen, buf);
	if (ret || (retlen != len))
		return -EIO;

	return 0;
}
/*
 * For as long as I can remember (since about 2001) mtd->erase has been an
 * asynchronous interface lacking the first driver to actually use the
 * asynchronous properties.  So just to prevent the first implementor of such
 * a thing from breaking logfs in 2350, we do the usual pointless dance to
 * declare a completion variable and wait for completion before returning
 * from mtd_erase().  What an exercise in futility!
 */
static void logfs_erase_callback(struct erase_info *ei)
{
	complete((struct completion *)ei->priv);
}
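/*
 * Freshly erased flash reads back as all-0xFF.  Any pages of the erased
 * region still present in the mapping inode's page cache are filled with
 * 0xFF here so the cached copy keeps matching the medium.
 */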
static int mtd_erase_mapping(struct super_block *sb, loff_t ofs,
		size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	pgoff_t index;

	for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
		page = find_get_page(mapping, index);
		if (!page)
			continue;
		memset(page_address(page), 0xFF, PAGE_SIZE);
		page_cache_release(page);
	}
	return 0;
}
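/*
 * Note on ensure_write: it is part of the logfs_device_ops erase signature,
 * but the MTD backend has no use for it since an MTD erase always reaches
 * the medium; presumably only the block device backend acts on it.
 */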
static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len,
		int ensure_write)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;
	struct erase_info ei;
	DECLARE_COMPLETION_ONSTACK(complete);
	int ret;

	BUG_ON(len % mtd->erasesize);
	if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = len;
	ei.callback = logfs_erase_callback;
	ei.priv = (long)&complete;
	ret = mtd->erase(mtd, &ei);
	if (ret)
		return -EIO;

	wait_for_completion(&complete);
	if (ei.state != MTD_ERASE_DONE)
		return -EIO;
	return mtd_erase_mapping(sb, ofs, len);
}
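/* Not every MTD driver implements sync, hence the NULL check. */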
static void mtd_sync(struct super_block *sb)
{
	struct mtd_info *mtd = logfs_super(sb)->s_mtd;

	if (mtd->sync)
		mtd->sync(mtd);
}
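/*
 * -EUCLEAN from an MTD read means the data was recovered via ECC: the
 * contents are valid, but the block is degrading.  Treating it as success
 * is safe; the FIXME below is about eventually forcing garbage collection
 * so the segment gets rewritten to healthier flash.
 */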
static int mtd_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	int err;

	err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
			page_address(page));
	if (err == -EUCLEAN) {
		err = 0;
		/* FIXME: force GC this segment */
	}
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
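/*
 * Find the first superblock, skipping any bad erase blocks at the start of
 * the device.  Without a block_isbad() method there is no way to do this
 * safely, so the function gives up and returns NULL.
 */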
static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd->block_isbad)
		return NULL;

	*ofs = 0;
	while (mtd->block_isbad(mtd, *ofs)) {
		*ofs += mtd->erasesize;
		if (*ofs >= mtd->size)
			return NULL;
	}
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
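/*
 * Mirror image of mtd_find_first_sb(): scan backwards from the end of the
 * device for the last good erase block.  The superblock lives 0x1000 bytes
 * before the end of that block.
 */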
static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = mtd_readpage;
	struct mtd_info *mtd = super->s_mtd;

	if (!mtd->block_isbad)
		return NULL;

	*ofs = mtd->size - mtd->erasesize;
	while (mtd->block_isbad(mtd, *ofs)) {
		*ofs -= mtd->erasesize;
		if (*ofs <= 0)
			return NULL;
	}
	*ofs = *ofs + mtd->erasesize - 0x1000;
	BUG_ON(*ofs & ~PAGE_MASK);
	return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
}
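/*
 * Write nr_pages consecutive pages from the mapping inode's page cache to
 * the device.  Every page must already be present in the cache; a missing
 * page would mean the caller asked to write data that was never buffered,
 * hence the BUG_ON().
 */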
static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct page *page;
	int i, err;

	for (i = 0; i < nr_pages; i++) {
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);

		err = mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
				page_address(page));
		unlock_page(page);
		page_cache_release(page);
		if (err)
			return err;
	}
	return 0;
}
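/*
 * Round the region down to a page boundary at the front and up to one at
 * the back, since __mtd_writeseg() operates on whole cached pages.
 */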
static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return;

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}
static void mtd_put_device(struct super_block *sb)
{
	put_mtd_device(logfs_super(sb)->s_mtd);
}
static const struct logfs_device_ops mtd_devops = {
	.find_first_sb	= mtd_find_first_sb,
	.find_last_sb	= mtd_find_last_sb,
	.readpage	= mtd_readpage,
	.writeseg	= mtd_writeseg,
	.erase		= mtd_erase,
	.sync		= mtd_sync,
	.put_device	= mtd_put_device,
};
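/*
 * Mount-time entry point: take a reference on the MTD device and hand it,
 * together with the ops table above, to the generic device setup code.
 */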
int logfs_get_sb_mtd(struct file_system_type *type, int flags,
		int mtdnr, struct vfsmount *mnt)
{
	struct mtd_info *mtd;
	const struct logfs_device_ops *devops = &mtd_devops;

	mtd = get_mtd_device(NULL, mtdnr);
	return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt);
}