/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 */
/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks as well as address space operations.
 *
 * UBIFS uses 2 page flags: PG_private and PG_checked. PG_private is set if the
 * page is dirty and is used for budgeting purposes - dirty pages should not be
 * budgeted again. The PG_checked flag is set if full budgeting is required for
 * the page, e.g., when it corresponds to a file hole or it is just beyond the
 * file size. The budgeting is done in 'ubifs_write_begin()', because it is OK
 * to fail in this function, and the budget is released in 'ubifs_write_end()'.
 * So the PG_private and PG_checked flags carry the information about how the
 * page was budgeted, to make it possible to release the budget properly.
 *
 * A thing to keep in mind: inode's 'i_mutex' is locked in most VFS operations
 * we implement. However, this is not true for '->writepage()', which might be
 * called with 'i_mutex' unlocked. For example, when pdflush is performing
 * write-back, it calls 'writepage()' with unlocked 'i_mutex', although the
 * inode has the 'I_LOCK' flag set in this case. On "normal" work paths
 * 'i_mutex' is locked in '->writepage()', e.g. in the "sys_write -> alloc_pages
 * -> direct reclaim path". So, in '->writepage()' we are only guaranteed that
 * the page is locked.
 *
 * Similarly, 'i_mutex' does not have to be locked in readpage(), e.g., the
 * readahead path does not have it locked ("sys_read -> generic_file_aio_read
 * -> ondemand_readahead -> readpage"). In case of readahead, the 'I_LOCK' flag
 * is not set either. However, UBIFS disables readahead.
 *
 * This, for example, means that there might be 2 concurrent '->writepage()'
 * calls for the same inode, but for different dirty pages of that inode.
 */
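/*
 * Roughly speaking (see 'cancel_budget()' and 'ubifs_invalidatepage()' below
 * for the exact code), the two flags encode how a page's budget has to be
 * released:
 *
 *   PG_private set               - the page is dirty and was budgeted by
 *                                  whoever dirtied it, nothing to release;
 *   PG_private clear, PG_checked - release the "new page" budget;
 *   PG_private clear, unchecked  - release the "existing page" budget.
 */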
#include "ubifs.h"
#include <linux/mount.h>
#include <linux/namei.h>
static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
{
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err, len, out_len;
        union ubifs_key key;
        unsigned int dlen;

        data_key_init(c, &key, inode->i_ino, block);
        err = ubifs_tnc_lookup(c, &key, dn);
        if (err) {
                if (err == -ENOENT)
                        /* Not found, so it must be a hole */
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                return err;
        }

        ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);

        len = le32_to_cpu(dn->size);
        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                goto dump;

        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
        out_len = UBIFS_BLOCK_SIZE;
        err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
                               le16_to_cpu(dn->compr_type));
        if (err || len != out_len)
                goto dump;

        /*
         * Data length can be less than a full block, even for blocks that are
         * not the last in the file (e.g., as a result of making a hole and
         * appending data). Ensure that the remainder is zeroed out.
         */
        if (len < UBIFS_BLOCK_SIZE)
                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

        return 0;

dump:
        ubifs_err("bad data node (block %u, inode %lu)",
                  block, inode->i_ino);
        dbg_dump_node(c, dn);
        return -EINVAL;
}
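/*
 * Note: 'read_block()' fills one UBIFS_BLOCK_SIZE chunk of a page. A page may
 * be larger than a UBIFS block, so 'do_readpage()' below calls it up to
 * UBIFS_BLOCKS_PER_PAGE times, advancing @addr by UBIFS_BLOCK_SIZE and @block
 * by one on each iteration.
 */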
static int do_readpage(struct page *page)
{
        void *addr;
        int err = 0, i;
        unsigned int block, beyond;
        struct ubifs_data_node *dn;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);
        ubifs_assert(!PageChecked(page));
        ubifs_assert(!PagePrivate(page));

        addr = kmap(page);

        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
        if (block >= beyond) {
                /* Reading beyond inode */
                SetPageChecked(page);
                memset(addr, 0, PAGE_CACHE_SIZE);
                goto out;
        }

        dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
        if (!dn) {
                err = -ENOMEM;
                goto error;
        }

        i = 0;
        while (1) {
                int ret;

                if (block >= beyond) {
                        /* Reading beyond inode */
                        err = -ENOENT;
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                } else {
                        ret = read_block(inode, addr, block, dn);
                        if (ret) {
                                err = ret;
                                if (err != -ENOENT)
                                        break;
                        } else if (block + 1 == beyond) {
                                int dlen = le32_to_cpu(dn->size);
                                int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

                                if (ilen && ilen < dlen)
                                        memset(addr + ilen, 0, dlen - ilen);
                        }
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += UBIFS_BLOCK_SIZE;
        }
        if (err) {
                if (err == -ENOENT) {
                        /* Not found, so it must be a hole */
                        SetPageChecked(page);
                        goto out_free;
                }
                ubifs_err("cannot read page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                goto error;
        }

out_free:
        kfree(dn);
out:
        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return 0;

error:
        kfree(dn);
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        return err;
}
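/*
 * Note: 'do_readpage()' is not only the '->readpage()' back-end - it is also
 * called from the write path ('write_begin_slow()', 'ubifs_write_begin()' and
 * 'ubifs_write_end()') whenever a page which is only partially overwritten
 * has to be brought up-to-date first.
 */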
/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

        ubifs_release_budget(c, &req);
}
/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
        struct ubifs_budget_req req = { .dd_growth = c->page_budget };

        ubifs_release_budget(c, &req);
}
static int write_begin_slow(struct address_space *mapping,
                            loff_t pos, unsigned len, struct page **pagep)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct ubifs_budget_req req = { .new_page = 1 };
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        struct page *page;

        dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
                inode->i_ino, pos, len, inode->i_size);

        /*
         * At the slow path we have to budget before locking the page, because
         * budgeting may force write-back, which would wait on locked pages and
         * deadlock if we had the page locked. At this point we do not know
         * anything about the page, so assume that this is a new page which is
         * written to a hole. This corresponds to largest budget. Later the
         * budget will be amended if this is not true.
         */
        if (appending)
                /* We are appending data, budget for inode change */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err))
                return err;

        page = __grab_cache_page(mapping, index);
        if (unlikely(!page)) {
                ubifs_release_budget(c, &req);
                return -ENOMEM;
        }

        if (!PageUptodate(page)) {
                if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
                        SetPageChecked(page);
                else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                page_cache_release(page);
                                ubifs_release_budget(c, &req);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        if (PagePrivate(page))
                /*
                 * The page is dirty, which means it was budgeted twice:
                 *   o first time the budget was allocated by the task which
                 *     made the page dirty and set the PG_private flag;
                 *   o and then we budgeted for it for the second time at the
                 *     very beginning of this function.
                 *
                 * So what we have to do is to release the page budget we
                 * allocated.
                 */
                release_new_page_budget(c);
        else if (!PageChecked(page))
                /*
                 * We are changing a page which already exists on the media.
                 * This means that changing the page does not make the amount
                 * of indexing information larger, and this part of the budget
                 * which we have already acquired may be released.
                 */
                ubifs_convert_page_budget(c);

        if (appending) {
                struct ubifs_inode *ui = ubifs_inode(inode);

                /*
                 * 'ubifs_write_end()' is optimized from the fast-path part of
                 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
                 * if data is appended.
                 */
                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The inode is dirty already, so we may free the
                         * budget we allocated.
                         */
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        *pagep = page;
        return 0;
}
/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
 * in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
                           struct ubifs_inode *ui, int appending)
{
        struct ubifs_budget_req req = { .fast = 1 };

        if (PagePrivate(page)) {
                if (!appending)
                        /*
                         * The page is dirty and we are not appending, which
                         * means no budget is needed at all.
                         */
                        return 0;

                mutex_lock(&ui->ui_mutex);
                if (ui->dirty)
                        /*
                         * The page is dirty and we are appending, so the inode
                         * has to be marked as dirty. However, it is already
                         * dirty, so we do not need any budget. We may return,
                         * but @ui->ui_mutex has to be left locked because we
                         * should prevent write-back from flushing the inode
                         * and freeing the budget. The lock will be released in
                         * 'ubifs_write_end()'.
                         */
                        return 0;

                /*
                 * The page is dirty, we are appending, the inode is clean, so
                 * we need to budget the inode change.
                 */
                req.dirtied_ino = 1;
        } else {
                if (PageChecked(page))
                        /*
                         * The page corresponds to a hole and does not exist on
                         * the media. So changing it makes the amount of
                         * indexing information larger, and we have to budget
                         * for a new page.
                         */
                        req.new_page = 1;
                else
                        /*
                         * Not a hole, the change will not add any new
                         * indexing information, budget for page change.
                         */
                        req.dirtied_page = 1;

                if (appending) {
                        mutex_lock(&ui->ui_mutex);
                        if (!ui->dirty)
                                /*
                                 * The inode is clean but we will have to mark
                                 * it as dirty because we are appending. This
                                 * needs a budget.
                                 */
                                req.dirtied_ino = 1;
                }
        }

        return ubifs_budget_space(c, &req);
}
/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
 *
 * There are many budgeting cases:
 *     o a new page is appended - we have to budget for a new page and for
 *       changing the inode; however, if the inode is already dirty, there is
 *       no need to budget for it;
 *     o an existing clean page is changed - we have to budget for it; if the
 *       page does not exist on the media (a hole), we have to budget for a new
 *       page; otherwise, we may budget for changing an existing page; the
 *       difference between these cases is that changing an existing page does
 *       not introduce anything new to the FS indexing information, so it does
 *       not grow, and a smaller budget is acquired in this case;
 *     o an existing dirty page is changed - no need to budget at all, because
 *       the page budget has been acquired earlier, when the page was made
 *       dirty.
 *
 * UBIFS budgeting sub-system may force write-back if it thinks there is no
 * space to reserve. This imposes some locking restrictions and makes it
 * impossible to take into account the above cases, and makes it impossible to
 * optimize budgeting.
 *
 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes
 * there is plenty of flash space and the budget will be acquired quickly,
 * without forcing write-back. The slow path does not make this assumption.
 */
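/*
 * As a rough sketch (the exact requests are built in 'allocate_budget()' and
 * 'write_begin_slow()'), the cases above map to the following budget requests:
 *
 *   dirty page, not appending    - no budgeting at all;
 *   hole (PG_checked) page       - req = { .new_page = 1 };
 *   existing clean page          - req = { .dirtied_page = 1 };
 *   appending with a clean inode - additionally sets .dirtied_ino = 1.
 */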
static int ubifs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
        struct page *page;

        ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);

        if (unlikely(c->ro_media))
                return -EROFS;

        /* Try out the fast-path part first */
        page = __grab_cache_page(mapping, index);
        if (unlikely(!page))
                return -ENOMEM;

        if (!PageUptodate(page)) {
                /* The page is not loaded from the flash */
                if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE)
                        /*
                         * We change whole page so no need to load it. But we
                         * have to set the @PG_checked flag to make the further
                         * code assume the page is new. This might be not true,
                         * but it is better to budget more than to read the
                         * page from the media.
                         */
                        SetPageChecked(page);
                else {
                        err = do_readpage(page);
                        if (err) {
                                unlock_page(page);
                                page_cache_release(page);
                                return err;
                        }
                }

                SetPageUptodate(page);
                ClearPageError(page);
        }

        err = allocate_budget(c, page, ui, appending);
        if (unlikely(err)) {
                ubifs_assert(err == -ENOSPC);
                /*
                 * Budgeting failed which means it would have to force
                 * write-back but didn't, because we set the @fast flag in the
                 * request. Write-back cannot be done now, while we have the
                 * page locked, because it would deadlock. Unlock and free
                 * everything and fall-back to slow-path.
                 */
                if (appending) {
                        ubifs_assert(mutex_is_locked(&ui->ui_mutex));
                        mutex_unlock(&ui->ui_mutex);
                }
                unlock_page(page);
                page_cache_release(page);

                return write_begin_slow(mapping, pos, len, pagep);
        }

        /*
         * Whee, we acquired budgeting quickly - without involving
         * garbage-collection, committing or forcing write-back. We return
         * with @ui->ui_mutex locked if we are appending pages, and unlocked
         * otherwise. This is an optimization (slightly hacky though).
         */
        *pagep = page;
        return 0;
}
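/*
 * For orientation, a buffered write seen from the VFS side is roughly:
 *
 *     ubifs_write_begin(file, mapping, pos, len, flags, &page, &fsdata);
 *     <copy user data into the locked page>
 *     ubifs_write_end(file, mapping, pos, len, copied, page, fsdata);
 *
 * so the budget acquired above is either consumed in 'ubifs_write_end()' (the
 * page becomes dirty) or released there via 'cancel_budget()'.
 */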
/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
                          struct ubifs_inode *ui, int appending)
{
        if (appending) {
                if (!ui->dirty)
                        ubifs_release_dirty_inode_budget(c, ui);
                mutex_unlock(&ui->ui_mutex);
        }
        if (!PagePrivate(page)) {
                if (PageChecked(page))
                        release_new_page_budget(c);
                else
                        release_existing_page_budget(c);
        }
}
static int ubifs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        loff_t end_pos = pos + len;
        int appending = !!(end_pos > inode->i_size);

        dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
                inode->i_ino, pos, page->index, len, copied, inode->i_size);

        if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
                /*
                 * VFS copied less data to the page than it intended and
                 * declared in its '->write_begin()' call via the @len
                 * argument. If the page was not up-to-date, and @len was
                 * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did
                 * not load it from the media (for optimization reasons). This
                 * means that part of the page contains garbage. So read the
                 * page now.
                 */
                dbg_gen("copied %d instead of %d, read page and repeat",
                        copied, len);
                cancel_budget(c, page, ui, appending);

                /*
                 * Return 0 to force VFS to repeat the whole operation, or the
                 * error code if 'do_readpage()' fails.
                 */
                copied = do_readpage(page);
                goto out;
        }

        if (!PagePrivate(page)) {
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (appending) {
                i_size_write(inode, end_pos);
                ui->ui_size = end_pos;
                /*
                 * Note, we do not set @I_DIRTY_PAGES (which means that the
                 * inode has dirty pages), this has been done in
                 * '__set_page_dirty_nobuffers()'.
                 */
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
                ubifs_assert(mutex_is_locked(&ui->ui_mutex));
                mutex_unlock(&ui->ui_mutex);
        }

out:
        unlock_page(page);
        page_cache_release(page);
        return copied;
}
/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page to fill
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
                         struct bu_info *bu, int *n)
{
        int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 1, read = 0;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        unsigned int page_block;
        void *addr, *zaddr;
        pgoff_t end_index;

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);

        addr = zaddr = kmap(page);

        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (!i_size || page->index > end_index) {
                memset(addr, 0, PAGE_CACHE_SIZE);
                goto out_hole;
        }

        page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        while (1) {
                int err, len, out_len, dlen;

                if (nn >= bu->cnt ||
                    key_block(c, &bu->zbranch[nn].key) != page_block)
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                else {
                        struct ubifs_data_node *dn;

                        dn = bu->buf + (bu->zbranch[nn].offs - offs);

                        ubifs_assert(dn->ch.sqnum >
                                     ubifs_inode(inode)->creat_sqnum);

                        len = le32_to_cpu(dn->size);
                        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                                goto out_err;

                        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
                        out_len = UBIFS_BLOCK_SIZE;
                        err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
                                               le16_to_cpu(dn->compr_type));
                        if (err || len != out_len)
                                goto out_err;

                        if (len < UBIFS_BLOCK_SIZE)
                                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

                        nn += 1;
                        hole = 0;
                        read = (i << UBIFS_BLOCK_SHIFT) + len;
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                addr += UBIFS_BLOCK_SIZE;
                page_block += 1;
        }

        if (end_index == page->index) {
                int len = i_size & (PAGE_CACHE_SIZE - 1);

                if (len && len < read)
                        memset(zaddr + len, 0, read - len);
        }

out_hole:
        if (hole)
                SetPageChecked(page);

        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        *n = nn;
        return 0;

out_err:
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        ubifs_err("bad data node (block %u, inode %lu)",
                  page_block, inode->i_ino);
        return -EINVAL;
}
/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @page1: first page to read
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
{
        pgoff_t offset = page1->index, end_index;
        struct address_space *mapping = page1->mapping;
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct bu_info *bu;
        int err, page_idx, page_cnt, ret = 0, n = 0;
        loff_t isize;

        bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
        if (!bu)
                return 0;

        bu->buf_len = c->bulk_read_buf_size;
        bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
        if (!bu->buf)
                goto out_free;

        data_key_init(c, &bu->key, inode->i_ino,
                      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);

        err = ubifs_tnc_get_bu_keys(c, bu);
        if (err)
                goto out_warn;

        if (bu->eof) {
                /* Turn off bulk-read at the end of the file */
                ui->read_in_a_row = 1;
                ui->bulk_read = 0;
        }

        page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
        if (!page_cnt) {
                /*
                 * This happens when there are multiple blocks per page and the
                 * blocks for the first page we are looking for, are not
                 * together. If all the pages were like this, bulk-read would
                 * reduce performance, so we turn it off for a while.
                 */
                ui->read_in_a_row = 0;
                ui->bulk_read = 0;
                goto out_free;
        }

        if (bu->cnt) {
                err = ubifs_tnc_bulk_read(c, bu);
                if (err)
                        goto out_warn;
        }

        err = populate_page(c, page1, bu, &n);
        if (err)
                goto out_warn;

        unlock_page(page1);
        ret = 1;

        isize = i_size_read(inode);
        if (!isize)
                goto out_free;
        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        for (page_idx = 1; page_idx < page_cnt; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
                struct page *page;

                if (page_offset > end_index)
                        break;
                page = find_or_create_page(mapping, page_offset,
                                           GFP_NOFS | __GFP_COLD);
                if (!page)
                        break;
                if (!PageUptodate(page))
                        err = populate_page(c, page, bu, &n);
                unlock_page(page);
                page_cache_release(page);
                if (err)
                        break;
        }

        ui->last_page_read = offset + page_idx - 1;

out_free:
        kfree(bu->buf);
        kfree(bu);
        return ret;

out_warn:
        ubifs_warn("ignoring error %d and skipping bulk-read", err);
        goto out_free;
}
/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = page->index, last_page_read = ui->last_page_read;
        int ret = 0;

        ui->last_page_read = index;

        if (!c->bulk_read)
                return 0;
        /*
         * Bulk-read is protected by ui_mutex, but it is an optimization, so
         * don't bother if we cannot lock the mutex.
         */
        if (!mutex_trylock(&ui->ui_mutex))
                return 0;
        if (index != last_page_read + 1) {
                /* Turn off bulk-read if we stop reading sequentially */
                ui->read_in_a_row = 1;
                if (ui->bulk_read)
                        ui->bulk_read = 0;
                goto out_unlock;
        }
        if (!ui->bulk_read) {
                ui->read_in_a_row += 1;
                if (ui->read_in_a_row < 3)
                        goto out_unlock;
                /* Three reads in a row, so switch on bulk-read */
                ui->bulk_read = 1;
        }
        ret = ubifs_do_bulk_read(c, page);

out_unlock:
        mutex_unlock(&ui->ui_mutex);
        return ret;
}
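/*
 * In other words, bulk-read kicks in only after 3 sequential '->readpage()'
 * calls on the same inode ('ui->read_in_a_row' reaching 3), and it is switched
 * off again as soon as the read pattern stops being sequential or the end of
 * the file is reached.
 */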
static int ubifs_readpage(struct file *file, struct page *page)
{
        if (ubifs_bulk_read(page))
                return 0;
        do_readpage(page);
        unlock_page(page);
        return 0;
}
static int do_writepage(struct page *page, int len)
{
        int err = 0, i, blen;
        unsigned int block;
        void *addr;
        union ubifs_key key;
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);

        spin_lock(&ui->ui_lock);
        ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT);
        spin_unlock(&ui->ui_lock);

        /* Update radix tree tags */
        set_page_writeback(page);

        addr = kmap(page);
        block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        i = 0;
        while (len) {
                blen = min_t(int, len, UBIFS_BLOCK_SIZE);
                data_key_init(c, &key, inode->i_ino, block);
                err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
                if (err)
                        break;
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                block += 1;
                addr += blen;
                len -= blen;
        }
        if (err) {
                SetPageError(page);
                ubifs_err("cannot write page %lu of inode %lu, error %d",
                          page->index, inode->i_ino, err);
                ubifs_ro_mode(c, err);
        }

        ubifs_assert(PagePrivate(page));
        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);

        kunmap(page);
        unlock_page(page);
        end_page_writeback(page);
        return err;
}
/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation where we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space. Because the
 * journal has been committed, the replay would not be able to detect this
 * situation and correct the inode size. This means UBIFS would have to scan
 * the whole index and correct all inode sizes, which is long and unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains current inode size,
 * and then keeps writing pages back.
 *
 * Some locking issues explanation. 'ubifs_writepage()' is first called with
 * the page locked, and it locks @ui_mutex. However, write-back does not take
 * inode @i_mutex, which means other VFS operations may be run on this inode at
 * the same time. And the problematic one is truncation to smaller size, from
 * where we have to call 'vmtruncate()', which first changes @inode->i_size,
 * then drops the truncated pages. And while dropping the pages, it takes the
 * page lock. This means that 'do_truncation()' cannot call 'vmtruncate()' with
 * @ui_mutex locked, because it would deadlock with 'ubifs_writepage()'. This
 * means that @inode->i_size is changed while @ui_mutex is unlocked.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * inode size. How do we do this if @inode->i_size may become smaller while we
 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_isize "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates it under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 * situation when the inode is truncated while we are in the middle of
 * 'do_writepage()', so we do write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would be blocked
 * on the page lock and it would not write the truncated inode node to the
 * journal before we have finished.
 */
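/*
 * Schematically, the rule described above boils down to:
 *
 *     if (the page is beyond ui->synced_i_size)
 *             write_inode(inode, 1);    - push i_size to the media first
 *     do_writepage(page, len);
 *
 * which is what 'ubifs_writepage()' below implements.
 */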
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        loff_t i_size = i_size_read(inode), synced_i_size;
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        int err, len = i_size & (PAGE_CACHE_SIZE - 1);
        void *kaddr;

        dbg_gen("ino %lu, pg %lu, pg flags %#lx",
                inode->i_ino, page->index, page->flags);
        ubifs_assert(PagePrivate(page));

        /* Is the page fully outside @i_size? (truncate in progress) */
        if (page->index > end_index || (page->index == end_index && !len)) {
                err = 0;
                goto out_unlock;
        }

        spin_lock(&ui->ui_lock);
        synced_i_size = ui->synced_i_size;
        spin_unlock(&ui->ui_lock);

        /* Is the page fully inside @i_size? */
        if (page->index < end_index) {
                if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) {
                        err = inode->i_sb->s_op->write_inode(inode, 1);
                        if (err)
                                goto out_unlock;
                        /*
                         * The inode has been written, but the write-buffer has
                         * not been synchronized, so in case of an unclean
                         * reboot we may end up with some pages beyond inode
                         * size, but they would be in the journal (because
                         * commit flushes write buffers) and recovery would deal
                         * with this situation.
                         */
                }
                return do_writepage(page, PAGE_CACHE_SIZE);
        }

        /*
         * The page straddles @i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        if (i_size > synced_i_size) {
                err = inode->i_sb->s_op->write_inode(inode, 1);
                if (err)
                        goto out_unlock;
        }

        return do_writepage(page, len);

out_unlock:
        unlock_page(page);
        return err;
}
/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
        if (attr->ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (attr->ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (attr->ia_valid & ATTR_ATIME)
                inode->i_atime = timespec_trunc(attr->ia_atime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_MTIME)
                inode->i_mtime = timespec_trunc(attr->ia_mtime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_CTIME)
                inode->i_ctime = timespec_trunc(attr->ia_ctime,
                                                inode->i_sb->s_time_gran);
        if (attr->ia_valid & ATTR_MODE) {
                umode_t mode = attr->ia_mode;

                if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
                        mode &= ~S_ISGID;
                inode->i_mode = mode;
        }
}
/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
                         const struct iattr *attr)
{
        int err;
        struct ubifs_budget_req req;
        loff_t old_size = inode->i_size, new_size = attr->ia_size;
        int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
        struct ubifs_inode *ui = ubifs_inode(inode);

        dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
        memset(&req, 0, sizeof(struct ubifs_budget_req));

        /*
         * If this is truncation to a smaller size, and we do not truncate on a
         * block boundary, budget for changing one data block, because the last
         * block will be re-written.
         */
        if (new_size & (UBIFS_BLOCK_SIZE - 1))
                req.dirtied_page = 1;

        req.dirtied_ino = 1;
        /* A funny way to budget for truncation node */
        req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;

        err = ubifs_budget_space(c, &req);
        if (err) {
                /*
                 * Treat truncations to zero as deletion and always allow them,
                 * just like we do for '->unlink()'.
                 */
                if (new_size || err != -ENOSPC)
                        return err;
                budgeted = 0;
        }

        err = vmtruncate(inode, new_size);
        if (err)
                goto out_budg;

        if (offset) {
                pgoff_t index = new_size >> PAGE_CACHE_SHIFT;
                struct page *page;

                page = find_lock_page(inode->i_mapping, index);
                if (page) {
                        if (PageDirty(page)) {
                                /*
                                 * 'ubifs_jnl_truncate()' will try to truncate
                                 * the last data node, but it contains
                                 * out-of-date data because the page is dirty.
                                 * Write the page now, so that
                                 * 'ubifs_jnl_truncate()' will see an already
                                 * truncated (and up to date) data node.
                                 */
                                ubifs_assert(PagePrivate(page));

                                clear_page_dirty_for_io(page);
                                if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
                                        offset = new_size &
                                                 (PAGE_CACHE_SIZE - 1);
                                err = do_writepage(page, offset);
                                page_cache_release(page);
                                if (err)
                                        goto out_budg;
                                /*
                                 * We could now tell 'ubifs_jnl_truncate()' not
                                 * to read the last block.
                                 */
                        } else {
                                /*
                                 * We could 'kmap()' the page and pass the data
                                 * to 'ubifs_jnl_truncate()' to save it from
                                 * having to read it.
                                 */
                                unlock_page(page);
                                page_cache_release(page);
                        }
                }
        }

        mutex_lock(&ui->ui_mutex);
        ui->ui_size = inode->i_size;
        /* Truncation changes inode [mc]time */
        inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
        /* The other attributes may be changed at the same time as well */
        do_attr_changes(inode, attr);

        err = ubifs_jnl_truncate(c, inode, old_size, new_size);
        mutex_unlock(&ui->ui_mutex);

out_budg:
        if (budgeted)
                ubifs_release_budget(c, &req);
        else
                c->nospace = c->nospace_rp = 0;
        return err;
}
/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a negative
 * error code in case of failure.
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
                      const struct iattr *attr)
{
        int err, release;
        loff_t new_size = attr->ia_size;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

        err = ubifs_budget_space(c, &req);
        if (err)
                return err;

        if (attr->ia_valid & ATTR_SIZE) {
                dbg_gen("size %lld -> %lld", inode->i_size, new_size);
                err = vmtruncate(inode, new_size);
                if (err)
                        goto out;
        }

        mutex_lock(&ui->ui_mutex);
        if (attr->ia_valid & ATTR_SIZE) {
                /* Truncation changes inode [mc]time */
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                /* 'vmtruncate()' changed @i_size, update @ui_size */
                ui->ui_size = inode->i_size;
        }

        do_attr_changes(inode, attr);

        release = ui->dirty;
        if (attr->ia_valid & ATTR_SIZE)
                /*
                 * Inode length changed, so we have to make sure
                 * @I_DIRTY_DATASYNC is set.
                 */
                __mark_inode_dirty(inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC);
        else
                mark_inode_dirty_sync(inode);
        mutex_unlock(&ui->ui_mutex);

        if (release)
                ubifs_release_budget(c, &req);
        if (IS_SYNC(inode))
                err = inode->i_sb->s_op->write_inode(inode, 1);
        return err;

out:
        ubifs_release_budget(c, &req);
        return err;
}
int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
        int err;
        struct inode *inode = dentry->d_inode;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        dbg_gen("ino %lu, mode %#x, ia_valid %#x",
                inode->i_ino, inode->i_mode, attr->ia_valid);
        err = inode_change_ok(inode, attr);
        if (err)
                return err;

        err = dbg_check_synced_i_size(inode);
        if (err)
                return err;

        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
                /* Truncation to a smaller size */
                err = do_truncation(c, inode, attr);
        else
                err = do_setattr(c, inode, attr);

        return err;
}
static void ubifs_invalidatepage(struct page *page, unsigned long offset)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        ubifs_assert(PagePrivate(page));
        if (offset)
                /* Partial page remains dirty */
                return;

        if (PageChecked(page))
                release_new_page_budget(c);
        else
                release_existing_page_budget(c);

        atomic_long_dec(&c->dirty_pg_cnt);
        ClearPagePrivate(page);
        ClearPageChecked(page);
}
static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct ubifs_inode *ui = ubifs_inode(dentry->d_inode);

        nd_set_link(nd, ui->data);
        return NULL;
}
int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct inode *inode = dentry->d_inode;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        int err;

        dbg_gen("syncing inode %lu", inode->i_ino);

        /*
         * VFS has already synchronized dirty pages for this inode. Synchronize
         * the inode unless this is a 'datasync()' call.
         */
        if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
                err = inode->i_sb->s_op->write_inode(inode, 1);
                if (err)
                        return err;
        }

        /*
         * Nodes related to this inode may still sit in a write-buffer. Flush
         * them.
         */
        err = ubifs_sync_wbufs_by_inode(c, inode);
        if (err)
                return err;

        return 0;
}
/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
                                       const struct timespec *now)
{
        if (!timespec_equal(&inode->i_mtime, now) ||
            !timespec_equal(&inode->i_ctime, now))
                return 1;
        return 0;
}
/**
 * update_mctime - update mtime and ctime of an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if they are not equal to
 * the current time. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int update_mctime(struct ubifs_info *c, struct inode *inode)
{
        struct timespec now = ubifs_current_time(inode);
        struct ubifs_inode *ui = ubifs_inode(inode);

        if (mctime_update_needed(inode, &now)) {
                int err, release;
                struct ubifs_budget_req req = { .dirtied_ino = 1,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };

                err = ubifs_budget_space(c, &req);
                if (err)
                        return err;

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_budget(c, &req);
        }

        return 0;
}
static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
                               unsigned long nr_segs, loff_t pos)
{
        int err;
        ssize_t ret;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;

        err = update_mctime(c, inode);
        if (err)
                return err;

        ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
        if (ret < 0)
                return ret;

        if (ret > 0 && (IS_SYNC(inode) || iocb->ki_filp->f_flags & O_SYNC)) {
                err = ubifs_sync_wbufs_by_inode(c, inode);
                if (err)
                        return err;
        }

        return ret;
}
static int ubifs_set_page_dirty(struct page *page)
{
        int ret;

        ret = __set_page_dirty_nobuffers(page);
        /*
         * An attempt to dirty a page without budgeting for it - should not
         * happen.
         */
        ubifs_assert(ret == 0);
        return ret;
}
static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
        /*
         * An attempt to release a dirty page without budgeting for it - should
         * not happen.
         */
        if (PageWriteback(page))
                return 0;
        ubifs_assert(PagePrivate(page));
        ClearPagePrivate(page);
        ClearPageChecked(page);
        return 1;
}
/*
 * mmap()d file has taken write protection fault and is being made writable.
 * UBIFS must ensure page is budgeted for.
 */
static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct timespec now = ubifs_current_time(inode);
        struct ubifs_budget_req req = { .new_page = 1 };
        int err, update_time;

        dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
                i_size_read(inode));
        ubifs_assert(!(inode->i_sb->s_flags & MS_RDONLY));

        if (unlikely(c->ro_media))
                return -EROFS;

        /*
         * We have not locked @page so far so we may budget for changing the
         * page. Note, we cannot do this after we locked the page, because
         * budgeting may cause write-back which would cause deadlock.
         *
         * At the moment we do not know whether the page is dirty or not, so we
         * assume that it is not and budget for a new page. We could look at
         * the @PG_private flag and figure this out, but we may race with write
         * back and the page state may change by the time we lock it, so this
         * would need additional care. We do not bother with this at the
         * moment, although it might be good idea to do. Instead, we allocate
         * budget for a new page and amend it later on if the page was in fact
         * dirty.
         *
         * The budgeting-related logic of this function is similar to what we
         * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there
         * for more comments.
         */
        update_time = mctime_update_needed(inode, &now);
        if (update_time)
                /*
                 * We have to change inode time stamp which requires extra
                 * budgeting.
                 */
                req.dirtied_ino = 1;

        err = ubifs_budget_space(c, &req);
        if (unlikely(err)) {
                if (err == -ENOSPC)
                        ubifs_warn("out of space for mmapped file "
                                   "(inode number %lu)", inode->i_ino);
                return err;
        }

        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping ||
                     page_offset(page) > i_size_read(inode))) {
                /* Page got truncated out from underneath us */
                err = -EINVAL;
                goto out_unlock;
        }

        if (PagePrivate(page))
                release_new_page_budget(c);
        else {
                if (!PageChecked(page))
                        ubifs_convert_page_budget(c);
                SetPagePrivate(page);
                atomic_long_inc(&c->dirty_pg_cnt);
                __set_page_dirty_nobuffers(page);
        }

        if (update_time) {
                int release;
                struct ubifs_inode *ui = ubifs_inode(inode);

                mutex_lock(&ui->ui_mutex);
                inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
                release = ui->dirty;
                mark_inode_dirty_sync(inode);
                mutex_unlock(&ui->ui_mutex);
                if (release)
                        ubifs_release_dirty_inode_budget(c, ui);
        }

        unlock_page(page);
        return 0;

out_unlock:
        unlock_page(page);
        ubifs_release_budget(c, &req);
        return err;
}
static struct vm_operations_struct ubifs_file_vm_ops = {
        .fault        = filemap_fault,
        .page_mkwrite = ubifs_vm_page_mkwrite,
};
static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        int err;

        /* 'generic_file_mmap()' takes care of NOMMU case */
        err = generic_file_mmap(file, vma);
        if (err)
                return err;
        vma->vm_ops = &ubifs_file_vm_ops;
        return 0;
}
struct address_space_operations ubifs_file_address_operations = {
        .readpage       = ubifs_readpage,
        .writepage      = ubifs_writepage,
        .write_begin    = ubifs_write_begin,
        .write_end      = ubifs_write_end,
        .invalidatepage = ubifs_invalidatepage,
        .set_page_dirty = ubifs_set_page_dirty,
        .releasepage    = ubifs_releasepage,
};
struct inode_operations ubifs_file_inode_operations = {
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
        .setxattr    = ubifs_setxattr,
        .getxattr    = ubifs_getxattr,
        .listxattr   = ubifs_listxattr,
        .removexattr = ubifs_removexattr,
#endif
};
struct inode_operations ubifs_symlink_inode_operations = {
        .readlink    = generic_readlink,
        .follow_link = ubifs_follow_link,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
};
struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = ubifs_aio_write,
        .mmap           = ubifs_file_mmap,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ubifs_compat_ioctl,
#endif
};