/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "hash.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find.
 * The second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3
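
/*
 * replay_one_buffer() compares wc->stage against these values to decide
 * which item types get processed on each walk of the log tree.
 */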

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, struct inode *inode,
                           int inode_only,
                           const loff_t start,
                           const loff_t end,
                           struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct btrfs_root *log,
                                       struct btrfs_path *path,
                                       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree,
 * and once to do all the other items.
 */
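
/*
 * There is one log tree per subvolume root being fsynced, plus a single
 * log root tree (fs_info->log_root_tree) that records where those
 * per-subvolume log trees live; start_log_trans() below creates both
 * on demand.
 */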

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_log_ctx *ctx)
{
        int index;
        int ret;

        mutex_lock(&root->log_mutex);
        if (root->log_root) {
                if (btrfs_need_log_full_commit(root->fs_info, trans)) {
                        ret = -EAGAIN;
                        goto out;
                }
                if (!root->log_start_pid) {
                        root->log_start_pid = current->pid;
                        clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
                } else if (root->log_start_pid != current->pid) {
                        set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
                }

                atomic_inc(&root->log_batch);
                atomic_inc(&root->log_writers);
                if (ctx) {
                        index = root->log_transid % 2;
                        list_add_tail(&ctx->list, &root->log_ctxs[index]);
                        ctx->log_transid = root->log_transid;
                }
                mutex_unlock(&root->log_mutex);
                return 0;
        }

        ret = 0;
        mutex_lock(&root->fs_info->tree_log_mutex);
        if (!root->fs_info->log_root_tree)
                ret = btrfs_init_log_root_tree(trans, root->fs_info);
        mutex_unlock(&root->fs_info->tree_log_mutex);
        if (ret)
                goto out;

        if (!root->log_root) {
                ret = btrfs_add_log_tree(trans, root);
                if (ret)
                        goto out;
        }
        clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
        root->log_start_pid = current->pid;
        atomic_inc(&root->log_batch);
        atomic_inc(&root->log_writers);
        if (ctx) {
                index = root->log_transid % 2;
                list_add_tail(&ctx->list, &root->log_ctxs[index]);
                ctx->log_transid = root->log_transid;
        }
out:
        mutex_unlock(&root->log_mutex);
        return ret;
}
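
/*
 * Every successful return from start_log_trans() leaves
 * root->log_writers elevated; the caller is expected to drop that
 * reference with btrfs_end_log_trans() once it is done with the log.
 */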

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
        int ret = -ENOENT;

        smp_mb();
        if (!root->log_root)
                return -ENOENT;

        mutex_lock(&root->log_mutex);
        if (root->log_root) {
                ret = 0;
                atomic_inc(&root->log_writers);
        }
        mutex_unlock(&root->log_mutex);
        return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
        int ret = -ENOENT;

        mutex_lock(&root->log_mutex);
        atomic_inc(&root->log_writers);
        mutex_unlock(&root->log_mutex);
        return ret;
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
        if (atomic_dec_and_test(&root->log_writers)) {
                smp_mb();
                if (waitqueue_active(&root->log_writer_wait))
                        wake_up(&root->log_writer_wait);
        }
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
        /* should we free the extent on disk when done?  This is used
         * at transaction commit time while freeing a log tree
         */
        int free;

        /* should we write out the extent buffer?  This is used
         * while flushing the log tree to disk during a sync
         */
        int write;

        /* should we wait for the extent buffer io to finish?  Also used
         * while flushing the log tree to disk for a sync
         */
        int wait;

        /* pin only walk, we record which extents on disk belong to the
         * log trees
         */
        int pin;

        /* what stage of the replay code we're currently in */
        int stage;

        /* the root we are currently replaying */
        struct btrfs_root *replay_dest;

        /* the trans handle for the current replay */
        struct btrfs_trans_handle *trans;

        /* the function that gets used to process blocks we find in the
         * tree.  Note the extent_buffer might not be up to date when it is
         * passed in, and it must be checked or read if you need the data
         * inside it
         */
        int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
                            struct walk_control *wc, u64 gen);
};
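
/*
 * Typically only a few of these fields are used together: free is set
 * while a log tree is torn down at transaction commit, write and wait
 * while a log is flushed to disk for a sync, and pin (together with
 * replay_dest and trans) while the log is replayed after a crash.
 */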

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
                              struct extent_buffer *eb,
                              struct walk_control *wc, u64 gen)
{
        int ret = 0;

        /*
         * If this fs is mixed then we need to be able to process the leaves to
         * pin down any logged extents, so we have to read the block.
         */
        if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
                ret = btrfs_read_buffer(eb, gen);
                if (ret)
                        return ret;
        }

        if (wc->pin)
                ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
                                                      eb->start, eb->len);

        if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
                if (wc->pin && btrfs_header_level(eb) == 0)
                        ret = btrfs_exclude_logged_extents(log, eb);
                if (wc->write)
                        btrfs_write_tree_block(eb);
                if (wc->wait)
                        btrfs_wait_tree_block_writeback(eb);
        }
        return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   struct extent_buffer *eb, int slot,
                                   struct btrfs_key *key)
{
        int ret;
        u32 item_size;
        u64 saved_i_size = 0;
        int save_old_i_size = 0;
        unsigned long src_ptr;
        unsigned long dst_ptr;
        int overwrite_root = 0;
        bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                overwrite_root = 1;

        item_size = btrfs_item_size_nr(eb, slot);
        src_ptr = btrfs_item_ptr_offset(eb, slot);

        /* look for the key in the destination tree */
        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret < 0)
                return ret;

        if (ret == 0) {
                char *src_copy;
                char *dst_copy;
                u32 dst_size = btrfs_item_size_nr(path->nodes[0],
                                                  path->slots[0]);
                if (dst_size != item_size)
                        goto insert;

                if (item_size == 0) {
                        btrfs_release_path(path);
                        return 0;
                }
                dst_copy = kmalloc(item_size, GFP_NOFS);
                src_copy = kmalloc(item_size, GFP_NOFS);
                if (!dst_copy || !src_copy) {
                        btrfs_release_path(path);
                        kfree(dst_copy);
                        kfree(src_copy);
                        return -ENOMEM;
                }

                read_extent_buffer(eb, src_copy, src_ptr, item_size);

                dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
                read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
                                   item_size);
                ret = memcmp(dst_copy, src_copy, item_size);

                kfree(dst_copy);
                kfree(src_copy);
                /*
                 * they have the same contents, just return, this saves
                 * us from cowing blocks in the destination tree and doing
                 * extra writes that may not have been done by a previous
                 * sync
                 */
                if (ret == 0) {
                        btrfs_release_path(path);
                        return 0;
                }

                /*
                 * We need to load the old nbytes into the inode so when we
                 * replay the extents we've logged we get the right nbytes.
                 */
                if (inode_item) {
                        struct btrfs_inode_item *item;
                        u64 nbytes;
                        u32 mode;

                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_inode_item);
                        nbytes = btrfs_inode_nbytes(path->nodes[0], item);
                        item = btrfs_item_ptr(eb, slot,
                                              struct btrfs_inode_item);
                        btrfs_set_inode_nbytes(eb, item, nbytes);

                        /*
                         * If this is a directory we need to reset the i_size to
                         * 0 so that we can set it up properly when replaying
                         * the rest of the items in this log.
                         */
                        mode = btrfs_inode_mode(eb, item);
                        if (S_ISDIR(mode))
                                btrfs_set_inode_size(eb, item, 0);
                }
        } else if (inode_item) {
                struct btrfs_inode_item *item;
                u32 mode;

                /*
                 * New inode, set nbytes to 0 so that the nbytes comes out
                 * properly when we replay the extents.
                 */
                item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
                btrfs_set_inode_nbytes(eb, item, 0);

                /*
                 * If this is a directory we need to reset the i_size to 0 so
                 * that we can set it up properly when replaying the rest of
                 * the items in this log.
                 */
                mode = btrfs_inode_mode(eb, item);
                if (S_ISDIR(mode))
                        btrfs_set_inode_size(eb, item, 0);
        }
insert:
        btrfs_release_path(path);
        /* try to insert the key into the destination tree */
        ret = btrfs_insert_empty_item(trans, root, path,
                                      key, item_size);

        /* make sure any existing item is the correct size */
        if (ret == -EEXIST) {
                u32 found_size;
                found_size = btrfs_item_size_nr(path->nodes[0],
                                                path->slots[0]);
                if (found_size > item_size)
                        btrfs_truncate_item(root, path, item_size, 1);
                else if (found_size < item_size)
                        btrfs_extend_item(root, path,
                                          item_size - found_size);
        } else if (ret) {
                return ret;
        }
        dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
                                        path->slots[0]);

        /* don't overwrite an existing inode if the generation number
         * was logged as zero.  This is done when the tree logging code
         * is just logging an inode to make sure it exists after recovery.
         *
         * Also, don't overwrite i_size on directories during replay.
         * log replay inserts and removes directory items based on the
         * state of the tree found in the subvolume, and i_size is modified
         * as it goes
         */
        if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
                struct btrfs_inode_item *src_item;
                struct btrfs_inode_item *dst_item;

                src_item = (struct btrfs_inode_item *)src_ptr;
                dst_item = (struct btrfs_inode_item *)dst_ptr;

                if (btrfs_inode_generation(eb, src_item) == 0)
                        goto no_copy;

                if (overwrite_root &&
                    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
                    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
                        save_old_i_size = 1;
                        saved_i_size = btrfs_inode_size(path->nodes[0],
                                                        dst_item);
                }
        }

        copy_extent_buffer(path->nodes[0], eb, dst_ptr,
                           src_ptr, item_size);

        if (save_old_i_size) {
                struct btrfs_inode_item *dst_item;
                dst_item = (struct btrfs_inode_item *)dst_ptr;
                btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
        }

        /* make sure the generation is filled in */
        if (key->type == BTRFS_INODE_ITEM_KEY) {
                struct btrfs_inode_item *dst_item;
                dst_item = (struct btrfs_inode_item *)dst_ptr;
                if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
                        btrfs_set_inode_generation(path->nodes[0], dst_item,
                                                   trans->transid);
                }
        }
no_copy:
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_release_path(path);
        return 0;
}
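
/*
 * Because identical source and destination items are detected and
 * skipped above, overwrite_item() is effectively idempotent: replaying
 * the same log again does not cow or dirty blocks in the destination
 * tree that a previous pass already brought up to date.
 */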

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
                                             u64 objectid)
{
        struct btrfs_key key;
        struct inode *inode;

        key.objectid = objectid;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
        if (IS_ERR(inode)) {
                inode = NULL;
        } else if (is_bad_inode(inode)) {
                iput(inode);
                inode = NULL;
        }
        return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct extent_buffer *eb, int slot,
                                      struct btrfs_key *key)
{
        int found_type;
        u64 extent_end;
        u64 start = key->offset;
        u64 nbytes = 0;
        struct btrfs_file_extent_item *item;
        struct inode *inode = NULL;
        unsigned long size;
        int ret = 0;

        item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
        found_type = btrfs_file_extent_type(eb, item);

        if (found_type == BTRFS_FILE_EXTENT_REG ||
            found_type == BTRFS_FILE_EXTENT_PREALLOC) {
                nbytes = btrfs_file_extent_num_bytes(eb, item);
                extent_end = start + nbytes;

                /*
                 * We don't add to the inodes nbytes if we are prealloc or a
                 * hole.
                 */
                if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
                        nbytes = 0;
        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                size = btrfs_file_extent_inline_len(eb, slot, item);
                nbytes = btrfs_file_extent_ram_bytes(eb, item);
                extent_end = ALIGN(start + size, root->sectorsize);
        } else {
                ret = 0;
                goto out;
        }

        inode = read_one_inode(root, key->objectid);
        if (!inode) {
                ret = -EIO;
                goto out;
        }

        /*
         * first check to see if we already have this extent in the
         * file.  This must be done before the btrfs_drop_extents run
         * so we don't try to drop this extent.
         */
        ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
                                       start, 0);

        if (ret == 0 &&
            (found_type == BTRFS_FILE_EXTENT_REG ||
             found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
                struct btrfs_file_extent_item cmp1;
                struct btrfs_file_extent_item cmp2;
                struct btrfs_file_extent_item *existing;
                struct extent_buffer *leaf;

                leaf = path->nodes[0];
                existing = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_file_extent_item);

                read_extent_buffer(eb, &cmp1, (unsigned long)item,
                                   sizeof(cmp1));
                read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
                                   sizeof(cmp2));

                /*
                 * we already have a pointer to this exact extent,
                 * we don't have to do anything
                 */
                if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
                        btrfs_release_path(path);
                        goto out;
                }
        }
        btrfs_release_path(path);

        /* drop any overlapping extents */
        ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
        if (ret)
                goto out;

        if (found_type == BTRFS_FILE_EXTENT_REG ||
            found_type == BTRFS_FILE_EXTENT_PREALLOC) {
                u64 offset;
                unsigned long dest_offset;
                struct btrfs_key ins;

                ret = btrfs_insert_empty_item(trans, root, path, key,
                                              sizeof(*item));
                if (ret)
                        goto out;
                dest_offset = btrfs_item_ptr_offset(path->nodes[0],
                                                    path->slots[0]);
                copy_extent_buffer(path->nodes[0], eb, dest_offset,
                                   (unsigned long)item, sizeof(*item));

                ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
                ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
                ins.type = BTRFS_EXTENT_ITEM_KEY;
                offset = key->offset - btrfs_file_extent_offset(eb, item);

                if (ins.objectid > 0) {
                        u64 csum_start;
                        u64 csum_end;
                        LIST_HEAD(ordered_sums);
                        /*
                         * is this extent already allocated in the extent
                         * allocation tree?  If so, just add a reference
                         */
                        ret = btrfs_lookup_data_extent(root, ins.objectid,
                                                       ins.offset);
                        if (ret == 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                                ins.objectid, ins.offset,
                                                0, root->root_key.objectid,
                                                key->objectid, offset, 0);
                                if (ret)
                                        goto out;
                        } else {
                                /*
                                 * insert the extent pointer in the extent
                                 * allocation tree
                                 */
                                ret = btrfs_alloc_logged_file_extent(trans,
                                                root, root->root_key.objectid,
                                                key->objectid, offset, &ins);
                                if (ret)
                                        goto out;
                        }
                        btrfs_release_path(path);

                        if (btrfs_file_extent_compression(eb, item)) {
                                csum_start = ins.objectid;
                                csum_end = csum_start + ins.offset;
                        } else {
                                csum_start = ins.objectid +
                                        btrfs_file_extent_offset(eb, item);
                                csum_end = csum_start +
                                        btrfs_file_extent_num_bytes(eb, item);
                        }

                        ret = btrfs_lookup_csums_range(root->log_root,
                                                csum_start, csum_end - 1,
                                                &ordered_sums, 0);
                        if (ret)
                                goto out;
                        while (!list_empty(&ordered_sums)) {
                                struct btrfs_ordered_sum *sums;
                                sums = list_entry(ordered_sums.next,
                                                  struct btrfs_ordered_sum,
                                                  list);
                                if (!ret)
                                        ret = btrfs_csum_file_blocks(trans,
                                                root->fs_info->csum_root,
                                                sums);
                                list_del(&sums->list);
                                kfree(sums);
                        }
                        if (ret)
                                goto out;
                } else {
                        btrfs_release_path(path);
                }
        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                /* inline extents are easy, we just overwrite them */
                ret = overwrite_item(trans, root, path, eb, slot, key);
                if (ret)
                        goto out;
        }

        inode_add_bytes(inode, nbytes);
        ret = btrfs_update_inode(trans, root, inode);
out:
        if (inode)
                iput(inode);
        return ret;
}
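
/*
 * The nbytes value computed above is what inode_add_bytes() feeds back
 * into the inode: real extents count their full length, while holes
 * (disk_bytenr == 0) and unrecognized extent types count nothing, so
 * the inode's byte accounting comes out consistent after replay.
 */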

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct inode *dir,
                                      struct btrfs_dir_item *di)
{
        struct inode *inode;
        char *name;
        int name_len;
        struct extent_buffer *leaf;
        struct btrfs_key location;
        int ret;

        leaf = path->nodes[0];

        btrfs_dir_item_key_to_cpu(leaf, di, &location);
        name_len = btrfs_dir_name_len(leaf, di);
        name = kmalloc(name_len, GFP_NOFS);
        if (!name)
                return -ENOMEM;

        read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
        btrfs_release_path(path);

        inode = read_one_inode(root, location.objectid);
        if (!inode) {
                ret = -EIO;
                goto out;
        }

        ret = link_to_fixup_dir(trans, root, path, location.objectid);
        if (ret)
                goto out;

        ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
        if (ret)
                goto out;
        else
                ret = btrfs_run_delayed_items(trans, root);
out:
        kfree(name);
        iput(inode);
        return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 u64 dirid, u64 objectid, u64 index,
                                 const char *name, int name_len)
{
        struct btrfs_dir_item *di;
        struct btrfs_key location;
        int match = 0;

        di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
                                         index, name, name_len, 0);
        if (di && !IS_ERR(di)) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
        } else
                goto out;
        btrfs_release_path(path);

        di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
        if (di && !IS_ERR(di)) {
                btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
                if (location.objectid != objectid)
                        goto out;
        } else
                goto out;
        match = 1;
out:
        btrfs_release_path(path);
        return match;
}
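
/*
 * An INODE_REF item body is a packed sequence of btrfs_inode_ref
 * headers (a __le64 index and a __le16 name_len), each immediately
 * followed by name_len bytes of name.  That is why the walks below
 * advance with ptr = (unsigned long)(ref + 1) + name_len per entry.
 */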

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
                                   struct btrfs_key *key,
                                   u64 ref_objectid,
                                   char *name, int namelen)
{
        struct btrfs_path *path;
        struct btrfs_inode_ref *ref;
        unsigned long ptr;
        unsigned long ptr_end;
        unsigned long name_ptr;
        int found_name_len;
        int item_size;
        int ret;
        int match = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
        if (ret != 0)
                goto out;

        ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);

        if (key->type == BTRFS_INODE_EXTREF_KEY) {
                if (btrfs_find_name_in_ext_backref(path, ref_objectid,
                                                   name, namelen, NULL))
                        match = 1;

                goto out;
        }

        item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
        ptr_end = ptr + item_size;
        while (ptr < ptr_end) {
                ref = (struct btrfs_inode_ref *)ptr;
                found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
                if (found_name_len == namelen) {
                        name_ptr = (unsigned long)(ref + 1);
                        ret = memcmp_extent_buffer(path->nodes[0], name,
                                                   name_ptr, namelen);
                        if (ret == 0) {
                                match = 1;
                                goto out;
                        }
                }
                ptr = (unsigned long)(ref + 1) + found_name_len;
        }
out:
        btrfs_free_path(path);
        return match;
}
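
/*
 * INODE_EXTREF items are laid out much the same way: each
 * btrfs_inode_extref header (parent objectid, index and name_len) is
 * followed directly by the name bytes, so the extref walks below
 * advance by sizeof(*extref) + name_len per entry.
 */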

static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  struct btrfs_root *log_root,
                                  struct inode *dir, struct inode *inode,
                                  struct extent_buffer *eb,
                                  u64 inode_objectid, u64 parent_objectid,
                                  u64 ref_index, char *name, int namelen,
                                  int *search_done)
{
        int ret;
        char *victim_name;
        int victim_name_len;
        struct extent_buffer *leaf;
        struct btrfs_dir_item *di;
        struct btrfs_key search_key;
        struct btrfs_inode_extref *extref;

again:
        /* Search old style refs */
        search_key.objectid = inode_objectid;
        search_key.type = BTRFS_INODE_REF_KEY;
        search_key.offset = parent_objectid;
        ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
        if (ret == 0) {
                struct btrfs_inode_ref *victim_ref;
                unsigned long ptr;
                unsigned long ptr_end;

                leaf = path->nodes[0];

                /* are we trying to overwrite a back ref for the root directory
                 * if so, just jump out, we're done
                 */
                if (search_key.objectid == search_key.offset)
                        return 1;

                /* check all the names in this back reference to see
                 * if they are in the log.  if so, we allow them to stay
                 * otherwise they must be unlinked as a conflict
                 */
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
                while (ptr < ptr_end) {
                        victim_ref = (struct btrfs_inode_ref *)ptr;
                        victim_name_len = btrfs_inode_ref_name_len(leaf,
                                                                   victim_ref);
                        victim_name = kmalloc(victim_name_len, GFP_NOFS);
                        if (!victim_name)
                                return -ENOMEM;

                        read_extent_buffer(leaf, victim_name,
                                           (unsigned long)(victim_ref + 1),
                                           victim_name_len);

                        if (!backref_in_log(log_root, &search_key,
                                            parent_objectid,
                                            victim_name,
                                            victim_name_len)) {
                                inc_nlink(inode);
                                btrfs_release_path(path);

                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
                                kfree(victim_name);
                                if (ret)
                                        return ret;
                                ret = btrfs_run_delayed_items(trans, root);
                                if (ret)
                                        return ret;
                                *search_done = 1;
                                goto again;
                        }
                        kfree(victim_name);

                        ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
                }

                /*
                 * NOTE: we have searched the root tree and checked the
                 * corresponding ref, there is no need to check it again.
                 */
                *search_done = 1;
        }
        btrfs_release_path(path);

        /* Same search but for extended refs */
        extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
                                           inode_objectid, parent_objectid, 0,
                                           0);
        if (!IS_ERR_OR_NULL(extref)) {
                u32 item_size;
                u32 cur_offset = 0;
                unsigned long base;
                struct inode *victim_parent;

                leaf = path->nodes[0];

                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                base = btrfs_item_ptr_offset(leaf, path->slots[0]);

                while (cur_offset < item_size) {
                        extref = (struct btrfs_inode_extref *)(base + cur_offset);

                        victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

                        if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
                                goto next;

                        victim_name = kmalloc(victim_name_len, GFP_NOFS);
                        if (!victim_name)
                                return -ENOMEM;
                        read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
                                           victim_name_len);

                        search_key.objectid = inode_objectid;
                        search_key.type = BTRFS_INODE_EXTREF_KEY;
                        search_key.offset = btrfs_extref_hash(parent_objectid,
                                                              victim_name,
                                                              victim_name_len);
                        ret = 0;
                        if (!backref_in_log(log_root, &search_key,
                                            parent_objectid, victim_name,
                                            victim_name_len)) {
                                ret = -ENOENT;
                                victim_parent = read_one_inode(root,
                                                               parent_objectid);
                                if (victim_parent) {
                                        inc_nlink(inode);
                                        btrfs_release_path(path);

                                        ret = btrfs_unlink_inode(trans, root,
                                                                 victim_parent,
                                                                 inode,
                                                                 victim_name,
                                                                 victim_name_len);
                                        if (!ret)
                                                ret = btrfs_run_delayed_items(
                                                                trans, root);
                                }
                                iput(victim_parent);
                                kfree(victim_name);
                                if (ret)
                                        return ret;
                                *search_done = 1;
                                goto again;
                        }
                        kfree(victim_name);
                        if (ret)
                                return ret;
next:
                        cur_offset += victim_name_len + sizeof(*extref);
                }
                *search_done = 1;
        }
        btrfs_release_path(path);

        /* look for a conflicting sequence number */
        di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
                                         ref_index, name, namelen, 0);
        if (di && !IS_ERR(di)) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
        }
        btrfs_release_path(path);

        /* look for a conflicting name */
        di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
                                   name, namelen, 0);
        if (di && !IS_ERR(di)) {
                ret = drop_one_dir_item(trans, root, path, dir, di);
                if (ret)
                        return ret;
        }
        btrfs_release_path(path);

        return 0;
}

static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
                             u32 *namelen, char **name, u64 *index,
                             u64 *parent_objectid)
{
        struct btrfs_inode_extref *extref;

        extref = (struct btrfs_inode_extref *)ref_ptr;

        *namelen = btrfs_inode_extref_name_len(eb, extref);
        *name = kmalloc(*namelen, GFP_NOFS);
        if (*name == NULL)
                return -ENOMEM;

        read_extent_buffer(eb, *name, (unsigned long)&extref->name,
                           *namelen);

        *index = btrfs_inode_extref_index(eb, extref);
        if (parent_objectid)
                *parent_objectid = btrfs_inode_extref_parent(eb, extref);

        return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
                          u32 *namelen, char **name, u64 *index)
{
        struct btrfs_inode_ref *ref;

        ref = (struct btrfs_inode_ref *)ref_ptr;

        *namelen = btrfs_inode_ref_name_len(eb, ref);
        *name = kmalloc(*namelen, GFP_NOFS);
        if (*name == NULL)
                return -ENOMEM;

        read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

        *index = btrfs_inode_ref_index(eb, ref);

        return 0;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_root *log,
                                  struct btrfs_path *path,
                                  struct extent_buffer *eb, int slot,
                                  struct btrfs_key *key)
{
        struct inode *dir = NULL;
        struct inode *inode = NULL;
        unsigned long ref_ptr;
        unsigned long ref_end;
        char *name = NULL;
        int namelen;
        int ret;
        int search_done = 0;
        int log_ref_ver = 0;
        u64 parent_objectid;
        u64 inode_objectid;
        u64 ref_index = 0;
        int ref_struct_size;

        ref_ptr = btrfs_item_ptr_offset(eb, slot);
        ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

        if (key->type == BTRFS_INODE_EXTREF_KEY) {
                struct btrfs_inode_extref *r;

                ref_struct_size = sizeof(struct btrfs_inode_extref);
                log_ref_ver = 1;
                r = (struct btrfs_inode_extref *)ref_ptr;
                parent_objectid = btrfs_inode_extref_parent(eb, r);
        } else {
                ref_struct_size = sizeof(struct btrfs_inode_ref);
                parent_objectid = key->offset;
        }
        inode_objectid = key->objectid;

        /*
         * it is possible that we didn't log all the parent directories
         * for a given inode.  If we don't find the dir, just don't
         * copy the back ref in.  The link count fixup code will take
         * care of the rest
         */
        dir = read_one_inode(root, parent_objectid);
        if (!dir) {
                ret = -ENOENT;
                goto out;
        }

        inode = read_one_inode(root, inode_objectid);
        if (!inode) {
                ret = -EIO;
                goto out;
        }

        while (ref_ptr < ref_end) {
                if (log_ref_ver) {
                        ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
                                                &ref_index, &parent_objectid);
                        /*
                         * parent object can change from one array
                         * item to another.
                         */
                        if (!dir)
                                dir = read_one_inode(root, parent_objectid);
                        if (!dir) {
                                ret = -ENOENT;
                                goto out;
                        }
                } else {
                        ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
                                             &ref_index);
                }
                if (ret)
                        goto out;

                /* if we already have a perfect match, we're done */
                if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
                                  ref_index, name, namelen)) {
                        /*
                         * look for a conflicting back reference in the
                         * metadata. if we find one we have to unlink that name
                         * of the file before we add our new link.  Later on, we
                         * overwrite any existing back reference, and we don't
                         * want to create dangling pointers in the directory.
                         */

                        if (!search_done) {
                                ret = __add_inode_ref(trans, root, path, log,
                                                      dir, inode, eb,
                                                      inode_objectid,
                                                      parent_objectid,
                                                      ref_index, name, namelen,
                                                      &search_done);
                                if (ret) {
                                        if (ret == 1)
                                                ret = 0;
                                        goto out;
                                }
                        }

                        /* insert our name */
                        ret = btrfs_add_link(trans, dir, inode, name, namelen,
                                             0, ref_index);
                        if (ret)
                                goto out;

                        btrfs_update_inode(trans, root, inode);
                }

                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
                kfree(name);
                name = NULL;
                if (log_ref_ver) {
                        iput(dir);
                        dir = NULL;
                }
        }

        /* finally write the back reference in the inode */
        ret = overwrite_item(trans, root, path, eb, slot, key);
out:
        btrfs_release_path(path);
        kfree(name);
        iput(dir);
        iput(inode);
        return ret;
}
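
/*
 * Note that a missing parent directory yields -ENOENT here, which
 * replay_one_buffer() deliberately tolerates; the link count fixup
 * pass repairs whatever is left inconsistent afterwards.
 */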

static int insert_orphan_item(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 offset)
{
        int ret;
        ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
                              offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
        if (ret > 0)
                ret = btrfs_insert_orphan_item(trans, root, offset);
        return ret;
}

static int count_inode_extrefs(struct btrfs_root *root,
                               struct inode *inode, struct btrfs_path *path)
{
        int ret = 0;
        int name_len;
        unsigned int nlink = 0;
        u32 item_size;
        u32 cur_offset = 0;
        u64 inode_objectid = btrfs_ino(inode);
        u64 offset = 0;
        unsigned long ptr;
        struct btrfs_inode_extref *extref;
        struct extent_buffer *leaf;

        while (1) {
                ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
                                            &extref, &offset);
                if (ret)
                        break;

                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                /* restart at the front of each extref item */
                cur_offset = 0;

                while (cur_offset < item_size) {
                        extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
                        name_len = btrfs_inode_extref_name_len(leaf, extref);

                        nlink++;

                        cur_offset += name_len + sizeof(*extref);
                }

                offset++;
                btrfs_release_path(path);
        }
        btrfs_release_path(path);

        if (ret < 0)
                return ret;
        return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
                            struct inode *inode, struct btrfs_path *path)
{
        int ret;
        struct btrfs_key key;
        unsigned int nlink = 0;
        unsigned long ptr;
        unsigned long ptr_end;
        int name_len;
        u64 ino = btrfs_ino(inode);

        key.objectid = ino;
        key.type = BTRFS_INODE_REF_KEY;
        key.offset = (u64)-1;

        while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        break;
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                }
process_slot:
                btrfs_item_key_to_cpu(path->nodes[0], &key,
                                      path->slots[0]);
                if (key.objectid != ino ||
                    key.type != BTRFS_INODE_REF_KEY)
                        break;
                ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
                ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
                                                   path->slots[0]);
                while (ptr < ptr_end) {
                        struct btrfs_inode_ref *ref;

                        ref = (struct btrfs_inode_ref *)ptr;
                        name_len = btrfs_inode_ref_name_len(path->nodes[0],
                                                            ref);
                        ptr = (unsigned long)(ref + 1) + name_len;
                        nlink++;
                }

                if (key.offset == 0)
                        break;
                if (path->slots[0] > 0) {
                        path->slots[0]--;
                        goto process_slot;
                }
                key.offset--;
                btrfs_release_path(path);
        }
        btrfs_release_path(path);

        return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct inode *inode)
{
        struct btrfs_path *path;
        int ret;
        u64 nlink = 0;
        u64 ino = btrfs_ino(inode);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = count_inode_refs(root, inode, path);
        if (ret < 0)
                goto out;

        nlink = ret;

        ret = count_inode_extrefs(root, inode, path);
        if (ret == -ENOENT)
                ret = 0;

        if (ret < 0)
                goto out;

        nlink += ret;

        ret = 0;

        if (nlink != inode->i_nlink) {
                set_nlink(inode, nlink);
                btrfs_update_inode(trans, root, inode);
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;

        if (inode->i_nlink == 0) {
                if (S_ISDIR(inode->i_mode)) {
                        ret = replay_dir_deletes(trans, root, NULL, path,
                                                 ino, 1);
                        if (ret)
                                goto out;
                }
                ret = insert_orphan_item(trans, root, ino);
        }

out:
        btrfs_free_path(path);
        return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            struct btrfs_path *path)
{
        int ret;
        struct btrfs_key key;
        struct inode *inode;

        key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
        key.offset = (u64)-1;
        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        break;

                if (ret == 1) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                }

                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
                    key.type != BTRFS_ORPHAN_ITEM_KEY)
                        break;

                ret = btrfs_del_item(trans, root, path);
                if (ret)
                        goto out;

                btrfs_release_path(path);
                inode = read_one_inode(root, key.offset);
                if (!inode)
                        return -EIO;

                ret = fixup_inode_link_count(trans, root, inode);
                iput(inode);
                if (ret)
                        goto out;

                /*
                 * fixup on a directory may create new entries,
                 * make sure we always look for the highest possible
                 * offset
                 */
                key.offset = (u64)-1;
        }
        ret = 0;
out:
        btrfs_release_path(path);
        return ret;
}

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      u64 objectid)
{
        struct btrfs_key key;
        int ret = 0;
        struct inode *inode;

        inode = read_one_inode(root, objectid);
        if (!inode)
                return -EIO;

        key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
        key.offset = objectid;

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

        btrfs_release_path(path);
        if (ret == 0) {
                if (!inode->i_nlink)
                        set_nlink(inode, 1);
                else
                        inc_nlink(inode);
                ret = btrfs_update_inode(trans, root, inode);
        } else if (ret == -EEXIST) {
                ret = 0;
        } else {
                BUG(); /* Logic Error */
        }
        iput(inode);

        return ret;
}
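
/*
 * The fixup entries created here live under
 * BTRFS_TREE_LOG_FIXUP_OBJECTID and are consumed (and deleted) by
 * fixup_inode_link_counts() once the rest of replay has finished.
 */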

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    u64 dirid, u64 index,
                                    char *name, int name_len, u8 type,
                                    struct btrfs_key *location)
{
        struct inode *inode;
        struct inode *dir;
        int ret;

        inode = read_one_inode(root, location->objectid);
        if (!inode)
                return -ENOENT;

        dir = read_one_inode(root, dirid);
        if (!dir) {
                iput(inode);
                return -EIO;
        }

        ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

        /* FIXME, put inode into FIXUP list */

        iput(inode);
        iput(dir);
        return ret;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct extent_buffer *eb,
                                    struct btrfs_dir_item *di,
                                    struct btrfs_key *key)
{
        char *name;
        int name_len;
        struct btrfs_dir_item *dst_di;
        struct btrfs_key found_key;
        struct btrfs_key log_key;
        struct inode *dir;
        u8 log_type;
        int exists;
        int ret = 0;
        bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);

        dir = read_one_inode(root, key->objectid);
        if (!dir)
                return -EIO;

        name_len = btrfs_dir_name_len(eb, di);
        name = kmalloc(name_len, GFP_NOFS);
        if (!name) {
                ret = -ENOMEM;
                goto out;
        }

        log_type = btrfs_dir_type(eb, di);
        read_extent_buffer(eb, name, (unsigned long)(di + 1),
                           name_len);

        btrfs_dir_item_key_to_cpu(eb, di, &log_key);
        exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
        if (exists == 0)
                exists = 1;
        else
                exists = 0;
        btrfs_release_path(path);

        if (key->type == BTRFS_DIR_ITEM_KEY) {
                dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
                                               name, name_len, 1);
        } else if (key->type == BTRFS_DIR_INDEX_KEY) {
                dst_di = btrfs_lookup_dir_index_item(trans, root, path,
                                                     key->objectid,
                                                     key->offset, name,
                                                     name_len, 1);
        } else {
                /* Corruption */
                ret = -EINVAL;
                goto out;
        }
        if (IS_ERR_OR_NULL(dst_di)) {
                /* we need a sequence number to insert, so we only
                 * do inserts for the BTRFS_DIR_INDEX_KEY types
                 */
                if (key->type != BTRFS_DIR_INDEX_KEY)
                        goto out;
                goto insert;
        }

        btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
        /* the existing item matches the logged item */
        if (found_key.objectid == log_key.objectid &&
            found_key.type == log_key.type &&
            found_key.offset == log_key.offset &&
            btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
                update_size = false;
                goto out;
        }

        /*
         * don't drop the conflicting directory entry if the inode
         * for the new entry doesn't exist
         */
        if (!exists)
                goto out;

        ret = drop_one_dir_item(trans, root, path, dir, dst_di);
        if (ret)
                goto out;

        if (key->type == BTRFS_DIR_INDEX_KEY)
                goto insert;
out:
        btrfs_release_path(path);
        if (!ret && update_size) {
                btrfs_i_size_write(dir, dir->i_size + name_len * 2);
                ret = btrfs_update_inode(trans, root, dir);
        }
        kfree(name);
        iput(dir);
        return ret;

insert:
        btrfs_release_path(path);
        ret = insert_one_name(trans, root, path, key->objectid, key->offset,
                              name, name_len, log_type, &log_key);
        if (ret && ret != -ENOENT)
                goto out;
        update_size = false;
        ret = 0;
        goto out;
}

/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct extent_buffer *eb, int slot,
                                        struct btrfs_key *key)
{
        int ret;
        u32 item_size = btrfs_item_size_nr(eb, slot);
        struct btrfs_dir_item *di;
        int name_len;
        unsigned long ptr;
        unsigned long ptr_end;

        ptr = btrfs_item_ptr_offset(eb, slot);
        ptr_end = ptr + item_size;
        while (ptr < ptr_end) {
                di = (struct btrfs_dir_item *)ptr;
                if (verify_dir_item(root, eb, di))
                        return -EIO;
                name_len = btrfs_dir_name_len(eb, di);
                ret = replay_one_name(trans, root, path, eb, di, key);
                if (ret)
                        return ret;
                ptr = (unsigned long)(di + 1);
                ptr += name_len;
        }
        return 0;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
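
/*
 * Concretely: a dir log item with key.offset == N and
 * btrfs_dir_log_end() == M claims the log is authoritative for the
 * inclusive key range [N, M]; any subvolume entry inside such a range
 * with no matching item in the log was deleted before the fsync and
 * must be unlinked during replay.
 */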
static noinline int find_dir_range(struct btrfs_root *root,
                                   struct btrfs_path *path,
                                   u64 dirid, int key_type,
                                   u64 *start_ret, u64 *end_ret)
{
        struct btrfs_key key;
        u64 found_end;
        struct btrfs_dir_log_item *item;
        int ret;
        int nritems;

        if (*start_ret == (u64)-1)
                return 1;

        key.objectid = dirid;
        key.type = key_type;
        key.offset = *start_ret;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
                if (path->slots[0] == 0)
                        goto out;
                path->slots[0]--;
        }
        if (ret != 0)
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

        if (key.type != key_type || key.objectid != dirid) {
                ret = 1;
                goto next;
        }
        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                              struct btrfs_dir_log_item);
        found_end = btrfs_dir_log_end(path->nodes[0], item);

        if (*start_ret >= key.offset && *start_ret <= found_end) {
                ret = 0;
                *start_ret = key.offset;
                *end_ret = found_end;
                goto out;
        }
        ret = 1;
next:
        /* check the next slot in the tree to see if it is a valid item */
        nritems = btrfs_header_nritems(path->nodes[0]);
        if (path->slots[0] >= nritems) {
                ret = btrfs_next_leaf(root, path);
                if (ret)
                        goto out;
        } else {
                path->slots[0]++;
        }

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

        if (key.type != key_type || key.objectid != dirid) {
                ret = 1;
                goto out;
        }
        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                              struct btrfs_dir_log_item);
        found_end = btrfs_dir_log_end(path->nodes[0], item);
        *start_ret = key.offset;
        *end_ret = found_end;
        ret = 0;
out:
        btrfs_release_path(path);
        return ret;
}

/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_root *log,
                                      struct btrfs_path *path,
                                      struct btrfs_path *log_path,
                                      struct inode *dir,
                                      struct btrfs_key *dir_key)
{
        int ret;
        struct extent_buffer *eb;
        int slot;
        u32 item_size;
        struct btrfs_dir_item *di;
        struct btrfs_dir_item *log_di;
        int name_len;
        unsigned long ptr;
        unsigned long ptr_end;
        char *name;
        struct inode *inode;
        struct btrfs_key location;

again:
        eb = path->nodes[0];
        slot = path->slots[0];
        item_size = btrfs_item_size_nr(eb, slot);
        ptr = btrfs_item_ptr_offset(eb, slot);
        ptr_end = ptr + item_size;
        while (ptr < ptr_end) {
                di = (struct btrfs_dir_item *)ptr;
                if (verify_dir_item(root, eb, di)) {
                        ret = -EIO;
                        goto out;
                }

                name_len = btrfs_dir_name_len(eb, di);
                name = kmalloc(name_len, GFP_NOFS);
                if (!name) {
                        ret = -ENOMEM;
                        goto out;
                }
                read_extent_buffer(eb, name, (unsigned long)(di + 1),
                                   name_len);
                log_di = NULL;
                if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
                        log_di = btrfs_lookup_dir_item(trans, log, log_path,
                                                       dir_key->objectid,
                                                       name, name_len, 0);
                } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
                        log_di = btrfs_lookup_dir_index_item(trans, log,
                                                     log_path,
                                                     dir_key->objectid,
                                                     dir_key->offset,
                                                     name, name_len, 0);
                }
                if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
                        btrfs_dir_item_key_to_cpu(eb, di, &location);
                        btrfs_release_path(path);
                        btrfs_release_path(log_path);
                        inode = read_one_inode(root, location.objectid);
                        if (!inode) {
                                kfree(name);
                                return -EIO;
                        }

                        ret = link_to_fixup_dir(trans, root,
                                                path, location.objectid);
                        if (ret) {
                                kfree(name);
                                iput(inode);
                                goto out;
                        }

                        inc_nlink(inode);
                        ret = btrfs_unlink_inode(trans, root, dir, inode,
                                                 name, name_len);
                        if (!ret)
                                ret = btrfs_run_delayed_items(trans, root);
                        kfree(name);
                        iput(inode);
                        if (ret)
                                goto out;

                        /* there might still be more names under this key
                         * check and repeat if required
                         */
                        ret = btrfs_search_slot(NULL, root, dir_key, path,
                                                0, 0);
                        if (ret == 0)
                                goto again;
                        ret = 0;
                        goto out;
                } else if (IS_ERR(log_di)) {
                        kfree(name);
                        return PTR_ERR(log_di);
                }
                btrfs_release_path(log_path);
                kfree(name);

                ptr = (unsigned long)(di + 1);
                ptr += name_len;
        }
        ret = 0;
out:
        btrfs_release_path(path);
        btrfs_release_path(log_path);
        return ret;
}

/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct btrfs_root *log,
                                       struct btrfs_path *path,
                                       u64 dirid, int del_all)
{
        u64 range_start;
        u64 range_end;
        int key_type = BTRFS_DIR_LOG_ITEM_KEY;
        int ret = 0;
        struct btrfs_key dir_key;
        struct btrfs_key found_key;
        struct btrfs_path *log_path;
        struct inode *dir;

        dir_key.objectid = dirid;
        dir_key.type = BTRFS_DIR_ITEM_KEY;
        log_path = btrfs_alloc_path();
        if (!log_path)
                return -ENOMEM;

        dir = read_one_inode(root, dirid);
        /* it isn't an error if the inode isn't there, that can happen
         * because we replay the deletes before we copy in the inode item
         * from the log
         */
        if (!dir) {
                btrfs_free_path(log_path);
                return 0;
        }
again:
        range_start = 0;
        range_end = 0;
        while (1) {
                if (del_all)
                        range_end = (u64)-1;
                else {
                        ret = find_dir_range(log, path, dirid, key_type,
                                             &range_start, &range_end);
                        if (ret != 0)
                                break;
                }

                dir_key.offset = range_start;
                while (1) {
                        int nritems;
                        ret = btrfs_search_slot(NULL, root, &dir_key, path,
                                                0, 0);
                        if (ret < 0)
                                goto out;

                        nritems = btrfs_header_nritems(path->nodes[0]);
                        if (path->slots[0] >= nritems) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret)
                                        break;
                        }
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              path->slots[0]);
                        if (found_key.objectid != dirid ||
                            found_key.type != dir_key.type)
                                goto next_type;

                        if (found_key.offset > range_end)
                                break;

                        ret = check_item_in_log(trans, root, log, path,
                                                log_path, dir,
                                                &found_key);
                        if (ret)
                                goto out;
                        if (found_key.offset == (u64)-1)
                                break;
                        dir_key.offset = found_key.offset + 1;
                }
                btrfs_release_path(path);
                if (range_end == (u64)-1)
                        break;
                range_start = range_end + 1;
        }

next_type:
        ret = 0;
        if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
                key_type = BTRFS_DIR_LOG_INDEX_KEY;
                dir_key.type = BTRFS_DIR_INDEX_KEY;
                btrfs_release_path(path);
                goto again;
        }
out:
        btrfs_release_path(path);
        btrfs_free_path(log_path);
        iput(dir);
        return ret;
}
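
/*
 * Note that the deletion replay above runs twice via the
 * next_type/again logic: once for BTRFS_DIR_LOG_ITEM_KEY ranges
 * (hashed dir items) and once for BTRFS_DIR_LOG_INDEX_KEY ranges
 * (dir index items).
 */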

/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                             struct walk_control *wc, u64 gen)
{
        int nritems;
        struct btrfs_path *path;
        struct btrfs_root *root = wc->replay_dest;
        struct btrfs_key key;
        int level;
        int i;
        int ret;

        ret = btrfs_read_buffer(eb, gen);
        if (ret)
                return ret;

        level = btrfs_header_level(eb);

        if (level != 0)
                return 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        nritems = btrfs_header_nritems(eb);
        for (i = 0; i < nritems; i++) {
                btrfs_item_key_to_cpu(eb, &key, i);

                /* inode keys are done during the first stage */
                if (key.type == BTRFS_INODE_ITEM_KEY &&
                    wc->stage == LOG_WALK_REPLAY_INODES) {
                        struct btrfs_inode_item *inode_item;
                        u32 mode;

                        inode_item = btrfs_item_ptr(eb, i,
                                            struct btrfs_inode_item);
                        mode = btrfs_inode_mode(eb, inode_item);
                        if (S_ISDIR(mode)) {
                                ret = replay_dir_deletes(wc->trans,
                                         root, log, path, key.objectid, 0);
                                if (ret)
                                        break;
                        }
                        ret = overwrite_item(wc->trans, root, path,
                                             eb, i, &key);
                        if (ret)
                                break;

                        /* for regular files, make sure the corresponding
                         * orphan item exists.  extents past the new EOF
                         * will be truncated later by orphan cleanup.
                         */
                        if (S_ISREG(mode)) {
                                ret = insert_orphan_item(wc->trans, root,
                                                         key.objectid);
                                if (ret)
                                        break;
                        }

                        ret = link_to_fixup_dir(wc->trans, root,
                                                path, key.objectid);
                        if (ret)
                                break;
                }

                if (key.type == BTRFS_DIR_INDEX_KEY &&
                    wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
                        ret = replay_one_dir_item(wc->trans, root, path,
                                                  eb, i, &key);
                        if (ret)
                                break;
                }

                if (wc->stage < LOG_WALK_REPLAY_ALL)
                        continue;

                /* these keys are simply copied */
                if (key.type == BTRFS_XATTR_ITEM_KEY) {
                        ret = overwrite_item(wc->trans, root, path,
                                             eb, i, &key);
                        if (ret)
                                break;
                } else if (key.type == BTRFS_INODE_REF_KEY ||
                           key.type == BTRFS_INODE_EXTREF_KEY) {
                        ret = add_inode_ref(wc->trans, root, log, path,
                                            eb, i, &key);
                        if (ret && ret != -ENOENT)
                                break;
                        ret = 0;
                } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
                        ret = replay_one_extent(wc->trans, root, path,
                                                eb, i, &key);
                        if (ret)
                                break;
                } else if (key.type == BTRFS_DIR_ITEM_KEY) {
                        ret = replay_one_dir_item(wc->trans, root, path,
                                                  eb, i, &key);
                        if (ret)
                                break;
                }
        }
        btrfs_free_path(path);
        return ret;
}
2132 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2133 struct btrfs_root *root,
2134 struct btrfs_path *path, int *level,
2135 struct walk_control *wc)
2137 u64 root_owner;
2138 u64 bytenr;
2139 u64 ptr_gen;
2140 struct extent_buffer *next;
2141 struct extent_buffer *cur;
2142 struct extent_buffer *parent;
2143 u32 blocksize;
2144 int ret = 0;
2146 WARN_ON(*level < 0);
2147 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2149 while (*level > 0) {
2150 WARN_ON(*level < 0);
2151 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2152 cur = path->nodes[*level];
2154 WARN_ON(btrfs_header_level(cur) != *level);
2156 if (path->slots[*level] >=
2157 btrfs_header_nritems(cur))
2158 break;
2160 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2161 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2162 blocksize = root->nodesize;
2164 parent = path->nodes[*level];
2165 root_owner = btrfs_header_owner(parent);
2167 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
2168 if (!next)
2169 return -ENOMEM;
2171 if (*level == 1) {
2172 ret = wc->process_func(root, next, wc, ptr_gen);
2173 if (ret) {
2174 free_extent_buffer(next);
2175 return ret;
2178 path->slots[*level]++;
2179 if (wc->free) {
2180 ret = btrfs_read_buffer(next, ptr_gen);
2181 if (ret) {
2182 free_extent_buffer(next);
2183 return ret;
2186 if (trans) {
2187 btrfs_tree_lock(next);
2188 btrfs_set_lock_blocking(next);
2189 clean_tree_block(trans, root, next);
2190 btrfs_wait_tree_block_writeback(next);
2191 btrfs_tree_unlock(next);
2194 WARN_ON(root_owner !=
2195 BTRFS_TREE_LOG_OBJECTID);
2196 ret = btrfs_free_and_pin_reserved_extent(root,
2197 bytenr, blocksize);
2198 if (ret) {
2199 free_extent_buffer(next);
2200 return ret;
2203 free_extent_buffer(next);
2204 continue;
2206 ret = btrfs_read_buffer(next, ptr_gen);
2207 if (ret) {
2208 free_extent_buffer(next);
2209 return ret;
2212 WARN_ON(*level <= 0);
2213 if (path->nodes[*level-1])
2214 free_extent_buffer(path->nodes[*level-1]);
2215 path->nodes[*level-1] = next;
2216 *level = btrfs_header_level(next);
2217 path->slots[*level] = 0;
2218 cond_resched();
2220 WARN_ON(*level < 0);
2221 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2223 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
2225 cond_resched();
2226 return 0;
2229 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2230 struct btrfs_root *root,
2231 struct btrfs_path *path, int *level,
2232 struct walk_control *wc)
2234 u64 root_owner;
2235 int i;
2236 int slot;
2237 int ret;
2239 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2240 slot = path->slots[i];
2241 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2242 path->slots[i]++;
2243 *level = i;
2244 WARN_ON(*level == 0);
2245 return 0;
2246 } else {
2247 struct extent_buffer *parent;
2248 if (path->nodes[*level] == root->node)
2249 parent = path->nodes[*level];
2250 else
2251 parent = path->nodes[*level + 1];
2253 root_owner = btrfs_header_owner(parent);
2254 ret = wc->process_func(root, path->nodes[*level], wc,
2255 btrfs_header_generation(path->nodes[*level]));
2256 if (ret)
2257 return ret;
2259 if (wc->free) {
2260 struct extent_buffer *next;
2262 next = path->nodes[*level];
2264 if (trans) {
2265 btrfs_tree_lock(next);
2266 btrfs_set_lock_blocking(next);
2267 clean_tree_block(trans, root, next);
2268 btrfs_wait_tree_block_writeback(next);
2269 btrfs_tree_unlock(next);
2272 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2273 ret = btrfs_free_and_pin_reserved_extent(root,
2274 path->nodes[*level]->start,
2275 path->nodes[*level]->len);
2276 if (ret)
2277 return ret;
2279 free_extent_buffer(path->nodes[*level]);
2280 path->nodes[*level] = NULL;
2281 *level = i + 1;
2284 return 1;
2287 /*
2288 * drop the reference count on the tree rooted at 'log'. This traverses
2289 * the tree freeing any blocks that have a ref count of zero after being
2290 * decremented.
2291 */
2292 static int walk_log_tree(struct btrfs_trans_handle *trans,
2293 struct btrfs_root *log, struct walk_control *wc)
2295 int ret = 0;
2296 int wret;
2297 int level;
2298 struct btrfs_path *path;
2299 int orig_level;
2301 path = btrfs_alloc_path();
2302 if (!path)
2303 return -ENOMEM;
2305 level = btrfs_header_level(log->node);
2306 orig_level = level;
2307 path->nodes[level] = log->node;
2308 extent_buffer_get(log->node);
2309 path->slots[level] = 0;
2311 while (1) {
2312 wret = walk_down_log_tree(trans, log, path, &level, wc);
2313 if (wret > 0)
2314 break;
2315 if (wret < 0) {
2316 ret = wret;
2317 goto out;
2320 wret = walk_up_log_tree(trans, log, path, &level, wc);
2321 if (wret > 0)
2322 break;
2323 if (wret < 0) {
2324 ret = wret;
2325 goto out;
2329 /* was the root node processed? if not, catch it here */
2330 if (path->nodes[orig_level]) {
2331 ret = wc->process_func(log, path->nodes[orig_level], wc,
2332 btrfs_header_generation(path->nodes[orig_level]));
2333 if (ret)
2334 goto out;
2335 if (wc->free) {
2336 struct extent_buffer *next;
2338 next = path->nodes[orig_level];
2340 if (trans) {
2341 btrfs_tree_lock(next);
2342 btrfs_set_lock_blocking(next);
2343 clean_tree_block(trans, log, next);
2344 btrfs_wait_tree_block_writeback(next);
2345 btrfs_tree_unlock(next);
2348 WARN_ON(log->root_key.objectid !=
2349 BTRFS_TREE_LOG_OBJECTID);
2350 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2351 next->len);
2352 if (ret)
2353 goto out;
2357 out:
2358 btrfs_free_path(path);
2359 return ret;
2362 /*
2363 * helper function to update the item for a given subvolume's log root
2364 * in the tree of log roots
2365 */
2366 static int update_log_root(struct btrfs_trans_handle *trans,
2367 struct btrfs_root *log)
2369 int ret;
2371 if (log->log_transid == 1) {
2372 /* insert root item on the first sync */
2373 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2374 &log->root_key, &log->root_item);
2375 } else {
2376 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2377 &log->root_key, &log->root_item);
2379 return ret;
2382 static void wait_log_commit(struct btrfs_trans_handle *trans,
2383 struct btrfs_root *root, int transid)
2385 DEFINE_WAIT(wait);
2386 int index = transid % 2;
2388 /*
2389 * we only allow two pending log transactions at a time,
2390 * so we know that if ours is more than 2 older than the
2391 * current transaction, we're done
2392 */
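	/*
	 * Example: log transid N uses slot N % 2, so only the current and
	 * the previous log transaction can be pending. A waiter for
	 * transid T sleeps until either log_transid_committed reaches T
	 * or the commit flag for T's slot is cleared.
	 */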
2393 do {
2394 prepare_to_wait(&root->log_commit_wait[index],
2395 &wait, TASK_UNINTERRUPTIBLE);
2396 mutex_unlock(&root->log_mutex);
2398 if (root->log_transid_committed < transid &&
2399 atomic_read(&root->log_commit[index]))
2400 schedule();
2402 finish_wait(&root->log_commit_wait[index], &wait);
2403 mutex_lock(&root->log_mutex);
2404 } while (root->log_transid_committed < transid &&
2405 atomic_read(&root->log_commit[index]));
2408 static void wait_for_writer(struct btrfs_trans_handle *trans,
2409 struct btrfs_root *root)
2411 DEFINE_WAIT(wait);
2413 while (atomic_read(&root->log_writers)) {
2414 prepare_to_wait(&root->log_writer_wait,
2415 &wait, TASK_UNINTERRUPTIBLE);
2416 mutex_unlock(&root->log_mutex);
2417 if (atomic_read(&root->log_writers))
2418 schedule();
2419 mutex_lock(&root->log_mutex);
2420 finish_wait(&root->log_writer_wait, &wait);
2424 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2425 struct btrfs_log_ctx *ctx)
2427 if (!ctx)
2428 return;
2430 mutex_lock(&root->log_mutex);
2431 list_del_init(&ctx->list);
2432 mutex_unlock(&root->log_mutex);
2435 /*
2436 * Invoked with the log mutex held, or from a context where no other
2437 * task can access the list.
2438 */
2439 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2440 int index, int error)
2442 struct btrfs_log_ctx *ctx;
2444 if (!error) {
2445 INIT_LIST_HEAD(&root->log_ctxs[index]);
2446 return;
2449 list_for_each_entry(ctx, &root->log_ctxs[index], list)
2450 ctx->log_ret = error;
2452 INIT_LIST_HEAD(&root->log_ctxs[index]);
2455 /*
2456 * btrfs_sync_log sends a given tree log down to the disk and
2457 * updates the super blocks to record it. When this call returns
2458 * 0, you know that any inodes previously logged are safely on
2459 * disk.
2461 * Any other return value means you need to call btrfs_commit_transaction.
2462 * Some of the edge cases for fsyncing directories that have had unlinks
2463 * or renames done in the past mean that sometimes the only safe
2464 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2465 * that has happened.
2466 */
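/*
 * Rough outline of the steps performed below:
 * 1) write out the dirty blocks of this subvolume's log tree
 * 2) record its root item in the tree of log roots (update_log_root)
 * 3) write out the dirty blocks of the log root tree
 * 4) point the super block at the log root tree and write it
 * The two-slot log_commit scheme (see wait_log_commit above) is used at
 * both levels to serialize concurrent fsyncs.
 */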
2467 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2468 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2470 int index1;
2471 int index2;
2472 int mark;
2473 int ret;
2474 struct btrfs_root *log = root->log_root;
2475 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2476 int log_transid = 0;
2477 struct btrfs_log_ctx root_log_ctx;
2478 struct blk_plug plug;
2480 mutex_lock(&root->log_mutex);
2481 log_transid = ctx->log_transid;
2482 if (root->log_transid_committed >= log_transid) {
2483 mutex_unlock(&root->log_mutex);
2484 return ctx->log_ret;
2487 index1 = log_transid % 2;
2488 if (atomic_read(&root->log_commit[index1])) {
2489 wait_log_commit(trans, root, log_transid);
2490 mutex_unlock(&root->log_mutex);
2491 return ctx->log_ret;
2493 ASSERT(log_transid == root->log_transid);
2494 atomic_set(&root->log_commit[index1], 1);
2496 /* wait for previous tree log sync to complete */
2497 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2498 wait_log_commit(trans, root, log_transid - 1);
2500 while (1) {
2501 int batch = atomic_read(&root->log_batch);
2502 /* when we're on an ssd, just kick the log commit out */
2503 if (!btrfs_test_opt(root, SSD) &&
2504 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2505 mutex_unlock(&root->log_mutex);
2506 schedule_timeout_uninterruptible(1);
2507 mutex_lock(&root->log_mutex);
2509 wait_for_writer(trans, root);
2510 if (batch == atomic_read(&root->log_batch))
2511 break;
2514 /* bail out if we need to do a full commit */
2515 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2516 ret = -EAGAIN;
2517 btrfs_free_logged_extents(log, log_transid);
2518 mutex_unlock(&root->log_mutex);
2519 goto out;
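	/*
	 * Even log transids mark their dirty log pages EXTENT_DIRTY, odd
	 * ones EXTENT_NEW, presumably so the (at most) two log
	 * transactions in flight can write and wait on their own set of
	 * pages independently.
	 */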
2522 if (log_transid % 2 == 0)
2523 mark = EXTENT_DIRTY;
2524 else
2525 mark = EXTENT_NEW;
2527 /* we start IO on all the marked extents here, but we don't actually
2528 * wait for them until later.
2529 */
2530 blk_start_plug(&plug);
2531 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2532 if (ret) {
2533 blk_finish_plug(&plug);
2534 btrfs_abort_transaction(trans, root, ret);
2535 btrfs_free_logged_extents(log, log_transid);
2536 btrfs_set_log_full_commit(root->fs_info, trans);
2537 mutex_unlock(&root->log_mutex);
2538 goto out;
2541 btrfs_set_root_node(&log->root_item, log->node);
2543 root->log_transid++;
2544 log->log_transid = root->log_transid;
2545 root->log_start_pid = 0;
2546 /*
2547 * IO has been started, blocks of the log tree have WRITTEN flag set
2548 * in their headers. New modifications of the log will be written to
2549 * new positions, so it's safe to allow log writers to go in.
2550 */
2551 mutex_unlock(&root->log_mutex);
2553 btrfs_init_log_ctx(&root_log_ctx);
2555 mutex_lock(&log_root_tree->log_mutex);
2556 atomic_inc(&log_root_tree->log_batch);
2557 atomic_inc(&log_root_tree->log_writers);
2559 index2 = log_root_tree->log_transid % 2;
2560 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2561 root_log_ctx.log_transid = log_root_tree->log_transid;
2563 mutex_unlock(&log_root_tree->log_mutex);
2565 ret = update_log_root(trans, log);
2567 mutex_lock(&log_root_tree->log_mutex);
2568 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2569 smp_mb();
2570 if (waitqueue_active(&log_root_tree->log_writer_wait))
2571 wake_up(&log_root_tree->log_writer_wait);
2574 if (ret) {
2575 if (!list_empty(&root_log_ctx.list))
2576 list_del_init(&root_log_ctx.list);
2578 blk_finish_plug(&plug);
2579 btrfs_set_log_full_commit(root->fs_info, trans);
2581 if (ret != -ENOSPC) {
2582 btrfs_abort_transaction(trans, root, ret);
2583 mutex_unlock(&log_root_tree->log_mutex);
2584 goto out;
2586 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2587 btrfs_free_logged_extents(log, log_transid);
2588 mutex_unlock(&log_root_tree->log_mutex);
2589 ret = -EAGAIN;
2590 goto out;
2593 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2594 mutex_unlock(&log_root_tree->log_mutex);
2595 ret = root_log_ctx.log_ret;
2596 goto out;
2599 index2 = root_log_ctx.log_transid % 2;
2600 if (atomic_read(&log_root_tree->log_commit[index2])) {
2601 blk_finish_plug(&plug);
2602 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
2603 mark);
2604 btrfs_wait_logged_extents(trans, log, log_transid);
2605 wait_log_commit(trans, log_root_tree,
2606 root_log_ctx.log_transid);
2607 mutex_unlock(&log_root_tree->log_mutex);
2608 if (!ret)
2609 ret = root_log_ctx.log_ret;
2610 goto out;
2612 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2613 atomic_set(&log_root_tree->log_commit[index2], 1);
2615 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2616 wait_log_commit(trans, log_root_tree,
2617 root_log_ctx.log_transid - 1);
2620 wait_for_writer(trans, log_root_tree);
2623 * now that we've moved on to the tree of log tree roots,
2624 * check the full commit flag again
2626 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2627 blk_finish_plug(&plug);
2628 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2629 btrfs_free_logged_extents(log, log_transid);
2630 mutex_unlock(&log_root_tree->log_mutex);
2631 ret = -EAGAIN;
2632 goto out_wake_log_root;
2635 ret = btrfs_write_marked_extents(log_root_tree,
2636 &log_root_tree->dirty_log_pages,
2637 EXTENT_DIRTY | EXTENT_NEW);
2638 blk_finish_plug(&plug);
2639 if (ret) {
2640 btrfs_set_log_full_commit(root->fs_info, trans);
2641 btrfs_abort_transaction(trans, root, ret);
2642 btrfs_free_logged_extents(log, log_transid);
2643 mutex_unlock(&log_root_tree->log_mutex);
2644 goto out_wake_log_root;
2646 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2647 if (!ret)
2648 ret = btrfs_wait_marked_extents(log_root_tree,
2649 &log_root_tree->dirty_log_pages,
2650 EXTENT_NEW | EXTENT_DIRTY);
2651 if (ret) {
2652 btrfs_set_log_full_commit(root->fs_info, trans);
2653 btrfs_free_logged_extents(log, log_transid);
2654 mutex_unlock(&log_root_tree->log_mutex);
2655 goto out_wake_log_root;
2657 btrfs_wait_logged_extents(trans, log, log_transid);
2659 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2660 log_root_tree->node->start);
2661 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2662 btrfs_header_level(log_root_tree->node));
2664 log_root_tree->log_transid++;
2665 mutex_unlock(&log_root_tree->log_mutex);
2667 /*
2668 * nobody else is going to jump in and write the ctree
2669 * super here because the log_commit atomic below is protecting
2670 * us. We must be called with a transaction handle pinning
2671 * the running transaction open, so a full commit can't hop
2672 * in and cause problems either.
2673 */
2674 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2675 if (ret) {
2676 btrfs_set_log_full_commit(root->fs_info, trans);
2677 btrfs_abort_transaction(trans, root, ret);
2678 goto out_wake_log_root;
2681 mutex_lock(&root->log_mutex);
2682 if (root->last_log_commit < log_transid)
2683 root->last_log_commit = log_transid;
2684 mutex_unlock(&root->log_mutex);
2686 out_wake_log_root:
2687 /*
2688 * We don't need to take the log_mutex here because we are sure all
2689 * the other tasks are blocked.
2690 */
2691 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2693 mutex_lock(&log_root_tree->log_mutex);
2694 log_root_tree->log_transid_committed++;
2695 atomic_set(&log_root_tree->log_commit[index2], 0);
2696 mutex_unlock(&log_root_tree->log_mutex);
2698 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2699 wake_up(&log_root_tree->log_commit_wait[index2]);
2700 out:
2701 /* See above. */
2702 btrfs_remove_all_log_ctxs(root, index1, ret);
2704 mutex_lock(&root->log_mutex);
2705 root->log_transid_committed++;
2706 atomic_set(&root->log_commit[index1], 0);
2707 mutex_unlock(&root->log_mutex);
2709 if (waitqueue_active(&root->log_commit_wait[index1]))
2710 wake_up(&root->log_commit_wait[index1]);
2711 return ret;
2714 static void free_log_tree(struct btrfs_trans_handle *trans,
2715 struct btrfs_root *log)
2717 int ret;
2718 u64 start;
2719 u64 end;
2720 struct walk_control wc = {
2721 .free = 1,
2722 .process_func = process_one_buffer
2725 ret = walk_log_tree(trans, log, &wc);
2726 /* I don't think this can happen but just in case */
2727 if (ret)
2728 btrfs_abort_transaction(trans, log, ret);
2730 while (1) {
2731 ret = find_first_extent_bit(&log->dirty_log_pages,
2732 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2733 NULL);
2734 if (ret)
2735 break;
2737 clear_extent_bits(&log->dirty_log_pages, start, end,
2738 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2741 /*
2742 * We may have short-circuited the log tree with the full commit logic
2743 * and left ordered extents on our list, so clear these out to keep us
2744 * from leaking inodes and memory.
2745 */
2746 btrfs_free_logged_extents(log, 0);
2747 btrfs_free_logged_extents(log, 1);
2749 free_extent_buffer(log->node);
2750 kfree(log);
2754 * free all the extents used by the tree log. This should be called
2755 * at commit time of the full transaction
2757 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2759 if (root->log_root) {
2760 free_log_tree(trans, root->log_root);
2761 root->log_root = NULL;
2763 return 0;
2766 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2767 struct btrfs_fs_info *fs_info)
2769 if (fs_info->log_root_tree) {
2770 free_log_tree(trans, fs_info->log_root_tree);
2771 fs_info->log_root_tree = NULL;
2773 return 0;
2776 /*
2777 * If both a file and directory are logged, and unlinks or renames are
2778 * mixed in, we have a few interesting corners:
2780 * create file X in dir Y
2781 * link file X to X.link in dir Y
2782 * fsync file X
2783 * unlink file X but leave X.link
2784 * fsync dir Y
2786 * After a crash we would expect only X.link to exist. But file X
2787 * didn't get fsync'd again so the log has back refs for X and X.link.
2789 * We solve this by removing directory entries and inode backrefs from the
2790 * log when a file that was logged in the current transaction is
2791 * unlinked. Any later fsync will include the updated log entries, and
2792 * we'll be able to reconstruct the proper directory items from backrefs.
2794 * This optimization allows us to avoid relogging the entire inode
2795 * or the entire directory.
2796 */
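/*
 * Note the name is removed from both places it can live in the log: the
 * DIR_ITEM copy (hashed by name) and the DIR_INDEX copy (by index), and
 * the logged directory's i_size is shrunk by the bytes deleted.
 */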
2797 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2798 struct btrfs_root *root,
2799 const char *name, int name_len,
2800 struct inode *dir, u64 index)
2802 struct btrfs_root *log;
2803 struct btrfs_dir_item *di;
2804 struct btrfs_path *path;
2805 int ret;
2806 int err = 0;
2807 int bytes_del = 0;
2808 u64 dir_ino = btrfs_ino(dir);
2810 if (BTRFS_I(dir)->logged_trans < trans->transid)
2811 return 0;
2813 ret = join_running_log_trans(root);
2814 if (ret)
2815 return 0;
2817 mutex_lock(&BTRFS_I(dir)->log_mutex);
2819 log = root->log_root;
2820 path = btrfs_alloc_path();
2821 if (!path) {
2822 err = -ENOMEM;
2823 goto out_unlock;
2826 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2827 name, name_len, -1);
2828 if (IS_ERR(di)) {
2829 err = PTR_ERR(di);
2830 goto fail;
2832 if (di) {
2833 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2834 bytes_del += name_len;
2835 if (ret) {
2836 err = ret;
2837 goto fail;
2840 btrfs_release_path(path);
2841 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2842 index, name, name_len, -1);
2843 if (IS_ERR(di)) {
2844 err = PTR_ERR(di);
2845 goto fail;
2847 if (di) {
2848 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2849 bytes_del += name_len;
2850 if (ret) {
2851 err = ret;
2852 goto fail;
2856 /* update the directory size in the log to reflect the names
2857 * we have removed
2858 */
2859 if (bytes_del) {
2860 struct btrfs_key key;
2862 key.objectid = dir_ino;
2863 key.offset = 0;
2864 key.type = BTRFS_INODE_ITEM_KEY;
2865 btrfs_release_path(path);
2867 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2868 if (ret < 0) {
2869 err = ret;
2870 goto fail;
2872 if (ret == 0) {
2873 struct btrfs_inode_item *item;
2874 u64 i_size;
2876 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2877 struct btrfs_inode_item);
2878 i_size = btrfs_inode_size(path->nodes[0], item);
2879 if (i_size > bytes_del)
2880 i_size -= bytes_del;
2881 else
2882 i_size = 0;
2883 btrfs_set_inode_size(path->nodes[0], item, i_size);
2884 btrfs_mark_buffer_dirty(path->nodes[0]);
2885 } else
2886 ret = 0;
2887 btrfs_release_path(path);
2889 fail:
2890 btrfs_free_path(path);
2891 out_unlock:
2892 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2893 if (ret == -ENOSPC) {
2894 btrfs_set_log_full_commit(root->fs_info, trans);
2895 ret = 0;
2896 } else if (ret < 0)
2897 btrfs_abort_transaction(trans, root, ret);
2899 btrfs_end_log_trans(root);
2901 return err;
2904 /* see comments for btrfs_del_dir_entries_in_log */
2905 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2906 struct btrfs_root *root,
2907 const char *name, int name_len,
2908 struct inode *inode, u64 dirid)
2910 struct btrfs_root *log;
2911 u64 index;
2912 int ret;
2914 if (BTRFS_I(inode)->logged_trans < trans->transid)
2915 return 0;
2917 ret = join_running_log_trans(root);
2918 if (ret)
2919 return 0;
2920 log = root->log_root;
2921 mutex_lock(&BTRFS_I(inode)->log_mutex);
2923 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2924 dirid, &index);
2925 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2926 if (ret == -ENOSPC) {
2927 btrfs_set_log_full_commit(root->fs_info, trans);
2928 ret = 0;
2929 } else if (ret < 0 && ret != -ENOENT)
2930 btrfs_abort_transaction(trans, root, ret);
2931 btrfs_end_log_trans(root);
2933 return ret;
2936 /*
2937 * creates a range item in the log for 'dirid'. first_offset and
2938 * last_offset tell us which parts of the key space the log should
2939 * be considered authoritative for.
2940 */
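/*
 * Example: a BTRFS_DIR_LOG_INDEX_KEY item with offset == first_offset and
 * dir_log_end == last_offset tells replay that, inside
 * [first_offset, last_offset], any dir index found in the subvolume but
 * missing from the log was deleted before the fsync.
 */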
2941 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2942 struct btrfs_root *log,
2943 struct btrfs_path *path,
2944 int key_type, u64 dirid,
2945 u64 first_offset, u64 last_offset)
2947 int ret;
2948 struct btrfs_key key;
2949 struct btrfs_dir_log_item *item;
2951 key.objectid = dirid;
2952 key.offset = first_offset;
2953 if (key_type == BTRFS_DIR_ITEM_KEY)
2954 key.type = BTRFS_DIR_LOG_ITEM_KEY;
2955 else
2956 key.type = BTRFS_DIR_LOG_INDEX_KEY;
2957 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
2958 if (ret)
2959 return ret;
2961 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2962 struct btrfs_dir_log_item);
2963 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
2964 btrfs_mark_buffer_dirty(path->nodes[0]);
2965 btrfs_release_path(path);
2966 return 0;
2970 * log all the items included in the current transaction for a given
2971 * directory. This also creates the range items in the log tree required
2972 * to replay anything deleted before the fsync
2974 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2975 struct btrfs_root *root, struct inode *inode,
2976 struct btrfs_path *path,
2977 struct btrfs_path *dst_path, int key_type,
2978 u64 min_offset, u64 *last_offset_ret)
2980 struct btrfs_key min_key;
2981 struct btrfs_root *log = root->log_root;
2982 struct extent_buffer *src;
2983 int err = 0;
2984 int ret;
2985 int i;
2986 int nritems;
2987 u64 first_offset = min_offset;
2988 u64 last_offset = (u64)-1;
2989 u64 ino = btrfs_ino(inode);
2991 log = root->log_root;
2993 min_key.objectid = ino;
2994 min_key.type = key_type;
2995 min_key.offset = min_offset;
2997 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3000 * we didn't find anything from this transaction, see if there
3001 * is anything at all
3003 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3004 min_key.objectid = ino;
3005 min_key.type = key_type;
3006 min_key.offset = (u64)-1;
3007 btrfs_release_path(path);
3008 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3009 if (ret < 0) {
3010 btrfs_release_path(path);
3011 return ret;
3013 ret = btrfs_previous_item(root, path, ino, key_type);
3015 /* if ret == 0 there are items for this type,
3016 * create a range to tell us the last key of this type.
3017 * otherwise, there are no items in this directory after
3018 * *min_offset, and we create a range to indicate that.
3020 if (ret == 0) {
3021 struct btrfs_key tmp;
3022 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3023 path->slots[0]);
3024 if (key_type == tmp.type)
3025 first_offset = max(min_offset, tmp.offset) + 1;
3027 goto done;
3030 /* go backward to find any previous key */
3031 ret = btrfs_previous_item(root, path, ino, key_type);
3032 if (ret == 0) {
3033 struct btrfs_key tmp;
3034 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3035 if (key_type == tmp.type) {
3036 first_offset = tmp.offset;
3037 ret = overwrite_item(trans, log, dst_path,
3038 path->nodes[0], path->slots[0],
3039 &tmp);
3040 if (ret) {
3041 err = ret;
3042 goto done;
3046 btrfs_release_path(path);
3048 /* find the first key from this transaction again */
3049 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3050 if (WARN_ON(ret != 0))
3051 goto done;
3054 * we have a block from this transaction, log every item in it
3055 * from our directory
3057 while (1) {
3058 struct btrfs_key tmp;
3059 src = path->nodes[0];
3060 nritems = btrfs_header_nritems(src);
3061 for (i = path->slots[0]; i < nritems; i++) {
3062 btrfs_item_key_to_cpu(src, &min_key, i);
3064 if (min_key.objectid != ino || min_key.type != key_type)
3065 goto done;
3066 ret = overwrite_item(trans, log, dst_path, src, i,
3067 &min_key);
3068 if (ret) {
3069 err = ret;
3070 goto done;
3073 path->slots[0] = nritems;
3076 * look ahead to the next item and see if it is also
3077 * from this directory and from this transaction
3079 ret = btrfs_next_leaf(root, path);
3080 if (ret == 1) {
3081 last_offset = (u64)-1;
3082 goto done;
3084 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3085 if (tmp.objectid != ino || tmp.type != key_type) {
3086 last_offset = (u64)-1;
3087 goto done;
3089 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3090 ret = overwrite_item(trans, log, dst_path,
3091 path->nodes[0], path->slots[0],
3092 &tmp);
3093 if (ret)
3094 err = ret;
3095 else
3096 last_offset = tmp.offset;
3097 goto done;
3100 done:
3101 btrfs_release_path(path);
3102 btrfs_release_path(dst_path);
3104 if (err == 0) {
3105 *last_offset_ret = last_offset;
3107 * insert the log range keys to indicate where the log
3108 * is valid
3110 ret = insert_dir_log_key(trans, log, path, key_type,
3111 ino, first_offset, last_offset);
3112 if (ret)
3113 err = ret;
3115 return err;
3118 /*
3119 * Logging directories is very similar to logging inodes. We find all the
3120 * items from the current transaction and write them to the log.
3122 * The recovery code scans the directory in the subvolume, and if it finds a
3123 * key in the range logged that is not present in the log tree, then it means
3124 * that dir entry was unlinked during the transaction.
3126 * In order for that scan to work, we must include one key smaller than
3127 * the smallest logged by this transaction and one key larger than the largest
3128 * key logged by this transaction.
3129 */
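/*
 * log_dir_items() reports, via last_offset_ret, how far the log is now
 * authoritative; the loop below keeps calling it, advancing min_key past
 * the returned last offset, until the range reaches (u64)-1. The whole
 * scan runs once for DIR_ITEM keys and then again for DIR_INDEX keys.
 */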
3130 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3131 struct btrfs_root *root, struct inode *inode,
3132 struct btrfs_path *path,
3133 struct btrfs_path *dst_path)
3135 u64 min_key;
3136 u64 max_key;
3137 int ret;
3138 int key_type = BTRFS_DIR_ITEM_KEY;
3140 again:
3141 min_key = 0;
3142 max_key = 0;
3143 while (1) {
3144 ret = log_dir_items(trans, root, inode, path,
3145 dst_path, key_type, min_key,
3146 &max_key);
3147 if (ret)
3148 return ret;
3149 if (max_key == (u64)-1)
3150 break;
3151 min_key = max_key + 1;
3154 if (key_type == BTRFS_DIR_ITEM_KEY) {
3155 key_type = BTRFS_DIR_INDEX_KEY;
3156 goto again;
3158 return 0;
3161 /*
3162 * a helper function to drop items from the log before we relog an
3163 * inode. max_key_type indicates the highest item type to remove.
3164 * This cannot be run for file data extents because it does not
3165 * free the extents they point to.
3166 */
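/*
 * The search starts at (objectid, max_key_type, (u64)-1) and walks
 * backwards, deleting whole runs of items sharing the objectid one leaf
 * at a time until slot 0 or a different objectid is reached.
 */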
3167 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3168 struct btrfs_root *log,
3169 struct btrfs_path *path,
3170 u64 objectid, int max_key_type)
3172 int ret;
3173 struct btrfs_key key;
3174 struct btrfs_key found_key;
3175 int start_slot;
3177 key.objectid = objectid;
3178 key.type = max_key_type;
3179 key.offset = (u64)-1;
3181 while (1) {
3182 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3183 BUG_ON(ret == 0); /* Logic error */
3184 if (ret < 0)
3185 break;
3187 if (path->slots[0] == 0)
3188 break;
3190 path->slots[0]--;
3191 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3192 path->slots[0]);
3194 if (found_key.objectid != objectid)
3195 break;
3197 found_key.offset = 0;
3198 found_key.type = 0;
3199 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3200 &start_slot);
3202 ret = btrfs_del_items(trans, log, path, start_slot,
3203 path->slots[0] - start_slot + 1);
3205 * If start slot isn't 0 then we don't need to re-search, we've
3206 * found the last guy with the objectid in this tree.
3208 if (ret || start_slot != 0)
3209 break;
3210 btrfs_release_path(path);
3212 btrfs_release_path(path);
3213 if (ret > 0)
3214 ret = 0;
3215 return ret;
3218 static void fill_inode_item(struct btrfs_trans_handle *trans,
3219 struct extent_buffer *leaf,
3220 struct btrfs_inode_item *item,
3221 struct inode *inode, int log_inode_only)
3223 struct btrfs_map_token token;
3225 btrfs_init_map_token(&token);
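	/*
	 * The map token is meant to let the btrfs_set_token_* helpers
	 * below reuse the mapping of the last-touched extent buffer page
	 * rather than remapping it for every field.
	 */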
3227 if (log_inode_only) {
3228 /* set the generation to zero so the recovery code
3229 * can tell the difference between logging
3230 * just to say 'this inode exists' and logging
3231 * to say 'update this inode with these values'
3232 */
3233 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3234 btrfs_set_token_inode_size(leaf, item, 0, &token);
3235 } else {
3236 btrfs_set_token_inode_generation(leaf, item,
3237 BTRFS_I(inode)->generation,
3238 &token);
3239 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3242 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3243 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3244 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3245 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3247 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
3248 inode->i_atime.tv_sec, &token);
3249 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
3250 inode->i_atime.tv_nsec, &token);
3252 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
3253 inode->i_mtime.tv_sec, &token);
3254 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
3255 inode->i_mtime.tv_nsec, &token);
3257 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
3258 inode->i_ctime.tv_sec, &token);
3259 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
3260 inode->i_ctime.tv_nsec, &token);
3262 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3263 &token);
3265 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3266 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3267 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3268 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3269 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3272 static int log_inode_item(struct btrfs_trans_handle *trans,
3273 struct btrfs_root *log, struct btrfs_path *path,
3274 struct inode *inode)
3276 struct btrfs_inode_item *inode_item;
3277 int ret;
3279 ret = btrfs_insert_empty_item(trans, log, path,
3280 &BTRFS_I(inode)->location,
3281 sizeof(*inode_item));
3282 if (ret && ret != -EEXIST)
3283 return ret;
3284 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3285 struct btrfs_inode_item);
3286 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
3287 btrfs_release_path(path);
3288 return 0;
3291 static noinline int copy_items(struct btrfs_trans_handle *trans,
3292 struct inode *inode,
3293 struct btrfs_path *dst_path,
3294 struct btrfs_path *src_path, u64 *last_extent,
3295 int start_slot, int nr, int inode_only)
3297 unsigned long src_offset;
3298 unsigned long dst_offset;
3299 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3300 struct btrfs_file_extent_item *extent;
3301 struct btrfs_inode_item *inode_item;
3302 struct extent_buffer *src = src_path->nodes[0];
3303 struct btrfs_key first_key, last_key, key;
3304 int ret;
3305 struct btrfs_key *ins_keys;
3306 u32 *ins_sizes;
3307 char *ins_data;
3308 int i;
3309 struct list_head ordered_sums;
3310 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3311 bool has_extents = false;
3312 bool need_find_last_extent = true;
3313 bool done = false;
3315 INIT_LIST_HEAD(&ordered_sums);
3317 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3318 nr * sizeof(u32), GFP_NOFS);
3319 if (!ins_data)
3320 return -ENOMEM;
3322 first_key.objectid = (u64)-1;
3324 ins_sizes = (u32 *)ins_data;
3325 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3327 for (i = 0; i < nr; i++) {
3328 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3329 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3331 ret = btrfs_insert_empty_items(trans, log, dst_path,
3332 ins_keys, ins_sizes, nr);
3333 if (ret) {
3334 kfree(ins_data);
3335 return ret;
3338 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3339 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3340 dst_path->slots[0]);
3342 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3344 if ((i == (nr - 1)))
3345 last_key = ins_keys[i];
3347 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3348 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3349 dst_path->slots[0],
3350 struct btrfs_inode_item);
3351 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3352 inode, inode_only == LOG_INODE_EXISTS);
3353 } else {
3354 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3355 src_offset, ins_sizes[i]);
3359 * We set need_find_last_extent here in case we know we were
3360 * processing other items and then walk into the first extent in
3361 * the inode. If we don't hit an extent then nothing changes,
3362 * we'll do the last search the next time around.
3364 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3365 has_extents = true;
3366 if (first_key.objectid == (u64)-1)
3367 first_key = ins_keys[i];
3368 } else {
3369 need_find_last_extent = false;
3372 /* take a reference on file data extents so that truncates
3373 * or deletes of this inode don't have to relog the inode
3374 * again
3376 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3377 !skip_csum) {
3378 int found_type;
3379 extent = btrfs_item_ptr(src, start_slot + i,
3380 struct btrfs_file_extent_item);
3382 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3383 continue;
3385 found_type = btrfs_file_extent_type(src, extent);
3386 if (found_type == BTRFS_FILE_EXTENT_REG) {
3387 u64 ds, dl, cs, cl;
3388 ds = btrfs_file_extent_disk_bytenr(src,
3389 extent);
3390 /* ds == 0 is a hole */
3391 if (ds == 0)
3392 continue;
3394 dl = btrfs_file_extent_disk_num_bytes(src,
3395 extent);
3396 cs = btrfs_file_extent_offset(src, extent);
3397 cl = btrfs_file_extent_num_bytes(src,
3398 extent);
3399 if (btrfs_file_extent_compression(src,
3400 extent)) {
3401 cs = 0;
3402 cl = dl;
3405 ret = btrfs_lookup_csums_range(
3406 log->fs_info->csum_root,
3407 ds + cs, ds + cs + cl - 1,
3408 &ordered_sums, 0);
3409 if (ret) {
3410 btrfs_release_path(dst_path);
3411 kfree(ins_data);
3412 return ret;
3418 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3419 btrfs_release_path(dst_path);
3420 kfree(ins_data);
3423 * we have to do this after the loop above to avoid changing the
3424 * log tree while trying to change the log tree.
3426 ret = 0;
3427 while (!list_empty(&ordered_sums)) {
3428 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3429 struct btrfs_ordered_sum,
3430 list);
3431 if (!ret)
3432 ret = btrfs_csum_file_blocks(trans, log, sums);
3433 list_del(&sums->list);
3434 kfree(sums);
3437 if (!has_extents)
3438 return ret;
3440 if (need_find_last_extent && *last_extent == first_key.offset) {
3441 /*
3442 * We don't have any leaves between our current one and the one
3443 * we processed before that can have file extent items for our
3444 * inode (and have a generation number smaller than our current
3445 * transaction id).
3446 */
3447 need_find_last_extent = false;
3451 * Because we use btrfs_search_forward we could skip leaves that were
3452 * not modified and then assume *last_extent is valid when it really
3453 * isn't. So back up to the previous leaf and read the end of the last
3454 * extent before we go and fill in holes.
3456 if (need_find_last_extent) {
3457 u64 len;
3459 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3460 if (ret < 0)
3461 return ret;
3462 if (ret)
3463 goto fill_holes;
3464 if (src_path->slots[0])
3465 src_path->slots[0]--;
3466 src = src_path->nodes[0];
3467 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3468 if (key.objectid != btrfs_ino(inode) ||
3469 key.type != BTRFS_EXTENT_DATA_KEY)
3470 goto fill_holes;
3471 extent = btrfs_item_ptr(src, src_path->slots[0],
3472 struct btrfs_file_extent_item);
3473 if (btrfs_file_extent_type(src, extent) ==
3474 BTRFS_FILE_EXTENT_INLINE) {
3475 len = btrfs_file_extent_inline_len(src,
3476 src_path->slots[0],
3477 extent);
3478 *last_extent = ALIGN(key.offset + len,
3479 log->sectorsize);
3480 } else {
3481 len = btrfs_file_extent_num_bytes(src, extent);
3482 *last_extent = key.offset + len;
3485 fill_holes:
3486 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3487 * things could have happened
3489 * 1) A merge could have happened, so we could currently be on a leaf
3490 * that holds what we were copying in the first place.
3491 * 2) A split could have happened, and now not all of the items we want
3492 * are on the same leaf.
3494 * So we need to adjust how we search for holes, we need to drop the
3495 * path and re-search for the first extent key we found, and then walk
3496 * forward until we hit the last one we copied.
3498 if (need_find_last_extent) {
3499 /* btrfs_prev_leaf could return 1 without releasing the path */
3500 btrfs_release_path(src_path);
3501 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3502 src_path, 0, 0);
3503 if (ret < 0)
3504 return ret;
3505 ASSERT(ret == 0);
3506 src = src_path->nodes[0];
3507 i = src_path->slots[0];
3508 } else {
3509 i = start_slot;
3513 * Ok so here we need to go through and fill in any holes we may have
3514 * to make sure that holes are punched for those areas in case they had
3515 * extents previously.
3517 while (!done) {
3518 u64 offset, len;
3519 u64 extent_end;
3521 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3522 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3523 if (ret < 0)
3524 return ret;
3525 ASSERT(ret == 0);
3526 src = src_path->nodes[0];
3527 i = 0;
3530 btrfs_item_key_to_cpu(src, &key, i);
3531 if (!btrfs_comp_cpu_keys(&key, &last_key))
3532 done = true;
3533 if (key.objectid != btrfs_ino(inode) ||
3534 key.type != BTRFS_EXTENT_DATA_KEY) {
3535 i++;
3536 continue;
3538 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3539 if (btrfs_file_extent_type(src, extent) ==
3540 BTRFS_FILE_EXTENT_INLINE) {
3541 len = btrfs_file_extent_inline_len(src, i, extent);
3542 extent_end = ALIGN(key.offset + len, log->sectorsize);
3543 } else {
3544 len = btrfs_file_extent_num_bytes(src, extent);
3545 extent_end = key.offset + len;
3547 i++;
3549 if (*last_extent == key.offset) {
3550 *last_extent = extent_end;
3551 continue;
3553 offset = *last_extent;
3554 len = key.offset - *last_extent;
3555 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3556 offset, 0, 0, len, 0, len, 0,
3557 0, 0);
3558 if (ret)
3559 break;
3560 *last_extent = extent_end;
3563 * Need to let the callers know we dropped the path so they should
3564 * re-search.
3566 if (!ret && need_find_last_extent)
3567 ret = 1;
3568 return ret;
3571 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3573 struct extent_map *em1, *em2;
3575 em1 = list_entry(a, struct extent_map, list);
3576 em2 = list_entry(b, struct extent_map, list);
3578 if (em1->start < em2->start)
3579 return -1;
3580 else if (em1->start > em2->start)
3581 return 1;
3582 return 0;
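/*
 * list_sort() comparator: order the collected extent maps by file offset
 * so an inode's extents are logged in ascending order.
 */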
3585 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3586 struct inode *inode,
3587 struct btrfs_root *root,
3588 const struct extent_map *em,
3589 const struct list_head *logged_list,
3590 bool *ordered_io_error)
3592 struct btrfs_ordered_extent *ordered;
3593 struct btrfs_root *log = root->log_root;
3594 u64 mod_start = em->mod_start;
3595 u64 mod_len = em->mod_len;
3596 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3597 u64 csum_offset;
3598 u64 csum_len;
3599 LIST_HEAD(ordered_sums);
3600 int ret = 0;
3602 *ordered_io_error = false;
3604 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3605 em->block_start == EXTENT_MAP_HOLE)
3606 return 0;
3608 /*
3609 * Wait for any ordered extent that covers our extent map. If it
3610 * finishes without an error, first check and see if our csums are on
3611 * our outstanding ordered extents.
3612 */
3613 list_for_each_entry(ordered, logged_list, log_list) {
3614 struct btrfs_ordered_sum *sum;
3616 if (!mod_len)
3617 break;
3619 if (ordered->file_offset + ordered->len <= mod_start ||
3620 mod_start + mod_len <= ordered->file_offset)
3621 continue;
3623 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3624 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3625 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3626 const u64 start = ordered->file_offset;
3627 const u64 end = ordered->file_offset + ordered->len - 1;
3629 WARN_ON(ordered->inode != inode);
3630 filemap_fdatawrite_range(inode->i_mapping, start, end);
3633 wait_event(ordered->wait,
3634 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3635 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3637 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3639 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3640 * i_mapping flags, so that the next fsync won't get
3641 * an outdated io error too.
3643 btrfs_inode_check_errors(inode);
3644 *ordered_io_error = true;
3645 break;
3648 * We are going to copy all the csums on this ordered extent, so
3649 * go ahead and adjust mod_start and mod_len in case this
3650 * ordered extent has already been logged.
3652 if (ordered->file_offset > mod_start) {
3653 if (ordered->file_offset + ordered->len >=
3654 mod_start + mod_len)
3655 mod_len = ordered->file_offset - mod_start;
3657 * If we have this case
3659 * |--------- logged extent ---------|
3660 * |----- ordered extent ----|
3662 * Just don't mess with mod_start and mod_len, we'll
3663 * just end up logging more csums than we need and it
3664 * will be ok.
3666 } else {
3667 if (ordered->file_offset + ordered->len <
3668 mod_start + mod_len) {
3669 mod_len = (mod_start + mod_len) -
3670 (ordered->file_offset + ordered->len);
3671 mod_start = ordered->file_offset +
3672 ordered->len;
3673 } else {
3674 mod_len = 0;
3678 if (skip_csum)
3679 continue;
3682 * To keep us from looping for the above case of an ordered
3683 * extent that falls inside of the logged extent.
3685 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3686 &ordered->flags))
3687 continue;
3689 if (ordered->csum_bytes_left) {
3690 btrfs_start_ordered_extent(inode, ordered, 0);
3691 wait_event(ordered->wait,
3692 ordered->csum_bytes_left == 0);
3695 list_for_each_entry(sum, &ordered->list, list) {
3696 ret = btrfs_csum_file_blocks(trans, log, sum);
3697 if (ret)
3698 break;
3702 if (*ordered_io_error || !mod_len || ret || skip_csum)
3703 return ret;
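	/*
	 * For compressed extents the csums cover the whole on-disk extent,
	 * so they are looked up from offset 0 for
	 * max(block_len, orig_block_len) bytes; otherwise only the
	 * modified range needs its csums copied.
	 */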
3705 if (em->compress_type) {
3706 csum_offset = 0;
3707 csum_len = max(em->block_len, em->orig_block_len);
3708 } else {
3709 csum_offset = mod_start - em->start;
3710 csum_len = mod_len;
3713 /* block start is already adjusted for the file extent offset. */
3714 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3715 em->block_start + csum_offset,
3716 em->block_start + csum_offset +
3717 csum_len - 1, &ordered_sums, 0);
3718 if (ret)
3719 return ret;
3721 while (!list_empty(&ordered_sums)) {
3722 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3723 struct btrfs_ordered_sum,
3724 list);
3725 if (!ret)
3726 ret = btrfs_csum_file_blocks(trans, log, sums);
3727 list_del(&sums->list);
3728 kfree(sums);
3731 return ret;
3734 static int log_one_extent(struct btrfs_trans_handle *trans,
3735 struct inode *inode, struct btrfs_root *root,
3736 const struct extent_map *em,
3737 struct btrfs_path *path,
3738 const struct list_head *logged_list,
3739 struct btrfs_log_ctx *ctx)
3741 struct btrfs_root *log = root->log_root;
3742 struct btrfs_file_extent_item *fi;
3743 struct extent_buffer *leaf;
3744 struct btrfs_map_token token;
3745 struct btrfs_key key;
3746 u64 extent_offset = em->start - em->orig_start;
3747 u64 block_len;
3748 int ret;
3749 int extent_inserted = 0;
3750 bool ordered_io_err = false;
3752 ret = wait_ordered_extents(trans, inode, root, em, logged_list,
3753 &ordered_io_err);
3754 if (ret)
3755 return ret;
3757 if (ordered_io_err) {
3758 ctx->io_err = -EIO;
3759 return 0;
3762 btrfs_init_map_token(&token);
3764 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
3765 em->start + em->len, NULL, 0, 1,
3766 sizeof(*fi), &extent_inserted);
3767 if (ret)
3768 return ret;
3770 if (!extent_inserted) {
3771 key.objectid = btrfs_ino(inode);
3772 key.type = BTRFS_EXTENT_DATA_KEY;
3773 key.offset = em->start;
3775 ret = btrfs_insert_empty_item(trans, log, path, &key,
3776 sizeof(*fi));
3777 if (ret)
3778 return ret;
3780 leaf = path->nodes[0];
3781 fi = btrfs_item_ptr(leaf, path->slots[0],
3782 struct btrfs_file_extent_item);
3784 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
3785 &token);
3786 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3787 btrfs_set_token_file_extent_type(leaf, fi,
3788 BTRFS_FILE_EXTENT_PREALLOC,
3789 &token);
3790 else
3791 btrfs_set_token_file_extent_type(leaf, fi,
3792 BTRFS_FILE_EXTENT_REG,
3793 &token);
3795 block_len = max(em->block_len, em->orig_block_len);
3796 if (em->compress_type != BTRFS_COMPRESS_NONE) {
3797 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3798 em->block_start,
3799 &token);
3800 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3801 &token);
3802 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3803 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3804 em->block_start -
3805 extent_offset, &token);
3806 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3807 &token);
3808 } else {
3809 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3810 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3811 &token);
3814 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
3815 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3816 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
3817 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3818 &token);
3819 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3820 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3821 btrfs_mark_buffer_dirty(leaf);
3823 btrfs_release_path(path);
3825 return ret;
3828 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3829 struct btrfs_root *root,
3830 struct inode *inode,
3831 struct btrfs_path *path,
3832 struct list_head *logged_list,
3833 struct btrfs_log_ctx *ctx)
3835 struct extent_map *em, *n;
3836 struct list_head extents;
3837 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3838 u64 test_gen;
3839 int ret = 0;
3840 int num = 0;
3842 INIT_LIST_HEAD(&extents);
3844 write_lock(&tree->lock);
3845 test_gen = root->fs_info->last_trans_committed;
3847 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
3848 list_del_init(&em->list);
3850 /*
3851 * Just an arbitrary cutoff: this can get really CPU intensive
3852 * once we start collecting a lot of extents, and once we have a
3853 * bunch of extents we just want to commit since it will be
3854 * faster.
3855 */
3856 if (++num > 32768) {
3857 list_del_init(&tree->modified_extents);
3858 ret = -EFBIG;
3859 goto process;
3862 if (em->generation <= test_gen)
3863 continue;
3864 /* Need a ref to keep it from getting evicted from cache */
3865 atomic_inc(&em->refs);
3866 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3867 list_add_tail(&em->list, &extents);
3868 num++;
3871 list_sort(NULL, &extents, extent_cmp);
3873 process:
3874 while (!list_empty(&extents)) {
3875 em = list_entry(extents.next, struct extent_map, list);
3877 list_del_init(&em->list);
3880 * If we had an error we just need to delete everybody from our
3881 * private list.
3883 if (ret) {
3884 clear_em_logging(tree, em);
3885 free_extent_map(em);
3886 continue;
3889 write_unlock(&tree->lock);
3891 ret = log_one_extent(trans, inode, root, em, path, logged_list,
3892 ctx);
3893 write_lock(&tree->lock);
3894 clear_em_logging(tree, em);
3895 free_extent_map(em);
3897 WARN_ON(!list_empty(&extents));
3898 write_unlock(&tree->lock);
3900 btrfs_release_path(path);
3901 return ret;
3904 /* log a single inode in the tree log.
3905 * At least one parent directory for this inode must exist in the tree
3906 * or be logged already.
3908 * Any items from this inode changed by the current transaction are copied
3909 * to the log tree. An extra reference is taken on any extents in this
3910 * file, allowing us to avoid a whole pile of corner cases around logging
3911 * blocks that have been removed from the tree.
3913 * See LOG_INODE_ALL and related defines for a description of what inode_only
3914 * does.
3916 * This handles both files and directories.
3917 */
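/*
 * Roughly two paths through this function: inodes flagged with
 * BTRFS_INODE_NEEDS_FULL_SYNC have their log items dropped and everything
 * re-copied via btrfs_search_forward(); otherwise the fast_search path
 * only logs the extent maps modified since the last transaction commit
 * (btrfs_log_changed_extents).
 */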
3918 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3919 struct btrfs_root *root, struct inode *inode,
3920 int inode_only,
3921 const loff_t start,
3922 const loff_t end,
3923 struct btrfs_log_ctx *ctx)
3925 struct btrfs_path *path;
3926 struct btrfs_path *dst_path;
3927 struct btrfs_key min_key;
3928 struct btrfs_key max_key;
3929 struct btrfs_root *log = root->log_root;
3930 struct extent_buffer *src = NULL;
3931 LIST_HEAD(logged_list);
3932 u64 last_extent = 0;
3933 int err = 0;
3934 int ret;
3935 int nritems;
3936 int ins_start_slot = 0;
3937 int ins_nr;
3938 bool fast_search = false;
3939 u64 ino = btrfs_ino(inode);
3940 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3942 path = btrfs_alloc_path();
3943 if (!path)
3944 return -ENOMEM;
3945 dst_path = btrfs_alloc_path();
3946 if (!dst_path) {
3947 btrfs_free_path(path);
3948 return -ENOMEM;
3951 min_key.objectid = ino;
3952 min_key.type = BTRFS_INODE_ITEM_KEY;
3953 min_key.offset = 0;
3955 max_key.objectid = ino;
3958 /* today the code can only do partial logging of directories */
3959 if (S_ISDIR(inode->i_mode) ||
3960 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3961 &BTRFS_I(inode)->runtime_flags) &&
3962 inode_only == LOG_INODE_EXISTS))
3963 max_key.type = BTRFS_XATTR_ITEM_KEY;
3964 else
3965 max_key.type = (u8)-1;
3966 max_key.offset = (u64)-1;
3968 /* Only run delayed items if we are a dir or a new file */
3969 if (S_ISDIR(inode->i_mode) ||
3970 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
3971 ret = btrfs_commit_inode_delayed_items(trans, inode);
3972 if (ret) {
3973 btrfs_free_path(path);
3974 btrfs_free_path(dst_path);
3975 return ret;
3979 mutex_lock(&BTRFS_I(inode)->log_mutex);
3981 btrfs_get_logged_extents(inode, &logged_list, start, end);
3983 /*
3984 * a brute force approach to making sure we get the most up-to-date
3985 * copies of everything.
3986 */
3987 if (S_ISDIR(inode->i_mode)) {
3988 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
3990 if (inode_only == LOG_INODE_EXISTS)
3991 max_key_type = BTRFS_XATTR_ITEM_KEY;
3992 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
3993 } else {
3994 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3995 &BTRFS_I(inode)->runtime_flags)) {
3996 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
3997 &BTRFS_I(inode)->runtime_flags);
3998 ret = btrfs_truncate_inode_items(trans, log,
3999 inode, 0, 0);
4000 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4001 &BTRFS_I(inode)->runtime_flags) ||
4002 inode_only == LOG_INODE_EXISTS) {
4003 if (inode_only == LOG_INODE_ALL)
4004 fast_search = true;
4005 max_key.type = BTRFS_XATTR_ITEM_KEY;
4006 ret = drop_objectid_items(trans, log, path, ino,
4007 max_key.type);
4008 } else {
4009 if (inode_only == LOG_INODE_ALL)
4010 fast_search = true;
4011 ret = log_inode_item(trans, log, dst_path, inode);
4012 if (ret) {
4013 err = ret;
4014 goto out_unlock;
4016 goto log_extents;
4020 if (ret) {
4021 err = ret;
4022 goto out_unlock;
4025 while (1) {
4026 ins_nr = 0;
4027 ret = btrfs_search_forward(root, &min_key,
4028 path, trans->transid);
4029 if (ret != 0)
4030 break;
4031 again:
4032 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4033 if (min_key.objectid != ino)
4034 break;
4035 if (min_key.type > max_key.type)
4036 break;
4038 src = path->nodes[0];
4039 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4040 ins_nr++;
4041 goto next_slot;
4042 } else if (!ins_nr) {
4043 ins_start_slot = path->slots[0];
4044 ins_nr = 1;
4045 goto next_slot;
4048 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4049 ins_start_slot, ins_nr, inode_only);
4050 if (ret < 0) {
4051 err = ret;
4052 goto out_unlock;
4054 if (ret) {
4055 ins_nr = 0;
4056 btrfs_release_path(path);
4057 continue;
4059 ins_nr = 1;
4060 ins_start_slot = path->slots[0];
4061 next_slot:
4063 nritems = btrfs_header_nritems(path->nodes[0]);
4064 path->slots[0]++;
4065 if (path->slots[0] < nritems) {
4066 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4067 path->slots[0]);
4068 goto again;
4070 if (ins_nr) {
4071 ret = copy_items(trans, inode, dst_path, path,
4072 &last_extent, ins_start_slot,
4073 ins_nr, inode_only);
4074 if (ret < 0) {
4075 err = ret;
4076 goto out_unlock;
4078 ret = 0;
4079 ins_nr = 0;
4081 btrfs_release_path(path);
4083 if (min_key.offset < (u64)-1) {
4084 min_key.offset++;
4085 } else if (min_key.type < max_key.type) {
4086 min_key.type++;
4087 min_key.offset = 0;
4088 } else {
4089 break;
4092 if (ins_nr) {
4093 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4094 ins_start_slot, ins_nr, inode_only);
4095 if (ret < 0) {
4096 err = ret;
4097 goto out_unlock;
4099 ret = 0;
4100 ins_nr = 0;
4103 log_extents:
4104 btrfs_release_path(path);
4105 btrfs_release_path(dst_path);
4106 if (fast_search) {
4108 * Some ordered extents started by fsync might have completed
4109 * before we collected the ordered extents in logged_list, which
4110 * means they're gone, not in our logged_list nor in the inode's
4111 * ordered tree. We want the application/user space to know an
4112 * error happened while attempting to persist file data so that
4113 * it can take proper action. If such error happened, we leave
4114 * without writing to the log tree and the fsync must report the
4115 * file data write error and not commit the current transaction.
4117 err = btrfs_inode_check_errors(inode);
4118 if (err) {
4119 ctx->io_err = err;
4120 goto out_unlock;
4122 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4123 &logged_list, ctx);
4124 if (ret) {
4125 err = ret;
4126 goto out_unlock;
	} else if (inode_only == LOG_INODE_ALL) {
		struct extent_map *em, *n;

		write_lock(&em_tree->lock);
		/*
		 * We can't just remove every em if we're called for a ranged
		 * fsync - that is, one that doesn't cover the whole possible
		 * file range (0 to LLONG_MAX). This is because we can have
		 * em's that fall outside the range we're logging and therefore
		 * their ordered operations haven't completed yet
		 * (btrfs_finish_ordered_io() not invoked yet). This means we
		 * didn't get their respective file extent item in the fs/subvol
		 * tree yet, and need to let the next fast fsync (one which
		 * consults the list of modified extent maps) find the em so
		 * that it logs a matching file extent item and waits for the
		 * respective ordered operation to complete (if it's still
		 * running).
		 *
		 * Removing every em outside the range we're logging would make
		 * the next fast fsync not log their matching file extent items,
		 * therefore making us lose data after a log replay.
		 */
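		/*
		 * Illustration with made-up offsets: a ranged fsync of
		 * [0, 128K) must leave a modified em for [256K, 320K) on the
		 * list while its ordered IO is pending, so a later fast fsync
		 * can still log that extent.
		 */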
		list_for_each_entry_safe(em, n, &em_tree->modified_extents,
					 list) {
			const u64 mod_end = em->mod_start + em->mod_len - 1;

			if (em->mod_start >= start && mod_end <= end)
				list_del_init(&em->list);
		}
		write_unlock(&em_tree->lock);
	}

	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		ret = log_directory_changes(trans, root, inode, path, dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}

	BTRFS_I(inode)->logged_trans = trans->transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
	if (unlikely(err))
		btrfs_put_logged_extents(&logged_list);
	else
		btrfs_submit_logged_extents(&logged_list, log);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged. Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
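/*
 * Example (illustrative names): after "mv dir1/foo dir2/foo", dir1's
 * last_unlink_trans is ahead of the last committed transaction, so an
 * fsync involving dir1 returns 1 here and the caller falls back to a
 * full transaction commit.
 */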
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;
	struct inode *orig_inode = inode;

	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			goto out;
		inode = parent->d_inode;
	}

	while (1) {
		/*
		 * If we are logging a directory then we start with our inode,
		 * not our parent's inode, so we need to skip setting the
		 * logged_trans so that further down in the log code we don't
		 * think this inode has already been logged.
		 */
		if (inode != orig_inode)
			BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			btrfs_set_log_full_commit(root->fs_info, trans);
			ret = 1;
			break;
		}

		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = parent->d_inode;
	}
	dput(old_parent);
out:
	return ret;
}
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log. A minimal inode and backref
 * only logging is done of any parent directories that are newer than
 * the last committed transaction
 */
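/*
 * For example (paths are illustrative): after "mkdir -p a/b; touch a/b/f;
 * fsync(a/b/f)", f is logged with LOG_INODE_ALL, then the new directories
 * b and a are each logged with LOG_INODE_EXISTS so replay can recreate
 * them.
 */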
static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct inode *inode,
				  struct dentry *parent,
				  const loff_t start,
				  const loff_t end,
				  int exists_only,
				  struct btrfs_log_ctx *ctx)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = root->fs_info->last_trans_committed;

	sb = inode->i_sb;

	if (btrfs_test_opt(root, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	/*
	 * If the previous transaction commit didn't complete, we have to do
	 * a full commit ourselves.
	 */
	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);
	if (ret)
		goto end_no_trans;

	if (btrfs_inode_in_log(inode, trans->transid)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
	if (ret)
		goto end_trans;

	/*
	 * for a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	inode_only = LOG_INODE_EXISTS;
	while (1) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		inode = parent->d_inode;
		if (root != BTRFS_I(inode)->root)
			break;

		if (BTRFS_I(inode)->generation >
		    root->fs_info->last_trans_committed) {
			ret = btrfs_log_inode(trans, root, inode, inode_only,
					      0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(root->fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
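/*
 * This is the entry point used by the fsync path (btrfs_sync_file) once
 * it holds a transaction handle for the inode being synced.
 */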
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
				     start, end, 0, ctx);
	dput(parent);

	return ret;
}
/*
 * should be called during mount to replay any log trees
 * from the FS
 */
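/*
 * The recovery walk below runs in passes driven by wc.pin and wc.stage:
 * first pin every block the log trees reference, then replay inodes
 * (LOG_WALK_REPLAY_INODES), then bump wc.stage until LOG_WALK_REPLAY_ALL
 * has replayed everything else.
 */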
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	fs_info->log_root_recovering = 1;

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_error(fs_info, ret, "Failed to pin buffers while "
			    "recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_error(fs_info, ret,
				    "Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_error(fs_info, ret,
				    "Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_error(fs_info, ret, "Couldn't read target root "
				    "for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans, fs_info->tree_root);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans, fs_info->tree_root);
	btrfs_free_path(path);
	return ret;
}
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged, any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
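	/*
	 * Example (illustrative names): after "mv a/foo b/foo", a later
	 * fsync of directory a alone can't reach b to log foo's new name,
	 * so we record the unlink in a and force a full commit if a is
	 * fsync'd this transaction.
	 */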
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}
/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
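/*
 * Typical callers are the link and rename paths, which have just added a
 * new name for an inode that may already be present in this transaction's
 * log tree.
 */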
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up for the file
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming
	 * it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
	    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 0,
				      LLONG_MAX, 1, NULL);
}