/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100
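/*
 * Thresholds for the delayed-item machinery: once the number of pending
 * delayed items reaches BTRFS_DELAYED_BACKGROUND, btrfs_balance_delayed_items()
 * kicks off asynchronous flushing; once it reaches BTRFS_DELAYED_WRITEBACK,
 * the caller also waits for the count to fall back below the background
 * threshold.
 */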
static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
        delayed_node_cache = kmem_cache_create("delayed_node",
                                        sizeof(struct btrfs_delayed_node),
                                        0,
                                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                        NULL);
        if (!delayed_node_cache)
                return -ENOMEM;
        return 0;
}
void btrfs_delayed_inode_exit(void)
{
        if (delayed_node_cache)
                kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                struct btrfs_root *root, u64 inode_id)
{
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
        atomic_set(&delayed_node->refs, 0);
        delayed_node->count = 0;
        delayed_node->in_list = 0;
        delayed_node->inode_dirty = 0;
        delayed_node->ins_root = RB_ROOT;
        delayed_node->del_root = RB_ROOT;
        mutex_init(&delayed_node->mutex);
        delayed_node->index_cnt = 0;
        INIT_LIST_HEAD(&delayed_node->n_list);
        INIT_LIST_HEAD(&delayed_node->p_list);
        delayed_node->bytes_reserved = 0;
}
static inline int btrfs_is_continuous_delayed_item(
                                        struct btrfs_delayed_item *item1,
                                        struct btrfs_delayed_item *item2)
{
        if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
            item1->key.objectid == item2->key.objectid &&
            item1->key.type == item2->key.type &&
            item1->key.offset + 1 == item2->key.offset)
                return 1;
        return 0;
}
static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
                                                        struct btrfs_root *root)
{
        return root->fs_info->delayed_root;
}
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
                                                        struct inode *inode)
{
        struct btrfs_delayed_node *node;
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_root *root = btrfs_inode->root;
        u64 ino = btrfs_ino(inode);
        int ret;

again:
        node = ACCESS_ONCE(btrfs_inode->delayed_node);
        if (node) {
                atomic_inc(&node->refs);	/* can be accessed */
                return node;
        }

        spin_lock(&root->inode_lock);
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
        if (node) {
                if (btrfs_inode->delayed_node) {
                        spin_unlock(&root->inode_lock);
                        goto again;
                }
                btrfs_inode->delayed_node = node;
                atomic_inc(&node->refs);	/* can be accessed */
                atomic_inc(&node->refs);	/* cached in the inode */
                spin_unlock(&root->inode_lock);
                return node;
        }
        spin_unlock(&root->inode_lock);

        node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
        if (!node)
                return ERR_PTR(-ENOMEM);
        btrfs_init_delayed_node(node, root, ino);

        atomic_inc(&node->refs);	/* cached in the btrfs inode */
        atomic_inc(&node->refs);	/* can be accessed */

        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (ret) {
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(ret);
        }

        spin_lock(&root->inode_lock);
        ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
        if (ret == -EEXIST) {
                kmem_cache_free(delayed_node_cache, node);
                spin_unlock(&root->inode_lock);
                radix_tree_preload_end();
                goto again;
        }
        btrfs_inode->delayed_node = node;
        spin_unlock(&root->inode_lock);
        radix_tree_preload_end();

        return node;
}
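/*
 * Reference counting of a delayed node, as set up above: one reference is
 * held by the inode cache (btrfs_inode->delayed_node), one by the caller
 * that looked the node up, and one more is taken when the node is put on
 * the node_list. __btrfs_release_delayed_node() drops a reference and frees
 * the node once the count reaches zero.
 */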
/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
                                     struct btrfs_delayed_node *node,
                                     int mod)
{
        spin_lock(&root->lock);
        if (node->in_list) {
                if (!list_empty(&node->p_list))
                        list_move_tail(&node->p_list, &root->prepare_list);
                else if (mod)
                        list_add_tail(&node->p_list, &root->prepare_list);
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
                atomic_inc(&node->refs);	/* inserted into list */
                root->nodes++;
                node->in_list = 1;
        }
        spin_unlock(&root->lock);
}
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
                                       struct btrfs_delayed_node *node)
{
        spin_lock(&root->lock);
        if (node->in_list) {
                root->nodes--;
                atomic_dec(&node->refs);	/* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
                node->in_list = 0;
        }
        spin_unlock(&root->lock);
}
struct btrfs_delayed_node *btrfs_first_delayed_node(
                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->node_list))
                goto out;

        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
        atomic_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}
struct btrfs_delayed_node *btrfs_next_delayed_node(
                                                struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_root *delayed_root;
        struct list_head *p;
        struct btrfs_delayed_node *next = NULL;

        delayed_root = node->root->fs_info->delayed_root;
        spin_lock(&delayed_root->lock);
        if (!node->in_list) {	/* not in the list */
                if (list_empty(&delayed_root->node_list))
                        goto out;
                p = delayed_root->node_list.next;
        } else if (list_is_last(&node->n_list, &delayed_root->node_list))
                goto out;
        else
                p = node->n_list.next;

        next = list_entry(p, struct btrfs_delayed_node, n_list);
        atomic_inc(&next->refs);
out:
        spin_unlock(&delayed_root->lock);

        return next;
}
static void __btrfs_release_delayed_node(
                                struct btrfs_delayed_node *delayed_node,
                                int mod)
{
        struct btrfs_delayed_root *delayed_root;

        if (!delayed_node)
                return;

        delayed_root = delayed_node->root->fs_info->delayed_root;

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
        else
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);

        if (atomic_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;
                spin_lock(&root->inode_lock);
                if (atomic_read(&delayed_node->refs) == 0) {
                        radix_tree_delete(&root->delayed_nodes_tree,
                                          delayed_node->inode_id);
                        kmem_cache_free(delayed_node_cache, delayed_node);
                }
                spin_unlock(&root->inode_lock);
        }
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 0);
}
struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
                                        struct btrfs_delayed_root *delayed_root)
{
        struct list_head *p;
        struct btrfs_delayed_node *node = NULL;

        spin_lock(&delayed_root->lock);
        if (list_empty(&delayed_root->prepare_list))
                goto out;

        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
        atomic_inc(&node->refs);
out:
        spin_unlock(&delayed_root->lock);

        return node;
}

static inline void btrfs_release_prepared_delayed_node(
                                        struct btrfs_delayed_node *node)
{
        __btrfs_release_delayed_node(node, 1);
}
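/*
 * Two lists hang off the delayed root: node_list (linked via n_list) holds
 * every delayed node that still has pending items, while prepare_list
 * (linked via p_list) holds the nodes that have been handed to the async
 * worker for flushing. The lookup helpers above take a reference on the
 * node they return; the caller drops it again with
 * btrfs_release_delayed_node() or btrfs_release_prepared_delayed_node().
 */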
struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
        struct btrfs_delayed_item *item;

        item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->block_rsv = NULL;
                item->delayed_node = NULL;
                atomic_set(&item->refs, 1);
        }
        return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:          the key to look up
 * @prev:         used to store the prev item if the right item isn't found
 * @next:         used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
                                struct rb_root *root,
                                struct btrfs_key *key,
                                struct btrfs_delayed_item **prev,
                                struct btrfs_delayed_item **next)
{
        struct rb_node *node, *prev_node = NULL;
        struct btrfs_delayed_item *delayed_item = NULL;
        int ret = 0;

        node = root->rb_node;

        while (node) {
                delayed_item = rb_entry(node, struct btrfs_delayed_item,
                                        rb_node);
                prev_node = node;
                ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
                if (ret < 0)
                        node = node->rb_right;
                else if (ret > 0)
                        node = node->rb_left;
                else
                        return delayed_item;
        }

        if (prev) {
                if (!prev_node)
                        *prev = NULL;
                else if (ret < 0)
                        *prev = delayed_item;
                else if ((node = rb_prev(prev_node)) != NULL) {
                        *prev = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *prev = NULL;
        }

        if (next) {
                if (!prev_node)
                        *next = NULL;
                else if (ret > 0)
                        *next = delayed_item;
                else if ((node = rb_next(prev_node)) != NULL) {
                        *next = rb_entry(node, struct btrfs_delayed_item,
                                         rb_node);
                } else
                        *next = NULL;
        }
        return NULL;
}
struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
                                           NULL, NULL);
        return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
                                           NULL, NULL);
        return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item, *next;

        item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
                                           NULL, &next);
        if (!item)
                item = next;

        return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node,
                                        struct btrfs_key *key)
{
        struct btrfs_delayed_item *item, *next;

        item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
                                           NULL, &next);
        if (!item)
                item = next;

        return item;
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
                                    struct btrfs_delayed_item *ins,
                                    int action)
{
        struct rb_node **p, *node;
        struct rb_node *parent_node = NULL;
        struct rb_root *root;
        struct btrfs_delayed_item *item;
        int cmp;

        if (action == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_node->ins_root;
        else if (action == BTRFS_DELAYED_DELETION_ITEM)
                root = &delayed_node->del_root;
        else
                BUG();
        p = &root->rb_node;
        node = &ins->rb_node;

        while (*p) {
                parent_node = *p;
                item = rb_entry(parent_node, struct btrfs_delayed_item,
                                rb_node);

                cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
                if (cmp < 0)
                        p = &(*p)->rb_right;
                else if (cmp > 0)
                        p = &(*p)->rb_left;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color(node, root);
        ins->delayed_node = delayed_node;
        ins->ins_or_del = action;

        if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
            action == BTRFS_DELAYED_INSERTION_ITEM &&
            ins->key.offset >= delayed_node->index_cnt)
                delayed_node->index_cnt = ins->key.offset + 1;

        delayed_node->count++;
        atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
        return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
                                              struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
                                             struct btrfs_delayed_item *item)
{
        return __btrfs_add_delayed_item(node, item,
                                        BTRFS_DELAYED_DELETION_ITEM);
}
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
        struct rb_root *root;
        struct btrfs_delayed_root *delayed_root;

        delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

        BUG_ON(!delayed_root);
        BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
               delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

        if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
                root = &delayed_item->delayed_node->ins_root;
        else
                root = &delayed_item->delayed_node->del_root;

        rb_erase(&delayed_item->rb_node, root);
        delayed_item->delayed_node->count--;
        atomic_dec(&delayed_root->items);
        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
            waitqueue_active(&delayed_root->wait))
                wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
        if (item) {
                __btrfs_remove_delayed_item(item);
                if (atomic_dec_and_test(&item->refs))
                        kfree(item);
        }
}
struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first(&delayed_node->ins_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
                                        struct btrfs_delayed_node *delayed_node)
{
        struct rb_node *p;
        struct btrfs_delayed_item *item = NULL;

        p = rb_first(&delayed_node->del_root);
        if (p)
                item = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
                                        struct btrfs_delayed_item *item)
{
        struct rb_node *p;
        struct btrfs_delayed_item *next = NULL;

        p = rb_next(&item->rb_node);
        if (p)
                next = rb_entry(p, struct btrfs_delayed_item, rb_node);

        return next;
}
static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
                                                        struct inode *inode)
{
        struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_inode->delayed_node;
        if (delayed_node)
                atomic_inc(&delayed_node->refs);

        return delayed_node;
}
static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
                                                   u64 root_id)
{
        struct btrfs_key root_key;

        if (root->objectid == root_id)
                return root;

        root_key.objectid = root_id;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
                                               struct btrfs_root *root,
                                               struct btrfs_delayed_item *item)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->global_block_rsv;

        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
        if (!ret) {
                item->bytes_reserved = num_bytes;
                item->block_rsv = dst_rsv;
        }

        return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                                                struct btrfs_delayed_item *item)
{
        if (!item->bytes_reserved)
                return;

        btrfs_block_rsv_release(root, item->block_rsv,
                                item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
                                        struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_delayed_node *node)
{
        struct btrfs_block_rsv *src_rsv;
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;

        if (!trans->bytes_reserved)
                return 0;

        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->global_block_rsv;

        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
        if (!ret)
                node->bytes_reserved = num_bytes;

        return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
                                                 struct btrfs_delayed_node *node)
{
        struct btrfs_block_rsv *rsv;

        if (!node->bytes_reserved)
                return;

        rsv = &root->fs_info->global_block_rsv;
        btrfs_block_rsv_release(root, rsv,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
}
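/*
 * The reservation helpers above migrate the space needed for one metadata
 * item from the transaction's block reservation into the global block
 * reservation, so the delayed item or inode update can still be written out
 * later, possibly outside the transaction that created it. The matching
 * release helpers give the space back once the item has been flushed or
 * dropped.
 */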
/*
 * This helper inserts a batch of continuous items into the same leaf,
 * according to the free space of that leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        int free_space;
        int total_data_size = 0, total_size = 0;
        struct extent_buffer *leaf;
        char *data_ptr;
        struct btrfs_key *keys;
        u32 *data_size;
        struct list_head head;
        int slot;
        int nitems;
        int i;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];
        free_space = btrfs_leaf_free_space(root, leaf);
        INIT_LIST_HEAD(&head);

        next = item;
        nitems = 0;

        /*
         * count the number of continuous items that we can insert in a batch
         */
        while (total_size + next->data_len + sizeof(struct btrfs_item) <=
               free_space) {
                total_data_size += next->data_len;
                total_size += next->data_len + sizeof(struct btrfs_item);
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;
        }

        if (!nitems) {
                ret = 0;
                goto out;
        }

        /*
         * we need to allocate some memory, which might cause the task to
         * sleep, so set all locked nodes in the path to blocking locks first.
         */
        btrfs_set_path_blocking(path);

        keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
        if (!keys) {
                ret = -ENOMEM;
                goto out;
        }

        data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
        if (!data_size) {
                ret = -ENOMEM;
                goto error;
        }

        /* get keys of all the delayed items */
        i = 0;
        list_for_each_entry(next, &head, tree_list) {
                keys[i] = next->key;
                data_size[i] = next->data_len;
                i++;
        }

        /* reset all the locked nodes in the path to spinning locks. */
        btrfs_clear_path_blocking(path, NULL);

        /* insert the keys of the items */
        ret = setup_items_for_insert(trans, root, path, keys, data_size,
                                     total_data_size, total_size, nitems);
        if (ret)
                goto error;

        /* insert the dir index items */
        slot = path->slots[0];
        list_for_each_entry_safe(curr, next, &head, tree_list) {
                data_ptr = btrfs_item_ptr(leaf, slot, char);
                write_extent_buffer(leaf, &curr->data,
                                    (unsigned long)data_ptr,
                                    curr->data_len);
                slot++;

                btrfs_delayed_item_release_metadata(root, curr);

                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

error:
        kfree(data_size);
        kfree(keys);
out:
        return ret;
}
/*
 * This helper handles simple insertions that don't need to extend an
 * existing item for new data, such as directory name index insertion and
 * inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     struct btrfs_path *path,
                                     struct btrfs_delayed_item *delayed_item)
{
        struct extent_buffer *leaf;
        struct btrfs_item *item;
        char *ptr;
        int ret;

        ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
                                      delayed_item->data_len);
        if (ret < 0 && ret != -EEXIST)
                return ret;

        leaf = path->nodes[0];

        item = btrfs_item_nr(leaf, path->slots[0]);
        ptr = btrfs_item_ptr(leaf, path->slots[0], char);

        write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
                            delayed_item->data_len);
        btrfs_mark_buffer_dirty(leaf);

        btrfs_delayed_item_release_metadata(root, delayed_item);
        return 0;
}
/*
 * Insert one item first; then, if there are continuous items following it,
 * try to insert them into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_insertion_item(node);
        if (!curr)
                goto insert_end;

        ret = btrfs_insert_delayed_item(trans, root, path, curr);
        if (ret < 0) {
                btrfs_release_path(path);
                goto insert_end;
        }

        prev = curr;
        curr = __btrfs_next_delayed_item(prev);
        if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
                /* insert the continuous items into the same leaf */
                path->slots[0]++;
                btrfs_batch_insert_items(trans, root, path, curr);
        }
        btrfs_release_delayed_item(prev);
        btrfs_mark_buffer_dirty(path->nodes[0]);

        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

insert_end:
        mutex_unlock(&node->mutex);
        return ret;
}
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root,
                                    struct btrfs_path *path,
                                    struct btrfs_delayed_item *item)
{
        struct btrfs_delayed_item *curr, *next;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct list_head head;
        int nitems, i, last_item;
        int ret = 0;

        BUG_ON(!path->nodes[0]);

        leaf = path->nodes[0];

        i = path->slots[0];
        last_item = btrfs_header_nritems(leaf) - 1;
        if (i > last_item)
                return -ENOENT;	/* FIXME: Is errno suitable? */

        next = item;
        INIT_LIST_HEAD(&head);
        btrfs_item_key_to_cpu(leaf, &key, i);
        nitems = 0;
        /*
         * count the number of dir index items that we can delete in a batch
         */
        while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
                list_add_tail(&next->tree_list, &head);
                nitems++;

                curr = next;
                next = __btrfs_next_delayed_item(curr);
                if (!next)
                        break;

                if (!btrfs_is_continuous_delayed_item(curr, next))
                        break;

                i++;
                if (i > last_item)
                        break;
                btrfs_item_key_to_cpu(leaf, &key, i);
        }

        if (!nitems)
                return 0;

        ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
        if (ret)
                goto out;

        list_for_each_entry_safe(curr, next, &head, tree_list) {
                btrfs_delayed_item_release_metadata(root, curr);
                list_del(&curr->tree_list);
                btrfs_release_delayed_item(curr);
        }

out:
        return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
                                      struct btrfs_path *path,
                                      struct btrfs_root *root,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_delayed_item *curr, *prev;
        int ret = 0;

do_again:
        mutex_lock(&node->mutex);
        curr = __btrfs_first_delayed_deletion_item(node);
        if (!curr)
                goto delete_fail;

        ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
        if (ret < 0)
                goto delete_fail;
        else if (ret > 0) {
                /*
                 * can't find the item which the node points to, so this node
                 * is invalid, just drop it.
                 */
                prev = curr;
                curr = __btrfs_next_delayed_item(prev);
                btrfs_release_delayed_item(prev);
                ret = 0;
                btrfs_release_path(path);
                if (curr) {
                        mutex_unlock(&node->mutex);
                        goto do_again;
                }
                goto delete_fail;
        }

        btrfs_batch_delete_items(trans, root, path, curr);
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        goto do_again;

delete_fail:
        btrfs_release_path(path);
        mutex_unlock(&node->mutex);
        return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_delayed_root *delayed_root;

        if (delayed_node && delayed_node->inode_dirty) {
                BUG_ON(!delayed_node->root);
                delayed_node->inode_dirty = 0;
                delayed_node->count--;

                delayed_root = delayed_node->root->fs_info->delayed_root;
                atomic_dec(&delayed_root->items);
                if (atomic_read(&delayed_root->items) <
                    BTRFS_DELAYED_BACKGROUND &&
                    waitqueue_active(&delayed_root->wait))
                        wake_up(&delayed_root->wait);
        }
}
static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct btrfs_delayed_node *node)
{
        struct btrfs_key key;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        int ret;

        mutex_lock(&node->mutex);
        if (!node->inode_dirty) {
                mutex_unlock(&node->mutex);
                return 0;
        }

        key.objectid = node->inode_id;
        btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
        key.offset = 0;
        ret = btrfs_lookup_inode(trans, root, path, &key, 1);
        if (ret > 0) {
                btrfs_release_path(path);
                mutex_unlock(&node->mutex);
                return -ENOENT;
        } else if (ret < 0) {
                mutex_unlock(&node->mutex);
                return ret;
        }

        btrfs_unlock_up_safe(path, 1);
        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
                            sizeof(struct btrfs_inode_item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        btrfs_delayed_inode_release_metadata(root, node);
        btrfs_release_delayed_inode(node);
        mutex_unlock(&node->mutex);

        return 0;
}
/* Called when committing the transaction. */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_delayed_root *delayed_root;
        struct btrfs_delayed_node *curr_node, *prev_node;
        struct btrfs_path *path;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        delayed_root = btrfs_get_delayed_root(root);

        curr_node = btrfs_first_delayed_node(delayed_root);
        while (curr_node) {
                root = curr_node->root;
                ret = btrfs_insert_delayed_items(trans, path, root,
                                                 curr_node);
                if (!ret)
                        ret = btrfs_delete_delayed_items(trans, path, root,
                                                         curr_node);
                if (!ret)
                        ret = btrfs_update_delayed_inode(trans, root, path,
                                                         curr_node);
                if (ret) {
                        btrfs_release_delayed_node(curr_node);
                        break;
                }

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }

        btrfs_free_path(path);
        return ret;
}
static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                              struct btrfs_delayed_node *node)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;

        ret = btrfs_insert_delayed_items(trans, path, node->root, node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, node->root, node);
        if (!ret)
                ret = btrfs_update_delayed_inode(trans, node->root, path, node);
        btrfs_free_path(path);

        return ret;
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
                                     struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
        int ret;

        if (!delayed_node)
                return 0;

        mutex_lock(&delayed_node->mutex);
        if (!delayed_node->count) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return 0;
        }
        mutex_unlock(&delayed_node->mutex);

        ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}
void btrfs_remove_delayed_node(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
        if (!delayed_node)
                return;

        BTRFS_I(inode)->delayed_node = NULL;
        btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_node {
        struct btrfs_root *root;
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_work work;
};
static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
        struct btrfs_async_delayed_node *async_node;
        struct btrfs_trans_handle *trans;
        struct btrfs_path *path;
        struct btrfs_delayed_node *delayed_node = NULL;
        struct btrfs_root *root;
        unsigned long nr = 0;
        int need_requeue = 0;
        int ret;

        async_node = container_of(work, struct btrfs_async_delayed_node, work);

        path = btrfs_alloc_path();
        if (!path)
                goto out;
        path->leave_spinning = 1;

        delayed_node = async_node->delayed_node;
        root = delayed_node->root;

        trans = btrfs_join_transaction(root);
        if (IS_ERR(trans))
                goto free_path;

        ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
        if (!ret)
                ret = btrfs_delete_delayed_items(trans, path, root,
                                                 delayed_node);

        if (!ret)
                btrfs_update_delayed_inode(trans, root, path, delayed_node);

        /*
         * Maybe new delayed items have been inserted, so we need to requeue
         * the work. Besides that, we must dequeue the empty delayed nodes
         * to avoid the race between the delayed-items balance and the worker.
         * The race looks like this:
         *      Task1                           Worker thread
         *                                      count == 0, needn't requeue
         *                                        also needn't insert the
         *                                        delayed node into the
         *                                        prepare list again.
         *      add lots of delayed items
         *      queue the delayed node
         *        already in the list,
         *        and not in the prepare
         *        list, it means the delayed
         *        node is being dealt with
         *        by the worker.
         *      do delayed items balance
         *        the delayed node is being
         *        dealt with by the worker
         *        now, just wait.
         *      the worker goes idle.
         * Task1 will sleep until the transaction is committed.
         */
        mutex_lock(&delayed_node->mutex);
        if (delayed_node->count)
                need_requeue = 1;
        else
                btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
                                           delayed_node);
        mutex_unlock(&delayed_node->mutex);

        nr = trans->blocks_used;

        btrfs_end_transaction_dmeta(trans, root);
        __btrfs_btree_balance_dirty(root, nr);
free_path:
        btrfs_free_path(path);
out:
        if (need_requeue)
                btrfs_requeue_work(&async_node->work);
        else {
                btrfs_release_prepared_delayed_node(delayed_node);
                kfree(async_node);
        }
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
                                     struct btrfs_root *root, int all)
{
        struct btrfs_async_delayed_node *async_node;
        struct btrfs_delayed_node *curr;
        int count = 0;

again:
        curr = btrfs_first_prepared_delayed_node(delayed_root);
        if (!curr)
                return 0;

        async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
        if (!async_node) {
                btrfs_release_prepared_delayed_node(curr);
                return -ENOMEM;
        }

        async_node->root = root;
        async_node->delayed_node = curr;

        async_node->work.func = btrfs_async_run_delayed_node_done;
        async_node->work.flags = 0;

        btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
        count++;

        if (all || count < 4)
                goto again;

        return 0;
}
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
        struct btrfs_delayed_root *delayed_root;

        delayed_root = btrfs_get_delayed_root(root);

        if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
                return;

        if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
                int ret;

                ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
                if (ret)
                        return;

                wait_event_interruptible_timeout(
                                delayed_root->wait,
                                (atomic_read(&delayed_root->items) <
                                 BTRFS_DELAYED_BACKGROUND),
                                HZ);
                return;
        }

        btrfs_wq_run_delayed_node(delayed_root, root, 0);
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, const char *name,
                                   int name_len, struct inode *dir,
                                   struct btrfs_disk_key *disk_key, u8 type,
                                   u64 index)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *delayed_item;
        struct btrfs_dir_item *dir_item;
        int ret;

        delayed_node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
        if (!delayed_item) {
                ret = -ENOMEM;
                goto release_node;
        }

        ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
        /*
         * we have reserved enough space when we start a new transaction,
         * so reserving metadata failure is impossible
         */
        BUG_ON(ret);

        delayed_item->key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
        delayed_item->key.offset = index;

        dir_item = (struct btrfs_dir_item *)delayed_item->data;
        dir_item->location = *disk_key;
        dir_item->transid = cpu_to_le64(trans->transid);
        dir_item->data_len = 0;
        dir_item->name_len = cpu_to_le16(name_len);
        dir_item->type = type;
        memcpy((char *)(dir_item + 1), name, name_len);

        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
                printk(KERN_ERR "err add delayed dir index item(name: %s) into "
                                "the insertion tree of the delayed node"
                                "(root id: %llu, inode id: %llu, errno: %d)\n",
                                name,
                                (unsigned long long)delayed_node->root->objectid,
                                (unsigned long long)delayed_node->inode_id,
                                ret);
                BUG();
        }
        mutex_unlock(&delayed_node->mutex);

release_node:
        btrfs_release_delayed_node(delayed_node);
        return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
                                               struct btrfs_delayed_node *node,
                                               struct btrfs_key *key)
{
        struct btrfs_delayed_item *item;

        mutex_lock(&node->mutex);
        item = __btrfs_lookup_delayed_insertion_item(node, key);
        if (!item) {
                mutex_unlock(&node->mutex);
                return 1;
        }

        btrfs_delayed_item_release_metadata(root, item);
        btrfs_release_delayed_item(item);
        mutex_unlock(&node->mutex);
        return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, struct inode *dir,
                                   u64 index)
{
        struct btrfs_delayed_node *node;
        struct btrfs_delayed_item *item;
        struct btrfs_key item_key;
        int ret;

        node = btrfs_get_or_create_delayed_node(dir);
        if (IS_ERR(node))
                return PTR_ERR(node);

        item_key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
        item_key.offset = index;

        ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
        if (!ret)
                goto end;

        item = btrfs_alloc_delayed_item(0);
        if (!item) {
                ret = -ENOMEM;
                goto end;
        }

        item->key = item_key;

        ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
        /*
         * we have reserved enough space when we start a new transaction,
         * so reserving metadata failure is impossible.
         */
        BUG_ON(ret);

        mutex_lock(&node->mutex);
        ret = __btrfs_add_delayed_deletion_item(node, item);
        if (unlikely(ret)) {
                printk(KERN_ERR "err add delayed dir index item(index: %llu) "
                                "into the deletion tree of the delayed node"
                                "(root id: %llu, inode id: %llu, errno: %d)\n",
                                (unsigned long long)index,
                                (unsigned long long)node->root->objectid,
                                (unsigned long long)node->inode_id,
                                ret);
                BUG();
        }
        mutex_unlock(&node->mutex);
end:
        btrfs_release_delayed_node(node);
        return ret;
}
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
        int ret = 0;

        if (!delayed_node)
                return -ENOENT;

        /*
         * Since we have held i_mutex of this directory, it is impossible that
         * a new directory index could be added into the delayed node while
         * index_cnt is being read. So we needn't lock the delayed node.
         */
        if (!delayed_node->index_cnt)
                return -EINVAL;

        BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
        return ret;
}
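/*
 * The helpers below support readdir on a directory with pending delayed
 * items: btrfs_get_delayed_items() snapshots the insertion and deletion
 * items under the node's mutex (taking a reference on each),
 * btrfs_should_delete_dir_index() filters out on-disk indexes that have a
 * pending delayed deletion, btrfs_readdir_delayed_dir_index() emits the
 * pending insertions, and btrfs_put_delayed_items() drops the references
 * again.
 */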
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
                             struct list_head *del_list)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (item) {
                atomic_inc(&item->refs);
                list_add_tail(&item->readdir_list, ins_list);
                item = __btrfs_next_delayed_item(item);
        }

        item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (item) {
                atomic_inc(&item->refs);
                list_add_tail(&item->readdir_list, del_list);
                item = __btrfs_next_delayed_item(item);
        }
        mutex_unlock(&delayed_node->mutex);
        /*
         * This delayed node is still cached in the btrfs inode, so refs
         * must be > 1 now, and we needn't check whether it is going to be
         * freed or not.
         *
         * Besides that, this function is used to read the directory, and we
         * do not insert/delete delayed items in this period. So we also
         * needn't requeue or dequeue this delayed node.
         */
        atomic_dec(&delayed_node->refs);
}
void btrfs_put_delayed_items(struct list_head *ins_list,
                             struct list_head *del_list)
{
        struct btrfs_delayed_item *curr, *next;

        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
{
        struct btrfs_delayed_item *curr, *next;
        int ret;

        if (list_empty(del_list))
                return 0;

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;

                list_del(&curr->readdir_list);
                ret = (curr->key.offset == index);

                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);

                if (ret)
                        return 1;
        }
        return 0;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
                                    filldir_t filldir,
                                    struct list_head *ins_list)
{
        struct btrfs_dir_item *di;
        struct btrfs_delayed_item *curr, *next;
        struct btrfs_key location;
        char *name;
        int name_len;
        int over = 0;
        unsigned char d_type;

        if (list_empty(ins_list))
                return 0;

        /*
         * Changing the data of the delayed item is impossible, so we needn't
         * lock them. And we have held i_mutex of the directory, so nobody
         * can delete any directory indexes now.
         */
        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);

                if (curr->key.offset < filp->f_pos) {
                        if (atomic_dec_and_test(&curr->refs))
                                kfree(curr);
                        continue;
                }

                filp->f_pos = curr->key.offset;

                di = (struct btrfs_dir_item *)curr->data;
                name = (char *)(di + 1);
                name_len = le16_to_cpu(di->name_len);

                d_type = btrfs_filetype_table[di->type];
                btrfs_disk_key_to_cpu(&location, &di->location);

                over = filldir(dirent, name, name_len, curr->key.offset,
                               location.objectid, d_type);

                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);

                if (over)
                        return 1;
        }
        return 0;
}
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
                         generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
                         sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
                         transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
                         nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
                         block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
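/*
 * The BTRFS_SETGET_STACK_FUNCS() invocations above generate
 * btrfs_set_stack_inode_*() / btrfs_set_stack_timespec_*() helpers that
 * operate on an in-memory ("stack") copy of the on-disk structure,
 * converting each field to its little-endian on-disk representation; they
 * are used by fill_stack_inode_item() below.
 */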
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
{
        btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
        btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
        btrfs_set_stack_inode_generation(inode_item,
                                         BTRFS_I(inode)->generation);
        btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);

        btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
                                     inode->i_atime.tv_sec);
        btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
                                      inode->i_atime.tv_nsec);

        btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
                                     inode->i_mtime.tv_sec);
        btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
                                      inode->i_mtime.tv_nsec);

        btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
                                     inode->i_ctime.tv_sec);
        btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
                                      inode->i_ctime.tv_nsec);
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;
        int ret = 0;

        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        mutex_lock(&delayed_node->mutex);
        if (delayed_node->inode_dirty) {
                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
                goto release_node;
        }

        ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
        /*
         * we must reserve enough space when we start a new transaction,
         * so reserving metadata failure is impossible
         */
        BUG_ON(ret);

        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
        delayed_node->inode_dirty = 1;
        delayed_node->count++;
        atomic_inc(&root->fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_root *root = delayed_node->root;
        struct btrfs_delayed_item *curr_item, *prev_item;

        mutex_lock(&delayed_node->mutex);
        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        if (delayed_node->inode_dirty) {
                btrfs_delayed_inode_release_metadata(root, delayed_node);
                btrfs_release_delayed_inode(delayed_node);
        }
        mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        __btrfs_kill_delayed_node(delayed_node);
        btrfs_release_delayed_node(delayed_node);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
        u64 inode_id = 0;
        struct btrfs_delayed_node *delayed_nodes[8];
        int i, n;

        while (1) {
                spin_lock(&root->inode_lock);
                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
                                           (void **)delayed_nodes, inode_id,
                                           ARRAY_SIZE(delayed_nodes));
                if (!n) {
                        spin_unlock(&root->inode_lock);
                        break;
                }

                inode_id = delayed_nodes[n - 1]->inode_id + 1;

                for (i = 0; i < n; i++)
                        atomic_inc(&delayed_nodes[i]->refs);
                spin_unlock(&root->inode_lock);

                for (i = 0; i < n; i++) {
                        __btrfs_kill_delayed_node(delayed_nodes[i]);
                        btrfs_release_delayed_node(delayed_nodes[i]);
                }
        }
}