/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the 2 started one after another.
 */
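
/*
 * A minimal usage sketch (the root pointer and the key range below are
 * example values supplied by the caller, not mandated by this file): start
 * a readahead over a whole tree and wait for it to finish. Calling
 * btrfs_reada_detach(rc) instead of btrfs_reada_wait(rc) would let the
 * readahead continue in the background.
 *
 *	struct btrfs_key key_start = { 0 };
 *	struct btrfs_key key_end = {
 *		.objectid = (u64)-1, .type = (u8)-1, .offset = (u64)-1
 *	};
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (IS_ERR(rc))
 *		return PTR_ERR(rc);
 *	btrfs_reada_wait(rc);
 */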
#define MAX_MIRRORS 2
#define MAX_IN_FLIGHT 6
struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	u64			blocksize;
	u64			generation;
	struct btrfs_key	top;
	struct list_head	extctl;
	struct kref		refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[MAX_MIRRORS];
	int			nzones;
	struct btrfs_device	*scheduled_for;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[MAX_MIRRORS]; /* full list, incl self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};
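
/*
 * All radix trees in this file are keyed by a logical offset right-shifted
 * by PAGE_CACHE_SHIFT. Extents are inserted under their start offset, zones
 * under their *end* offset. Keying zones by the end lets a gang lookup
 * starting at any logical offset return the first zone that could contain
 * it; a check against zone->start then decides actual containment (see
 * reada_find_zone).
 */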
static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation);
/* in case of err, eb might be NULL */
static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			    u64 start, int err)
{
	int level = 0;
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct reada_extent *re;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head list;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct btrfs_device *for_dev;

	if (eb)
		level = btrfs_header_level(eb);

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		kref_get(&re->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (!re)
		return -1;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	for_dev = re->scheduled_for;
	re->scheduled_for = NULL;
	spin_unlock(&re->lock);
	if (err == 0) {
		nritems = level ? btrfs_header_nritems(eb) : 0;
		generation = btrfs_header_generation(eb);
		/*
		 * FIXME: currently we just set nritems to 0 if this is a leaf,
		 * effectively ignoring the content. In a next step we could
		 * trigger more readahead depending on the content, e.g.
		 * fetch the checksums for the extents in the leaf.
		 */
	} else {
		/*
		 * this is the error case, the extent buffer has not been
		 * read correctly. We won't access anything from it and
		 * just cleanup our data structures. Effectively this will
		 * cut the branch below this node from read ahead.
		 */
		nritems = 0;
		generation = 0;
	}
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				printk(KERN_DEBUG "generation mismatch for "
						"(%llu,%d,%llu) %llu != %llu\n",
				       key.objectid, key.type, key.offset,
				       rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						level - 1, n_gen);
		}
	}
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}
	reada_extent_put(fs_info, re);	/* our ref */
	if (for_dev)
		atomic_dec(&for_dev->reada_in_flight);

	return 0;
}
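
/*
 * Reference counting recap for the hook above: every extctl record queued
 * on an extent holds one reference on that extent, dropped here as the
 * records are freed. The extra kref_get/kref_put pair on rc keeps the
 * reada_control alive across the wake_up when its element count drops to
 * zero.
 */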
/*
 * start is passed separately in case eb is NULL, which may be the case with
 * failed I/O
 */
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
			 u64 start, int err)
{
	int ret;

	ret = __readahead_hook(root, eb, start, err);

	reada_start_machine(root->fs_info);

	return ret;
}
static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	int looped = 0;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

again:
	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_CACHE_SHIFT, 1);
	if (ret == 1)
		kref_get(&zone->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (ret == 1) {
		if (logical >= zone->start && logical < zone->end)
			return zone;
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	if (looped)
		return NULL;

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_NOFS);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
				zone);
	spin_unlock(&fs_info->reada_lock);

	if (ret) {
		kfree(zone);
		looped = 1;
		goto again;
	}

	return zone;
}
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
					      u64 logical,
					      struct btrfs_key *top, int level)
{
	int ret;
	int looped = 0;
	struct reada_extent *re = NULL;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	u32 blocksize;
	u64 length;
	int nzones = 0;
	int i;
	unsigned long index = logical >> PAGE_CACHE_SHIFT;

again:
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		kref_get(&re->refcnt);
	spin_unlock(&fs_info->reada_lock);

	if (re || looped)
		return re;

	re = kzalloc(sizeof(*re), GFP_NOFS);
	if (!re)
		return NULL;

	blocksize = btrfs_level_size(root, level);
	re->logical = logical;
	re->blocksize = blocksize;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	kref_init(&re->refcnt);

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > MAX_MIRRORS) {
		printk(KERN_ERR "btrfs readahead: more than %d copies not "
				"supported", MAX_MIRRORS);
		goto error;
	}

	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;
		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			goto error;

		re->zones[nzones] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		if (ret != -ENOMEM) {
			/* someone inserted the extent in the meantime */
			looped = 1;
			goto again;
		}
		goto error;
	}
	for (i = 0; i < nzones; ++i) {
		dev = bbio->stripes[i].dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--i >= 0) {
				dev = bbio->stripes[i].dev;
				BUG_ON(dev == NULL);
				radix_tree_delete(&dev->reada_extents, index);
			}
			BUG_ON(fs_info == NULL);
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			goto error;
		}
	}
	spin_unlock(&fs_info->reada_lock);

	kfree(bbio);
	return re;

error:
	while (nzones) {
		struct reada_zone *zone;

		--nzones;
		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	kfree(bbio);
	kfree(re);
	return NULL;
}
static void reada_kref_dummy(struct kref *kr)
{
}
static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (!kref_put(&re->refcnt, reada_kref_dummy)) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->scheduled_for)
		atomic_dec(&re->scheduled_for->reada_in_flight);

	kfree(re);
}
static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_CACHE_SHIFT);

	kfree(zone);
}
static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}
static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, int level, u64 generation)
{
	struct btrfs_root *root = rc->root;
	struct reada_extent *re;
	struct reada_extctl *rec;

	re = reada_find_extent(root, logical, top, level); /* takes one ref */
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_NOFS);
	if (!rec) {
		reada_extent_put(root->fs_info, re);
		return -1;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}
/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}
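
/*
 * The locked flag set above is what keeps mirrors apart: while one device
 * reads a zone, the copies of that zone on all peer devices are marked
 * locked, and reada_pick_zone below prefers unlocked zones. A locked zone
 * is only picked when no unlocked zone with elements is left.
 */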
/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}
static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	u32 blocksize;
	int ret;
	int i;
	int need_kick = 0;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coagulate them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + re->blocksize;
	kref_get(&re->refcnt);

	spin_unlock(&fs_info->reada_lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;
	blocksize = re->blocksize;

	spin_lock(&re->lock);
	if (re->scheduled_for == NULL) {
		re->scheduled_for = dev;
		need_kick = 1;
	}
	spin_unlock(&re->lock);

	reada_extent_put(fs_info, re);

	if (!need_kick)
		return 0;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
			 mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
	else if (eb)
		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);

	if (eb)
		free_extent_buffer(eb);

	return 1;
}
static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;
	kfree(rmw);

	__reada_start_machine(fs_info);
}
static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i)
		reada_start_machine(fs_info);
}
static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	rmw->work.func = reada_start_machine_worker;
	rmw->fs_info = fs_info;

	btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
}
#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
				"%d devs", zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				printk(KERN_CONT " curr off %llu",
					device->reada_next - zone->start);
			printk(KERN_CONT "\n");
			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			printk(KERN_DEBUG
				"  re: logical %llu size %u empty %d for %lld",
				re->logical, re->blocksize,
				list_empty(&re->extctl), re->scheduled_for ?
				re->scheduled_for->devid : -1);

			for (i = 0; i < re->nzones; ++i) {
				printk(KERN_CONT " zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					printk(KERN_CONT " %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			printk(KERN_CONT "\n");
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled_for) {
			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
			continue;
		}
		printk(KERN_DEBUG
			"re: logical %llu size %u list empty %d for %lld",
			re->logical, re->blocksize, list_empty(&re->extctl),
			re->scheduled_for ? re->scheduled_for->devid : -1);
		for (i = 0; i < re->nzones; ++i) {
			printk(KERN_CONT " zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				printk(KERN_CONT " %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		printk(KERN_CONT "\n");
		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int level;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->root = root;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	level = btrfs_header_level(node);
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	reada_add_block(rc, start, &max_key, level, generation);

	reada_start_machine(root->fs_info);

	return rc;
}
#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(rc->root->fs_info,
			  atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;

	while (atomic_read(&rc->elems)) {
		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif
void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}