 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/refcount.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
/*
 * Maximum number of metaslabs per group that can be initialized
 * simultaneously.
 */
int max_initialize_ms = 3;

/*
 * Value that is written to disk during initialization.
 */
uint64_t zfs_initialize_value = 0xdeadbeefdeadbeefULL;

/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
uint64_t zfs_initialize_chunk_size = 1024 * 1024;
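
/*
 * Returns B_TRUE when the initializing thread should stop early: an exit
 * was requested, the vdev is no longer writeable, or the vdev is being
 * detached or its top-level vdev is being removed.
 */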
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
        return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
            vd->vdev_detached || vd->vdev_top->vdev_removing);
}
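
/*
 * Sync task that persists the initializing state for a leaf vdev (last
 * offset written, action time, and current state) into the vdev's leaf
 * ZAP in the MOS.
 */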
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
        /*
         * We pass in the guid instead of the vdev_t since the vdev may
         * have been freed prior to the sync task being processed. This
         * happens when a vdev is detached as we call spa_config_vdev_exit(),
         * stop the initializing thread, schedule the sync task, and free
         * the vdev. Later when the scheduled sync task is invoked, it would
         * find that the vdev has been freed.
         */
        uint64_t guid = *(uint64_t *)arg;
        uint64_t txg = dmu_tx_get_txg(tx);
        kmem_free(arg, sizeof (uint64_t));

        vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
        if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
                return;

        uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
        vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

        VERIFY(vd->vdev_leaf_zap != 0);

        objset_t *mos = vd->vdev_spa->spa_meta_objset;

        if (last_offset > 0) {
                vd->vdev_initialize_last_offset = last_offset;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
                    sizeof (last_offset), 1, &last_offset, tx));
        }
        if (vd->vdev_initialize_action_time > 0) {
                uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
                VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
                    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
                    1, &val, tx));
        }

        uint64_t initialize_state = vd->vdev_initialize_state;
        VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
            VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
            &initialize_state, tx));
}
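
/*
 * Update the in-core initializing state, schedule a sync task to persist
 * it, and record the transition in the pool history log. Called with
 * vdev_initialize_lock held.
 */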
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        spa_t *spa = vd->vdev_spa;

        if (new_state == vd->vdev_initialize_state)
                return;

        /*
         * Copy the vd's guid, this will be freed by the sync task.
         */
        uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
        *guid = vd->vdev_guid;

        /*
         * If we're suspending, then preserve the original start time.
         */
        if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
                vd->vdev_initialize_action_time = gethrestime_sec();
        }
        vd->vdev_initialize_state = new_state;

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
            guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);

        switch (new_state) {
        case VDEV_INITIALIZE_ACTIVE:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s activated", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_SUSPENDED:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s suspended", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_CANCELED:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s canceled", vd->vdev_path);
                break;
        case VDEV_INITIALIZE_COMPLETE:
                spa_history_log_internal(spa, "initialize", tx,
                    "vdev=%s complete", vd->vdev_path);
                break;
        default:
                panic("invalid state %llu", (unsigned long long)new_state);
        }

        dmu_tx_commit(tx);
}
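
/*
 * Completion callback for each initializing write zio. Updates progress
 * accounting and releases the inflight I/O slot taken in
 * vdev_initialize_write().
 */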
static void
vdev_initialize_cb(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        mutex_enter(&vd->vdev_initialize_io_lock);
        if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
                /*
                 * The I/O failed because the vdev was unavailable; roll the
                 * last offset back. (This works because spa_sync waits on
                 * spa_txg_zio before it runs sync tasks.)
                 */
                uint64_t *off =
                    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
                *off = MIN(*off, zio->io_offset);
        } else {
                /*
                 * Since initializing is best-effort, we ignore I/O errors and
                 * rely on vdev_probe to determine if the errors are more
                 * critical.
                 */
                if (zio->io_error != 0)
                        vd->vdev_stat.vs_initialize_errors++;

                vd->vdev_initialize_bytes_done += zio->io_orig_size;
        }
        ASSERT3U(vd->vdev_initialize_inflight, >, 0);
        vd->vdev_initialize_inflight--;
        cv_broadcast(&vd->vdev_initialize_io_cv);
        mutex_exit(&vd->vdev_initialize_io_lock);

        spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
        spa_t *spa = vd->vdev_spa;

        /* Limit inflight initializing I/Os */
        mutex_enter(&vd->vdev_initialize_io_lock);
        while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
                cv_wait(&vd->vdev_initialize_io_cv,
                    &vd->vdev_initialize_io_lock);
        }
        vd->vdev_initialize_inflight++;
        mutex_exit(&vd->vdev_initialize_io_lock);

        dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
        VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
        uint64_t txg = dmu_tx_get_txg(tx);

        spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
        mutex_enter(&vd->vdev_initialize_lock);

        if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
                uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
                *guid = vd->vdev_guid;

                /* This is the first write of this txg. */
                dsl_sync_task_nowait(spa_get_dsl(spa),
                    vdev_initialize_zap_update_sync, guid, 2,
                    ZFS_SPACE_CHECK_RESERVED, tx);
        }

        /*
         * We know the vdev struct will still be around since all
         * consumers of vdev_free must stop the initialization first.
         */
        if (vdev_initialize_should_stop(vd)) {
                mutex_enter(&vd->vdev_initialize_io_lock);
                ASSERT3U(vd->vdev_initialize_inflight, >, 0);
                vd->vdev_initialize_inflight--;
                mutex_exit(&vd->vdev_initialize_io_lock);
                spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
                mutex_exit(&vd->vdev_initialize_lock);
                dmu_tx_commit(tx);
                return (SET_ERROR(EINTR));
        }
        mutex_exit(&vd->vdev_initialize_lock);

        vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
        zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
            size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
            ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
        /* vdev_initialize_cb releases SCL_STATE_ALL */

        dmu_tx_commit(tx);

        return (0);
}
/*
 * Translate a logical range to the physical range for the specified vdev_t.
 * This function is initially called with a leaf vdev and will walk each
 * parent vdev until it reaches a top-level vdev. Once the top-level is
 * reached the physical range is initialized and the recursive function
 * begins to unwind. As it unwinds it calls the parent's vdev specific
 * translation function to do the real conversion.
 */
void
vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
{
        /*
         * Walk up the vdev tree
         */
        if (vd != vd->vdev_top) {
                vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
        } else {
                /*
                 * We've reached the top-level vdev, initialize the
                 * physical range to the logical range and start to
                 * unwind.
                 */
                physical_rs->rs_start = logical_rs->rs_start;
                physical_rs->rs_end = logical_rs->rs_end;
                return;
        }

        vdev_t *pvd = vd->vdev_parent;
        ASSERT3P(pvd, !=, NULL);
        ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);

        /*
         * As this recursive function unwinds, translate the logical
         * range into its physical components by calling the
         * vdev specific translate function.
         */
        range_seg_t intermediate = { 0 };
        pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);

        physical_rs->rs_start = intermediate.rs_start;
        physical_rs->rs_end = intermediate.rs_end;
}
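
/*
 * Illustrative example only: a leaf that sits directly under a mirror
 * typically translates 1:1, so the physical range equals the logical
 * range, while a raidz parent's vdev_op_xlate narrows the range to the
 * portion that actually lives on the child being translated.
 */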
/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
        ASSERT0(len % sizeof (uint64_t));
        for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
                *(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
        }
        return (0);
}
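
/*
 * Allocate an ABD of zfs_initialize_chunk_size bytes and fill it with the
 * initialize pattern (zfs_initialize_value).
 */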
static abd_t *
vdev_initialize_block_alloc()
{
        /* Allocate ABD for filler data */
        abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

        ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
        (void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
            vdev_initialize_block_fill, NULL);

        return (data);
}
static void
vdev_initialize_block_free(abd_t *data)
{
        abd_free(data);
}
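
/*
 * Issue initializing writes for every range collected in
 * vd->vdev_initialize_tree, splitting each range into chunks of at most
 * zfs_initialize_chunk_size bytes.
 */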
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
        avl_tree_t *rt = &vd->vdev_initialize_tree->rt_root;

        for (range_seg_t *rs = avl_first(rt); rs != NULL;
            rs = AVL_NEXT(rt, rs)) {
                uint64_t size = rs->rs_end - rs->rs_start;

                /* Split range into legally-sized physical chunks */
                uint64_t writes_required =
                    ((size - 1) / zfs_initialize_chunk_size) + 1;

                for (uint64_t w = 0; w < writes_required; w++) {
                        int error;

                        error = vdev_initialize_write(vd,
                            VDEV_LABEL_START_SIZE + rs->rs_start +
                            (w * zfs_initialize_chunk_size),
                            MIN(size - (w * zfs_initialize_chunk_size),
                            zfs_initialize_chunk_size), data);
                        if (error != 0)
                                return (error);
                }
        }
        return (0);
}
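
/* Wait for any pending load to finish and make sure the metaslab is loaded. */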
static void
vdev_initialize_ms_load(metaslab_t *msp)
{
        ASSERT(MUTEX_HELD(&msp->ms_lock));

        metaslab_load_wait(msp);
        if (!msp->ms_loaded)
                VERIFY0(metaslab_load(msp));
}
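
/* Wait until no other thread is updating the group's initializing counts. */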
static void
vdev_initialize_mg_wait(metaslab_group_t *mg)
{
        ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
        while (mg->mg_initialize_updating) {
                cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
        }
}
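
/*
 * Reserve an initializing slot in the metaslab group, waiting while the
 * group already has max_initialize_ms metaslabs being initialized.
 */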
static void
vdev_initialize_mg_mark(metaslab_group_t *mg)
{
        ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
        ASSERT(mg->mg_initialize_updating);

        while (mg->mg_ms_initializing >= max_initialize_ms) {
                cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
        }
        mg->mg_ms_initializing++;
        ASSERT3U(mg->mg_ms_initializing, <=, max_initialize_ms);
}
/*
 * Mark the metaslab as being initialized to prevent any allocations
 * on this metaslab. We must also track how many metaslabs are currently
 * being initialized within a metaslab group and limit them to prevent
 * allocation failures from occurring because all metaslabs are being
 * initialized.
 */
static void
vdev_initialize_ms_mark(metaslab_t *msp)
{
        ASSERT(!MUTEX_HELD(&msp->ms_lock));
        metaslab_group_t *mg = msp->ms_group;

        mutex_enter(&mg->mg_ms_initialize_lock);

        /*
         * To keep an accurate count of how many threads are initializing
         * a specific metaslab group, we only allow one thread to mark
         * the metaslab group at a time. This ensures that the value of
         * ms_initializing will be accurate when we decide to mark a metaslab
         * group as being initialized. To do this we force all other threads
         * to wait till the metaslab's mg_initialize_updating flag is no
         * longer set.
         */
        vdev_initialize_mg_wait(mg);
        mg->mg_initialize_updating = B_TRUE;
        if (msp->ms_initializing == 0) {
                vdev_initialize_mg_mark(mg);
        }
        mutex_enter(&msp->ms_lock);
        msp->ms_initializing++;
        mutex_exit(&msp->ms_lock);

        mg->mg_initialize_updating = B_FALSE;
        cv_broadcast(&mg->mg_ms_initialize_cv);
        mutex_exit(&mg->mg_ms_initialize_lock);
}
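
/*
 * Clear the initializing mark on the metaslab and, if this was its last
 * initializing thread, release the slot in the metaslab group.
 */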
static void
vdev_initialize_ms_unmark(metaslab_t *msp)
{
        ASSERT(!MUTEX_HELD(&msp->ms_lock));
        metaslab_group_t *mg = msp->ms_group;
        mutex_enter(&mg->mg_ms_initialize_lock);
        mutex_enter(&msp->ms_lock);
        if (--msp->ms_initializing == 0) {
                mg->mg_ms_initializing--;
                cv_broadcast(&mg->mg_ms_initialize_cv);
        }
        mutex_exit(&msp->ms_lock);
        mutex_exit(&mg->mg_ms_initialize_lock);
}
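
/*
 * Estimate initializing progress for the leaf vdev: how many bytes have
 * already been written (vdev_initialize_bytes_done) and how many are
 * expected in total (vdev_initialize_bytes_est), based on
 * vdev_initialize_last_offset and the free space in each metaslab.
 */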
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        vd->vdev_initialize_bytes_est = 0;
        vd->vdev_initialize_bytes_done = 0;

        for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];
                mutex_enter(&msp->ms_lock);

                uint64_t ms_free = msp->ms_size -
                    space_map_allocated(msp->ms_sm);

                if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
                        ms_free /= vd->vdev_top->vdev_children;

                /*
                 * Convert the metaslab range to a physical range
                 * on our vdev. We use this to determine if we are
                 * in the middle of this metaslab range.
                 */
                range_seg_t logical_rs, physical_rs;
                logical_rs.rs_start = msp->ms_start;
                logical_rs.rs_end = msp->ms_start + msp->ms_size;
                vdev_xlate(vd, &logical_rs, &physical_rs);

                if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
                        vd->vdev_initialize_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                } else if (vd->vdev_initialize_last_offset >
                    physical_rs.rs_end) {
                        vd->vdev_initialize_bytes_done += ms_free;
                        vd->vdev_initialize_bytes_est += ms_free;
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                /*
                 * If we get here, we're in the middle of initializing this
                 * metaslab. Load it and walk the free tree for more accurate
                 * progress estimation.
                 */
                vdev_initialize_ms_load(msp);

                for (range_seg_t *rs =
                    avl_first(&msp->ms_allocatable->rt_root); rs;
                    rs = AVL_NEXT(&msp->ms_allocatable->rt_root, rs)) {
                        logical_rs.rs_start = rs->rs_start;
                        logical_rs.rs_end = rs->rs_end;
                        vdev_xlate(vd, &logical_rs, &physical_rs);

                        uint64_t size = physical_rs.rs_end -
                            physical_rs.rs_start;
                        vd->vdev_initialize_bytes_est += size;
                        if (vd->vdev_initialize_last_offset >
                            physical_rs.rs_end) {
                                vd->vdev_initialize_bytes_done += size;
                        } else if (vd->vdev_initialize_last_offset >
                            physical_rs.rs_start &&
                            vd->vdev_initialize_last_offset <
                            physical_rs.rs_end) {
                                vd->vdev_initialize_bytes_done +=
                                    vd->vdev_initialize_last_offset -
                                    physical_rs.rs_start;
                        }
                }
                mutex_exit(&msp->ms_lock);
        }
}
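
/*
 * Load the persisted last offset from the leaf ZAP (if initializing was
 * active or suspended) and recompute the progress estimate.
 */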
static void
vdev_initialize_load(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
            spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
        ASSERT(vd->vdev_leaf_zap != 0);

        if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
            vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
                int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
                    sizeof (vd->vdev_initialize_last_offset), 1,
                    &vd->vdev_initialize_last_offset);
                ASSERT(err == 0 || err == ENOENT);
        }

        vdev_initialize_calculate_progress(vd);
}
/*
 * Convert the logical range into a physical range and add it to our
 * avl tree.
 */
void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
        vdev_t *vd = arg;
        range_seg_t logical_rs, physical_rs;
        logical_rs.rs_start = start;
        logical_rs.rs_end = start + size;

        ASSERT(vd->vdev_ops->vdev_op_leaf);
        vdev_xlate(vd, &logical_rs, &physical_rs);

        IMPLY(vd->vdev_top == vd,
            logical_rs.rs_start == physical_rs.rs_start);
        IMPLY(vd->vdev_top == vd,
            logical_rs.rs_end == physical_rs.rs_end);

        /* Only add segments that we have not visited yet */
        if (physical_rs.rs_end <= vd->vdev_initialize_last_offset)
                return;

        /* Pick up where we left off mid-range. */
        if (vd->vdev_initialize_last_offset > physical_rs.rs_start) {
                zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
                    "(%llu, %llu)", vd->vdev_path,
                    (u_longlong_t)physical_rs.rs_start,
                    (u_longlong_t)physical_rs.rs_end,
                    (u_longlong_t)vd->vdev_initialize_last_offset,
                    (u_longlong_t)physical_rs.rs_end);
                ASSERT3U(physical_rs.rs_end, >,
                    vd->vdev_initialize_last_offset);
                physical_rs.rs_start = vd->vdev_initialize_last_offset;
        }
        ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

        /*
         * With raidz, it's possible that the logical range does not live on
         * this leaf vdev. We only add the physical range to this vdev's tree
         * if it has a length greater than 0.
         */
        if (physical_rs.rs_end > physical_rs.rs_start) {
                range_tree_add(vd->vdev_initialize_tree, physical_rs.rs_start,
                    physical_rs.rs_end - physical_rs.rs_start);
        } else {
                ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
        }
}
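
/*
 * Main body of the per-vdev initializing thread: for each metaslab of the
 * top-level vdev, collect its free ranges, write the pattern over them,
 * then wait for outstanding I/Os and record the final state.
 */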
static void
vdev_initialize_thread(void *arg)
{
        vdev_t *vd = arg;
        spa_t *spa = vd->vdev_spa;
        int error = 0;
        uint64_t ms_count = 0;

        ASSERT(vdev_is_concrete(vd));
        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

        vd->vdev_initialize_last_offset = 0;
        vdev_initialize_load(vd);

        abd_t *deadbeef = vdev_initialize_block_alloc();

        vd->vdev_initialize_tree = range_tree_create(NULL, NULL);

        for (uint64_t i = 0; !vd->vdev_detached &&
            i < vd->vdev_top->vdev_ms_count; i++) {
                metaslab_t *msp = vd->vdev_top->vdev_ms[i];

                /*
                 * If we've expanded the top-level vdev or it's our
                 * first pass, calculate our progress.
                 */
                if (vd->vdev_top->vdev_ms_count != ms_count) {
                        vdev_initialize_calculate_progress(vd);
                        ms_count = vd->vdev_top->vdev_ms_count;
                }

                vdev_initialize_ms_mark(msp);
                mutex_enter(&msp->ms_lock);
                vdev_initialize_ms_load(msp);

                range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
                    vd);
                mutex_exit(&msp->ms_lock);

                spa_config_exit(spa, SCL_CONFIG, FTAG);
                error = vdev_initialize_ranges(vd, deadbeef);
                vdev_initialize_ms_unmark(msp);
                spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

                range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
                if (error != 0)
                        break;
        }

        spa_config_exit(spa, SCL_CONFIG, FTAG);
        mutex_enter(&vd->vdev_initialize_io_lock);
        while (vd->vdev_initialize_inflight > 0) {
                cv_wait(&vd->vdev_initialize_io_cv,
                    &vd->vdev_initialize_io_lock);
        }
        mutex_exit(&vd->vdev_initialize_io_lock);

        range_tree_destroy(vd->vdev_initialize_tree);
        vdev_initialize_block_free(deadbeef);
        vd->vdev_initialize_tree = NULL;

        mutex_enter(&vd->vdev_initialize_lock);
        if (!vd->vdev_initialize_exit_wanted && vdev_writeable(vd)) {
                vdev_initialize_change_state(vd, VDEV_INITIALIZE_COMPLETE);
        }
        ASSERT(vd->vdev_initialize_thread != NULL ||
            vd->vdev_initialize_inflight == 0);

        /*
         * Drop the vdev_initialize_lock while we sync out the
         * txg since it's possible that a device might be trying to
         * come online and must check to see if it needs to restart an
         * initialization. That thread will be holding the spa_config_lock
         * which would prevent the txg_wait_synced from completing.
         */
        mutex_exit(&vd->vdev_initialize_lock);
        txg_wait_synced(spa_get_dsl(spa), 0);
        mutex_enter(&vd->vdev_initialize_lock);

        vd->vdev_initialize_thread = NULL;
        cv_broadcast(&vd->vdev_initialize_cv);
        mutex_exit(&vd->vdev_initialize_lock);
}
/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));
        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        ASSERT(!vd->vdev_detached);
        ASSERT(!vd->vdev_initialize_exit_wanted);
        ASSERT(!vd->vdev_top->vdev_removing);

        vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
        vd->vdev_initialize_thread = thread_create(NULL, 0,
            vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. Blocks until the initializing thread has exited.
 * Caller must hold vdev_initialize_lock and must not be writing to the spa
 * config, as the initializing thread may try to enter the config as a reader
 * before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
        spa_t *spa = vd->vdev_spa;
        ASSERT(!spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_WRITER));

        ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
        ASSERT(vd->vdev_ops->vdev_op_leaf);
        ASSERT(vdev_is_concrete(vd));

        /*
         * Allow cancel requests to proceed even if the initialize thread
         * has stopped.
         */
        if (vd->vdev_initialize_thread == NULL &&
            tgt_state != VDEV_INITIALIZE_CANCELED) {
                return;
        }

        vdev_initialize_change_state(vd, tgt_state);
        vd->vdev_initialize_exit_wanted = B_TRUE;
        while (vd->vdev_initialize_thread != NULL)
                cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

        ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
        vd->vdev_initialize_exit_wanted = B_FALSE;
}
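
/* Recursively stop initializing on every concrete leaf vdev under vd. */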
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
        if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
                mutex_enter(&vd->vdev_initialize_lock);
                vdev_initialize_stop(vd, tgt_state);
                mutex_exit(&vd->vdev_initialize_lock);
                return;
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state);
        }
}
/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
        vdev_initialize_stop_all_impl(vd, tgt_state);

        if (vd->vdev_spa->spa_sync_on) {
                /* Make sure that our state has been synced to disk */
                txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
        }
}
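
/*
 * Restart initializing across the vdev tree (for example after pool import
 * or a device coming online), based on the state persisted in each leaf
 * ZAP: resume active initializations and reload progress for suspended or
 * offline devices.
 */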
void
vdev_initialize_restart(vdev_t *vd)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

        if (vd->vdev_leaf_zap != 0) {
                mutex_enter(&vd->vdev_initialize_lock);
                uint64_t initialize_state = VDEV_INITIALIZE_NONE;
                int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
                    sizeof (initialize_state), 1, &initialize_state);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_initialize_state = initialize_state;

                uint64_t timestamp = 0;
                err = zap_lookup(vd->vdev_spa->spa_meta_objset,
                    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
                    sizeof (timestamp), 1, &timestamp);
                ASSERT(err == 0 || err == ENOENT);
                vd->vdev_initialize_action_time = (time_t)timestamp;

                if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
                    vd->vdev_offline) {
                        /* load progress for reporting, but don't resume */
                        vdev_initialize_load(vd);
                } else if (vd->vdev_initialize_state ==
                    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd)) {
                        vdev_initialize(vd);
                }

                mutex_exit(&vd->vdev_initialize_lock);
        }

        for (uint64_t i = 0; i < vd->vdev_children; i++) {
                vdev_initialize_restart(vd->vdev_child[i]);
        }
}