/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		mm_children;
	int		mm_replacing;
	int		mm_preferred;
	int		mm_root;
	mirror_child_t	mm_child[1];
} mirror_map_t;

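/*
 * For ordinary (non-replacing) mirrors the preferred read child rotates
 * every (1 << vdev_mirror_shift) bytes (2 MB) of offset, spreading read
 * load across the children.
 */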
int vdev_mirror_shift = 21;

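/* zio_vsd_ops callback: free the per-zio mirror map. */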
static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, offsetof(mirror_map_t, mm_child[mm->mm_children]));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};

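/*
 * Build the mirror map for this zio and hang it off zio->io_vsd.
 * For I/O to the root vdev (ditto blocks, io_vd == NULL) there is one
 * child per DVA in the block pointer; otherwise there is one child per
 * mirror child vdev.
 */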
static mirror_map_t *
vdev_mirror_map_alloc(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c, d;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		c = BP_GET_NDVAS(zio->io_bp);

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
		mm->mm_children = c;
		mm->mm_replacing = B_FALSE;
		mm->mm_preferred = spa_get_random(c);
		mm->mm_root = B_TRUE;

		/*
		 * Check the other, lower-index DVAs to see if they're on
		 * the same vdev as the child we picked.  If they are, use
		 * them since they are likely to have been allocated from
		 * the primary metaslab in use at the time, and hence are
		 * more likely to have locality with single-copy data.
		 */
		for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) {
			if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c]))
				mm->mm_preferred = d;
		}

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		c = vd->vdev_children;

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
		mm->mm_children = c;
		mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		mm->mm_preferred = mm->mm_replacing ? 0 :
		    (zio->io_offset >> vdev_mirror_shift) % c;
		mm->mm_root = B_FALSE;

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}

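/*
 * Open all children.  The mirror's usable size is that of its smallest
 * child and its alignment that of its largest child; the open fails
 * only if every child fails to open.
 */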
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*ashift = MAX(*ashift, cvd->vdev_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

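/* Record the result of a normal (non-scrub) child I/O. */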
static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

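/*
 * Completion callback for scrub reads: a successful child copies its
 * data into every parent zio's buffer before its private buffer is freed.
 */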
static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			bcopy(zio->io_data, pio->io_data, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	zio_buf_free(zio->io_data, zio->io_size);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Try to find a child whose DTL doesn't contain the block we want to read.
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	uint64_t txg = zio->io_txg;
	int i, c;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	/*
	 * Try to find a child whose DTL doesn't contain the block to read.
	 * If a child is known to be completely inaccessible (indicated by
	 * vdev_readable() returning B_FALSE), don't even try.
	 */
	for (i = 0, c = mm->mm_preferred; i < mm->mm_children; i++, c++) {
		if (c >= mm->mm_children)
			c = 0;
		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;
		if (!vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}
		if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
			return (c);
		mc->mc_error = SET_ERROR(ESTALE);
		mc->mc_skipped = 1;
		mc->mc_speculative = 1;
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++)
		if (!mm->mm_child[c].mc_tried)
			return (c);

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}

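/*
 * Issue the child I/Os: writes (and scrub reads of non-replacing mirrors)
 * go to every child; other reads go to a single child chosen by
 * vdev_mirror_child_select().
 */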
static int
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_alloc(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_data in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    zio_buf_alloc(zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			return (ZIO_PIPELINE_CONTINUE);
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	return (ZIO_PIPELINE_CONTINUE);
}

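/*
 * Pick the most severe child error, preferring errors from children we
 * actually expected to succeed over errors from purely speculative
 * attempts.
 */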
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}

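/*
 * Aggregate the child results.  Writes fail only if no copy was written
 * (or, for ditto blocks, if any copy failed).  Reads keep trying other
 * children until a good copy is found; once one is in hand, damaged or
 * stale children are rewritten when the pool is writable.
 */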
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_data, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

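/*
 * Propagate child state changes: all children faulted makes the mirror
 * unopenable; any faulted or degraded child degrades it; otherwise it
 * is healthy.
 */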
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

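/*
 * Mirror, replacing, and spare vdevs share the same open/close, I/O,
 * and state-change vectors; only the vdev type name differs.
 */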
vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};