/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/ddt.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dsl_scan.h>

/*
 * Enable/disable prefetching of dedup-ed blocks which are going to be freed.
 */
int zfs_dedup_prefetch = 1;

static const ddt_ops_t *ddt_ops[DDT_TYPES] = {
	&ddt_zap_ops,
};

static const char *ddt_class_name[DDT_CLASSES] = {
	"ditto",
	"duplicate",
	"unique",
};

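/*
 * Each DDT is stored on disk as one object per (type, class) pair.  "Type"
 * selects the on-disk implementation through ddt_ops[] (currently only the
 * ZAP implementation exists), while "class" partitions entries by
 * replication level: ditto (extra copies wanted), duplicate (refcnt > 1),
 * and unique (refcnt == 1), ordered from most replicated to least.
 */
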
static void
ddt_object_create(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_tx_t *tx)
{
	spa_t *spa = ddt->ddt_spa;
	objset_t *os = ddt->ddt_os;
	uint64_t *objectp = &ddt->ddt_object[type][class];
	boolean_t prehash = zio_checksum_table[ddt->ddt_checksum].ci_flags &
	    ZCHECKSUM_FLAG_DEDUP;
	char name[DDT_NAMELEN];

	ddt_object_name(ddt, type, class, name);

	ASSERT(*objectp == 0);
	VERIFY(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash) == 0);
	ASSERT(*objectp != 0);

	VERIFY(zap_add(os, DMU_POOL_DIRECTORY_OBJECT, name,
	    sizeof (uint64_t), 1, objectp, tx) == 0);

	VERIFY(zap_add(os, spa->spa_ddt_stat_object, name,
	    sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
	    &ddt->ddt_histogram[type][class], tx) == 0);
}

static void
ddt_object_destroy(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_tx_t *tx)
{
	spa_t *spa = ddt->ddt_spa;
	objset_t *os = ddt->ddt_os;
	uint64_t *objectp = &ddt->ddt_object[type][class];
	char name[DDT_NAMELEN];

	ddt_object_name(ddt, type, class, name);

	ASSERT(*objectp != 0);
	ASSERT(ddt_object_count(ddt, type, class) == 0);
	ASSERT(ddt_histogram_empty(&ddt->ddt_histogram[type][class]));
	VERIFY(zap_remove(os, DMU_POOL_DIRECTORY_OBJECT, name, tx) == 0);
	VERIFY(zap_remove(os, spa->spa_ddt_stat_object, name, tx) == 0);
	VERIFY(ddt_ops[type]->ddt_op_destroy(os, *objectp, tx) == 0);
	bzero(&ddt->ddt_object_stats[type][class], sizeof (ddt_object_t));

	*objectp = 0;
}

static int
ddt_object_load(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
	dmu_object_info_t doi;
	char name[DDT_NAMELEN];
	int error;

	ddt_object_name(ddt, type, class, name);

	error = zap_lookup(ddt->ddt_os, DMU_POOL_DIRECTORY_OBJECT, name,
	    sizeof (uint64_t), 1, &ddt->ddt_object[type][class]);
	if (error != 0)
		return (error);

	VERIFY0(zap_lookup(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
	    sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
	    &ddt->ddt_histogram[type][class]));

	/*
	 * Seed the cached statistics.
	 */
	VERIFY(ddt_object_info(ddt, type, class, &doi) == 0);

	ddo->ddo_count = ddt_object_count(ddt, type, class);
	ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
	ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;

	return (0);
}

static void
ddt_object_sync(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_tx_t *tx)
{
	ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
	dmu_object_info_t doi;
	char name[DDT_NAMELEN];

	ddt_object_name(ddt, type, class, name);

	VERIFY(zap_update(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
	    sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
	    &ddt->ddt_histogram[type][class], tx) == 0);

	/*
	 * Cache DDT statistics; this is the only time they'll change.
	 */
	VERIFY(ddt_object_info(ddt, type, class, &doi) == 0);

	ddo->ddo_count = ddt_object_count(ddt, type, class);
	ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
	ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;
}

static int
ddt_object_lookup(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde)
{
	if (!ddt_object_exists(ddt, type, class))
		return (SET_ERROR(ENOENT));

	return (ddt_ops[type]->ddt_op_lookup(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde));
}

static void
ddt_object_prefetch(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde)
{
	if (!ddt_object_exists(ddt, type, class))
		return;

	ddt_ops[type]->ddt_op_prefetch(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde);
}

int
ddt_object_update(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_update(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde, tx));
}

static int
ddt_object_remove(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_remove(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde, tx));
}

int
ddt_object_walk(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    uint64_t *walk, ddt_entry_t *dde)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_walk(ddt->ddt_os,
	    ddt->ddt_object[type][class], dde, walk));
}

uint64_t
ddt_object_count(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	ASSERT(ddt_object_exists(ddt, type, class));

	return (ddt_ops[type]->ddt_op_count(ddt->ddt_os,
	    ddt->ddt_object[type][class]));
}

int
ddt_object_info(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    dmu_object_info_t *doi)
{
	if (!ddt_object_exists(ddt, type, class))
		return (SET_ERROR(ENOENT));

	return (dmu_object_info(ddt->ddt_os, ddt->ddt_object[type][class],
	    doi));
}

boolean_t
ddt_object_exists(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
	return (!!ddt->ddt_object[type][class]);
}

void
ddt_object_name(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
    char *name)
{
	(void) sprintf(name, DMU_POOL_DDT,
	    zio_checksum_table[ddt->ddt_checksum].ci_name,
	    ddt_ops[type]->ddt_op_name, ddt_class_name[class]);
}

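/*
 * DMU_POOL_DDT is the "DDT-%s-%s-%s" format string, so each table's name
 * in the MOS directory encodes its checksum function, on-disk type, and
 * class, e.g. "DDT-sha256-zap-duplicate".
 */
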
void
ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg)
{
	ASSERT(txg != 0);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		bp->blk_dva[d] = ddp->ddp_dva[d];
	BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth);
}

void
ddt_bp_create(enum zio_checksum checksum,
    const ddt_key_t *ddk, const ddt_phys_t *ddp, blkptr_t *bp)
{
	BP_ZERO(bp);

	if (ddp != NULL)
		ddt_bp_fill(ddp, bp, ddp->ddp_phys_birth);

	bp->blk_cksum = ddk->ddk_cksum;
	bp->blk_fill = 1;

	BP_SET_LSIZE(bp, DDK_GET_LSIZE(ddk));
	BP_SET_PSIZE(bp, DDK_GET_PSIZE(ddk));
	BP_SET_COMPRESS(bp, DDK_GET_COMPRESS(ddk));
	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_TYPE(bp, DMU_OT_DEDUP);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}

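/*
 * Note that a block pointer can be rebuilt entirely from a DDT entry: the
 * sizes and compression function come from the key, and the DVAs and
 * physical birth txg come from the phys array.  That is what lets the
 * dedup write path and the repair code below re-issue I/O for a block
 * given only its table entry.
 */
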
void
ddt_key_fill(ddt_key_t *ddk, const blkptr_t *bp)
{
	ddk->ddk_cksum = bp->blk_cksum;
	ddk->ddk_prop = 0;

	DDK_SET_LSIZE(ddk, BP_GET_LSIZE(bp));
	DDK_SET_PSIZE(ddk, BP_GET_PSIZE(bp));
	DDK_SET_COMPRESS(ddk, BP_GET_COMPRESS(bp));
}

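/*
 * The dedup key is therefore the block's checksum plus its logical size,
 * physical size, and compression function (packed into ddk_prop).  Two
 * blocks dedup against each other only if all of these match; identical
 * data compressed with different algorithms yields different keys.
 */
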
void
ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp)
{
	ASSERT(ddp->ddp_phys_birth == 0);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		ddp->ddp_dva[d] = bp->blk_dva[d];
	ddp->ddp_phys_birth = BP_PHYSICAL_BIRTH(bp);
}

void
ddt_phys_clear(ddt_phys_t *ddp)
{
	bzero(ddp, sizeof (*ddp));
}

void
ddt_phys_addref(ddt_phys_t *ddp)
{
	ddp->ddp_refcnt++;
}

void
ddt_phys_decref(ddt_phys_t *ddp)
{
	ASSERT((int64_t)ddp->ddp_refcnt > 0);
	ddp->ddp_refcnt--;
}

void
ddt_phys_free(ddt_t *ddt, ddt_key_t *ddk, ddt_phys_t *ddp, uint64_t txg)
{
	blkptr_t blk;

	ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
	ddt_phys_clear(ddp);
	zio_free(ddt->ddt_spa, txg, &blk);
}

ddt_phys_t *
ddt_phys_select(const ddt_entry_t *dde, const blkptr_t *bp)
{
	ddt_phys_t *ddp = (ddt_phys_t *)dde->dde_phys;

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_dva[0]) &&
		    BP_PHYSICAL_BIRTH(bp) == ddp->ddp_phys_birth)
			return (ddp);
	}

	return (NULL);
}

uint64_t
ddt_phys_total_refcnt(const ddt_entry_t *dde)
{
	uint64_t refcnt = 0;

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++)
		refcnt += dde->dde_phys[p].ddp_refcnt;

	return (refcnt);
}

static void
ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds)
{
	spa_t *spa = ddt->ddt_spa;
	ddt_phys_t *ddp = dde->dde_phys;
	ddt_key_t *ddk = &dde->dde_key;
	uint64_t lsize = DDK_GET_LSIZE(ddk);
	uint64_t psize = DDK_GET_PSIZE(ddk);

	bzero(dds, sizeof (*dds));

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		uint64_t dsize = 0;
		uint64_t refcnt = ddp->ddp_refcnt;

		if (ddp->ddp_phys_birth == 0)
			continue;

		for (int d = 0; d < SPA_DVAS_PER_BP; d++)
			dsize += dva_get_dsize_sync(spa, &ddp->ddp_dva[d]);

		dds->dds_blocks += 1;
		dds->dds_lsize += lsize;
		dds->dds_psize += psize;
		dds->dds_dsize += dsize;

		dds->dds_ref_blocks += refcnt;
		dds->dds_ref_lsize += lsize * refcnt;
		dds->dds_ref_psize += psize * refcnt;
		dds->dds_ref_dsize += dsize * refcnt;
	}
}

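/*
 * Each entry thus contributes two views to the stats: dds_blocks/lsize/
 * psize/dsize count the deduped copy once, while the dds_ref_* fields are
 * scaled by the reference count and reflect what the pool would consume
 * without dedup.  The difference between the two is the space saved; see
 * ddt_get_dedup_dspace() below.
 */
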
void
ddt_stat_add(ddt_stat_t *dst, const ddt_stat_t *src, uint64_t neg)
{
	const uint64_t *s = (const uint64_t *)src;
	uint64_t *d = (uint64_t *)dst;
	uint64_t *d_end = (uint64_t *)(dst + 1);

	ASSERT(neg == 0 || neg == -1ULL);	/* add or subtract */

	while (d < d_end)
		*d++ += (*s++ ^ neg) - neg;
}

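/*
 * The expression above is a branch-free add-or-subtract that relies on
 * two's complement arithmetic: with neg == 0, (x ^ 0) - 0 == x, so the
 * value is added; with neg == -1ULL, (x ^ ~0) - ~0 == ~x + 1 == -x, so it
 * is subtracted.
 */
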
static void
ddt_stat_update(ddt_t *ddt, ddt_entry_t *dde, uint64_t neg)
{
	ddt_stat_t dds;
	ddt_histogram_t *ddh;
	int bucket;

	ddt_stat_generate(ddt, dde, &dds);

	bucket = highbit64(dds.dds_ref_blocks) - 1;
	ASSERT(bucket >= 0);

	ddh = &ddt->ddt_histogram[dde->dde_type][dde->dde_class];

	ddt_stat_add(&ddh->ddh_stat[bucket], &dds, neg);
}

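/*
 * highbit64(n) - 1 is floor(log2(n)), so histogram bucket b accumulates
 * entries whose reference count satisfies 2^b <= refcnt < 2^(b+1); an
 * entry with 100 references, for example, lands in bucket 6.  Callers
 * pass neg == -1ULL to back an entry's stats out of the histogram before
 * modifying it and neg == 0 to add them back afterwards.
 */
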
void
ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src)
{
	for (int h = 0; h < 64; h++)
		ddt_stat_add(&dst->ddh_stat[h], &src->ddh_stat[h], 0);
}

void
ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh)
{
	bzero(dds, sizeof (*dds));

	for (int h = 0; h < 64; h++)
		ddt_stat_add(dds, &ddh->ddh_stat[h], 0);
}

boolean_t
ddt_histogram_empty(const ddt_histogram_t *ddh)
{
	const uint64_t *s = (const uint64_t *)ddh;
	const uint64_t *s_end = (const uint64_t *)(ddh + 1);

	while (s < s_end)
		if (*s++ != 0)
			return (B_FALSE);

	return (B_TRUE);
}

void
ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
{
	/* Sum the statistics we cached in ddt_object_sync(). */
	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
			for (enum ddt_class class = 0; class < DDT_CLASSES;
			    class++) {
				ddt_object_t *ddo =
				    &ddt->ddt_object_stats[type][class];
				ddo_total->ddo_count += ddo->ddo_count;
				ddo_total->ddo_dspace += ddo->ddo_dspace;
				ddo_total->ddo_mspace += ddo->ddo_mspace;
			}
		}
	}

	/* ... and compute the averages. */
	if (ddo_total->ddo_count != 0) {
		ddo_total->ddo_dspace /= ddo_total->ddo_count;
		ddo_total->ddo_mspace /= ddo_total->ddo_count;
	}
}

void
ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
{
	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
			for (enum ddt_class class = 0; class < DDT_CLASSES;
			    class++) {
				ddt_histogram_add(ddh,
				    &ddt->ddt_histogram_cache[type][class]);
			}
		}
	}
}

void
ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total)
{
	ddt_histogram_t *ddh_total;

	ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
	ddt_get_dedup_histogram(spa, ddh_total);
	ddt_histogram_stat(dds_total, ddh_total);
	kmem_free(ddh_total, sizeof (ddt_histogram_t));
}

uint64_t
ddt_get_dedup_dspace(spa_t *spa)
{
	ddt_stat_t dds_total = { 0 };

	ddt_get_dedup_stats(spa, &dds_total);
	return (dds_total.dds_ref_dsize - dds_total.dds_dsize);
}

uint64_t
ddt_get_pool_dedup_ratio(spa_t *spa)
{
	ddt_stat_t dds_total = { 0 };

	ddt_get_dedup_stats(spa, &dds_total);
	if (dds_total.dds_dsize == 0)
		return (100);

	return (dds_total.dds_ref_dsize * 100 / dds_total.dds_dsize);
}

int
ddt_ditto_copies_needed(ddt_t *ddt, ddt_entry_t *dde, ddt_phys_t *ddp_willref)
{
	spa_t *spa = ddt->ddt_spa;
	uint64_t total_refcnt = 0;
	uint64_t ditto = spa->spa_dedup_ditto;
	int total_copies = 0;
	int desired_copies = 0;

	for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
		ddt_phys_t *ddp = &dde->dde_phys[p];
		zio_t *zio = dde->dde_lead_zio[p];
		uint64_t refcnt = ddp->ddp_refcnt;	/* committed refs */
		if (zio != NULL)
			refcnt += zio->io_parent_count;	/* pending refs */
		if (ddp == ddp_willref)
			refcnt++;			/* caller's ref */
		if (refcnt != 0) {
			total_refcnt += refcnt;
			total_copies += p;
		}
	}

	if (ditto == 0 || ditto > UINT32_MAX)
		ditto = UINT32_MAX;

	if (total_refcnt >= 1)
		desired_copies++;
	if (total_refcnt >= ditto)
		desired_copies++;
	if (total_refcnt >= ditto * ditto)
		desired_copies++;

	return (MAX(desired_copies, total_copies) - total_copies);
}

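/*
 * With the pool's dedupditto property set to D, an entry wants one copy
 * as soon as it is referenced, a second copy once it reaches D references,
 * and a third at D * D references: with dedupditto=100, for example, the
 * 100th reference triggers a second copy and the 10,000th a third.  The
 * return value counts only the additional copies needed beyond those
 * already allocated.
 */
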
int
ddt_ditto_copies_present(ddt_entry_t *dde)
{
	ddt_phys_t *ddp = &dde->dde_phys[DDT_PHYS_DITTO];
	dva_t *dva = ddp->ddp_dva;
	int copies = 0 - DVA_GET_GANG(dva);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++, dva++)
		if (DVA_IS_VALID(dva))
			copies++;

	ASSERT(copies >= 0 && copies < SPA_DVAS_PER_BP);

	return (copies);
}

size_t
ddt_compress(void *src, uchar_t *dst, size_t s_len, size_t d_len)
{
	uchar_t *version = dst++;
	int cpfunc = ZIO_COMPRESS_ZLE;
	zio_compress_info_t *ci = &zio_compress_table[cpfunc];
	size_t c_len;

	ASSERT(d_len >= s_len + 1);	/* no compression plus version byte */

	c_len = ci->ci_compress(src, dst, s_len, d_len - 1, ci->ci_level);

	if (c_len == s_len) {
		cpfunc = ZIO_COMPRESS_OFF;
		bcopy(src, dst, s_len);
	}

	*version = cpfunc;
	/* CONSTCOND */
	if (ZFS_HOST_BYTEORDER)
		*version |= DDT_COMPRESS_BYTEORDER_MASK;

	return (c_len + 1);
}

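/*
 * The version byte written above encodes the compression function in its
 * low bits (DDT_COMPRESS_FUNCTION_MASK) and the writer's endianness in
 * DDT_COMPRESS_BYTEORDER_MASK; ddt_decompress() below uses them to pick
 * the decompressor and decide whether to byteswap.  ZLE is chosen here
 * presumably because DDT entries are dominated by runs of zero bytes,
 * which zero-length encoding handles well.
 */
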
void
ddt_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len)
{
	uchar_t version = *src++;
	int cpfunc = version & DDT_COMPRESS_FUNCTION_MASK;
	zio_compress_info_t *ci = &zio_compress_table[cpfunc];

	if (ci->ci_decompress != NULL)
		(void) ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
	else
		bcopy(src, dst, d_len);

	if (((version & DDT_COMPRESS_BYTEORDER_MASK) != 0) !=
	    (ZFS_HOST_BYTEORDER != 0))
		byteswap_uint64_array(dst, d_len);
}

ddt_t *
ddt_select_by_checksum(spa_t *spa, enum zio_checksum c)
{
	return (spa->spa_ddt[c]);
}

ddt_t *
ddt_select(spa_t *spa, const blkptr_t *bp)
{
	return (spa->spa_ddt[BP_GET_CHECKSUM(bp)]);
}

void
ddt_enter(ddt_t *ddt)
{
	mutex_enter(&ddt->ddt_lock);
}

void
ddt_exit(ddt_t *ddt)
{
	mutex_exit(&ddt->ddt_lock);
}

static ddt_entry_t *
ddt_alloc(const ddt_key_t *ddk)
{
	ddt_entry_t *dde;

	dde = kmem_zalloc(sizeof (ddt_entry_t), KM_SLEEP);
	cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);

	dde->dde_key = *ddk;

	return (dde);
}

static void
ddt_free(ddt_entry_t *dde)
{
	ASSERT(!dde->dde_loading);

	for (int p = 0; p < DDT_PHYS_TYPES; p++)
		ASSERT(dde->dde_lead_zio[p] == NULL);

	if (dde->dde_repair_data != NULL)
		zio_buf_free(dde->dde_repair_data,
		    DDK_GET_PSIZE(&dde->dde_key));

	cv_destroy(&dde->dde_cv);
	kmem_free(dde, sizeof (*dde));
}

void
ddt_remove(ddt_t *ddt, ddt_entry_t *dde)
{
	ASSERT(MUTEX_HELD(&ddt->ddt_lock));

	avl_remove(&ddt->ddt_tree, dde);
	ddt_free(dde);
}

ddt_entry_t *
ddt_lookup(ddt_t *ddt, const blkptr_t *bp, boolean_t add)
{
	ddt_entry_t *dde, dde_search;
	enum ddt_type type;
	enum ddt_class class;
	avl_index_t where;
	int error;

	ASSERT(MUTEX_HELD(&ddt->ddt_lock));

	ddt_key_fill(&dde_search.dde_key, bp);

	dde = avl_find(&ddt->ddt_tree, &dde_search, &where);
	if (dde == NULL) {
		if (!add)
			return (NULL);
		dde = ddt_alloc(&dde_search.dde_key);
		avl_insert(&ddt->ddt_tree, dde, where);
	}

	while (dde->dde_loading)
		cv_wait(&dde->dde_cv, &ddt->ddt_lock);

	if (dde->dde_loaded)
		return (dde);

	dde->dde_loading = B_TRUE;

	ddt_exit(ddt);

	error = ENOENT;

	for (type = 0; type < DDT_TYPES; type++) {
		for (class = 0; class < DDT_CLASSES; class++) {
			error = ddt_object_lookup(ddt, type, class, dde);
			if (error != ENOENT)
				break;
		}
		if (error != ENOENT)
			break;
	}

	ASSERT(error == 0 || error == ENOENT);

	ddt_enter(ddt);

	ASSERT(dde->dde_loaded == B_FALSE);
	ASSERT(dde->dde_loading == B_TRUE);

	dde->dde_type = type;	/* will be DDT_TYPES if no entry found */
	dde->dde_class = class;	/* will be DDT_CLASSES if no entry found */
	dde->dde_loaded = B_TRUE;
	dde->dde_loading = B_FALSE;

	if (error == 0)
		ddt_stat_update(ddt, dde, -1ULL);

	cv_broadcast(&dde->dde_cv);

	return (dde);
}

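/*
 * The dde_loading/dde_loaded protocol lets ddt_lookup() drop the DDT lock
 * while it probes the on-disk tables: the first thread marks the entry
 * loading, searches every (type, class) object with the lock released,
 * and broadcasts on dde_cv when finished; concurrent lookups of the same
 * key simply wait on the cv.  The ddt_stat_update(..., -1ULL) call backs
 * the entry's stats out of the histogram while the entry is live in
 * memory; ddt_sync_entry() adds the updated stats back at sync time.
 */
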
void
ddt_prefetch(spa_t *spa, const blkptr_t *bp)
{
	ddt_t *ddt;
	ddt_entry_t dde;

	if (!zfs_dedup_prefetch || bp == NULL || !BP_GET_DEDUP(bp))
		return;

	/*
	 * We only remove the DDT once all tables are empty and only
	 * prefetch dedup blocks when there are entries in the DDT.
	 * Thus no locking is required as the DDT can't disappear on us.
	 */
	ddt = ddt_select(spa, bp);
	ddt_key_fill(&dde.dde_key, bp);

	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
			ddt_object_prefetch(ddt, type, class, &dde);
		}
	}
}

static int
ddt_entry_compare(const void *x1, const void *x2)
{
	const ddt_entry_t *dde1 = x1;
	const ddt_entry_t *dde2 = x2;
	const uint64_t *u1 = (const uint64_t *)&dde1->dde_key;
	const uint64_t *u2 = (const uint64_t *)&dde2->dde_key;

	for (int i = 0; i < DDT_KEY_WORDS; i++) {
		if (u1[i] < u2[i])
			return (-1);
		if (u1[i] > u2[i])
			return (1);
	}

	return (0);
}

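/*
 * Keys are compared as DDT_KEY_WORDS raw 64-bit words.  Any total order
 * works here, since the AVL trees are only probed with complete keys
 * built by ddt_key_fill(); the word-by-word compare is simply cheap.
 */
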
static ddt_t *
ddt_table_alloc(spa_t *spa, enum zio_checksum c)
{
	ddt_t *ddt;

	ddt = kmem_zalloc(sizeof (*ddt), KM_SLEEP);

	mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&ddt->ddt_tree, ddt_entry_compare,
	    sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
	avl_create(&ddt->ddt_repair_tree, ddt_entry_compare,
	    sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
	ddt->ddt_checksum = c;
	ddt->ddt_spa = spa;
	ddt->ddt_os = spa->spa_meta_objset;

	return (ddt);
}

static void
ddt_table_free(ddt_t *ddt)
{
	ASSERT(avl_numnodes(&ddt->ddt_tree) == 0);
	ASSERT(avl_numnodes(&ddt->ddt_repair_tree) == 0);
	avl_destroy(&ddt->ddt_tree);
	avl_destroy(&ddt->ddt_repair_tree);
	mutex_destroy(&ddt->ddt_lock);
	kmem_free(ddt, sizeof (*ddt));
}

void
ddt_create(spa_t *spa)
{
	spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;

	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
		spa->spa_ddt[c] = ddt_table_alloc(spa, c);
}

int
ddt_load(spa_t *spa)
{
	int error;

	ddt_create(spa);

	error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_DDT_STATS, sizeof (uint64_t), 1,
	    &spa->spa_ddt_stat_object);

	if (error)
		return (error == ENOENT ? 0 : error);

	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
			for (enum ddt_class class = 0; class < DDT_CLASSES;
			    class++) {
				error = ddt_object_load(ddt, type, class);
				if (error != 0 && error != ENOENT)
					return (error);
			}
		}

		/*
		 * Seed the cached histograms.
		 */
		bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
		    sizeof (ddt->ddt_histogram));
	}

	return (0);
}

void
ddt_unload(spa_t *spa)
{
	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		if (spa->spa_ddt[c]) {
			ddt_table_free(spa->spa_ddt[c]);
			spa->spa_ddt[c] = NULL;
		}
	}
}

boolean_t
ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
{
	ddt_t *ddt;
	ddt_entry_t dde;

	if (!BP_GET_DEDUP(bp))
		return (B_FALSE);

	if (max_class == DDT_CLASS_UNIQUE)
		return (B_TRUE);

	ddt = spa->spa_ddt[BP_GET_CHECKSUM(bp)];

	ddt_key_fill(&dde.dde_key, bp);

	for (enum ddt_type type = 0; type < DDT_TYPES; type++)
		for (enum ddt_class class = 0; class <= max_class; class++)
			if (ddt_object_lookup(ddt, type, class, &dde) == 0)
				return (B_TRUE);

	return (B_FALSE);
}

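/*
 * Because the classes are ordered from most to least replicated, checking
 * class <= max_class asks whether this block could live in any table at
 * least as replicated as max_class.  Scan code, for example, can use this
 * to decide whether a block will already be visited during the DDT phase
 * of a scrub.
 */
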
ddt_entry_t *
ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
{
	ddt_key_t ddk;
	ddt_entry_t *dde;

	ddt_key_fill(&ddk, bp);

	dde = ddt_alloc(&ddk);

	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
			/*
			 * We can only do repair if there are multiple copies
			 * of the block.  For anything in the UNIQUE class,
			 * there's definitely only one copy, so don't even try.
			 */
			if (class != DDT_CLASS_UNIQUE &&
			    ddt_object_lookup(ddt, type, class, dde) == 0)
				return (dde);
		}
	}

	bzero(dde->dde_phys, sizeof (dde->dde_phys));

	return (dde);
}

void
ddt_repair_done(ddt_t *ddt, ddt_entry_t *dde)
{
	avl_index_t where;

	ddt_enter(ddt);

	if (dde->dde_repair_data != NULL && spa_writeable(ddt->ddt_spa) &&
	    avl_find(&ddt->ddt_repair_tree, dde, &where) == NULL)
		avl_insert(&ddt->ddt_repair_tree, dde, where);
	else
		ddt_free(dde);

	ddt_exit(ddt);
}

static void
ddt_repair_entry_done(zio_t *zio)
{
	ddt_entry_t *rdde = zio->io_private;

	ddt_free(rdde);
}

static void
ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio)
{
	ddt_phys_t *ddp = dde->dde_phys;
	ddt_phys_t *rddp = rdde->dde_phys;
	ddt_key_t *ddk = &dde->dde_key;
	ddt_key_t *rddk = &rdde->dde_key;
	zio_t *zio;
	blkptr_t blk;

	zio = zio_null(rio, rio->io_spa, NULL,
	    ddt_repair_entry_done, rdde, rio->io_flags);

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth != rddp->ddp_phys_birth ||
		    bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
			continue;
		ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
		zio_nowait(zio_rewrite(zio, zio->io_spa, 0, &blk,
		    rdde->dde_repair_data, DDK_GET_PSIZE(rddk), NULL, NULL,
		    ZIO_PRIORITY_SYNC_WRITE, ZIO_DDT_CHILD_FLAGS(zio), NULL));
	}

	zio_nowait(zio);
}

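/*
 * Repair is a straight overwrite of the damaged copies: for each phys
 * entry whose DVAs and birth txg still match the snapshot taken in the
 * repair entry, zio_rewrite() pushes the known-good data back onto the
 * existing DVAs.  If the entry changed in the meantime (freed or
 * reallocated), the comparison above fails and that phys is skipped.
 */
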
static void
ddt_repair_table(ddt_t *ddt, zio_t *rio)
{
	spa_t *spa = ddt->ddt_spa;
	ddt_entry_t *dde, *rdde_next, *rdde;
	avl_tree_t *t = &ddt->ddt_repair_tree;
	blkptr_t blk;

	if (spa_sync_pass(spa) > 1)
		return;

	ddt_enter(ddt);
	for (rdde = avl_first(t); rdde != NULL; rdde = rdde_next) {
		rdde_next = AVL_NEXT(t, rdde);
		avl_remove(&ddt->ddt_repair_tree, rdde);
		ddt_exit(ddt);
		ddt_bp_create(ddt->ddt_checksum, &rdde->dde_key, NULL, &blk);
		dde = ddt_repair_start(ddt, &blk);
		ddt_repair_entry(ddt, dde, rdde, rio);
		ddt_repair_done(ddt, dde);
		ddt_enter(ddt);
	}
	ddt_exit(ddt);
}

static void
ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg)
{
	dsl_pool_t *dp = ddt->ddt_spa->spa_dsl_pool;
	ddt_phys_t *ddp = dde->dde_phys;
	ddt_key_t *ddk = &dde->dde_key;
	enum ddt_type otype = dde->dde_type;
	enum ddt_type ntype = DDT_TYPE_CURRENT;
	enum ddt_class oclass = dde->dde_class;
	enum ddt_class nclass;
	uint64_t total_refcnt = 0;

	ASSERT(dde->dde_loaded);
	ASSERT(!dde->dde_loading);

	for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		ASSERT(dde->dde_lead_zio[p] == NULL);
		ASSERT((int64_t)ddp->ddp_refcnt >= 0);
		if (ddp->ddp_phys_birth == 0) {
			ASSERT(ddp->ddp_refcnt == 0);
			continue;
		}
		if (p == DDT_PHYS_DITTO) {
			if (ddt_ditto_copies_needed(ddt, dde, NULL) == 0)
				ddt_phys_free(ddt, ddk, ddp, txg);
			continue;
		}
		if (ddp->ddp_refcnt == 0)
			ddt_phys_free(ddt, ddk, ddp, txg);
		total_refcnt += ddp->ddp_refcnt;
	}

	if (dde->dde_phys[DDT_PHYS_DITTO].ddp_phys_birth != 0)
		nclass = DDT_CLASS_DITTO;
	else if (total_refcnt > 1)
		nclass = DDT_CLASS_DUPLICATE;
	else
		nclass = DDT_CLASS_UNIQUE;

	if (otype != DDT_TYPES &&
	    (otype != ntype || oclass != nclass || total_refcnt == 0)) {
		VERIFY(ddt_object_remove(ddt, otype, oclass, dde, tx) == 0);
		ASSERT(ddt_object_lookup(ddt, otype, oclass, dde) == ENOENT);
	}

	if (total_refcnt != 0) {
		dde->dde_type = ntype;
		dde->dde_class = nclass;
		ddt_stat_update(ddt, dde, 0);
		if (!ddt_object_exists(ddt, ntype, nclass))
			ddt_object_create(ddt, ntype, nclass, tx);
		VERIFY(ddt_object_update(ddt, ntype, nclass, dde, tx) == 0);

		/*
		 * If the class changes, the order that we scan this bp
		 * changes.  If it decreases, we could miss it, so
		 * scan it right now.  (This covers both class changing
		 * while we are doing ddt_walk(), and when we are
		 * traversing.)
		 */
		if (nclass < oclass) {
			dsl_scan_ddt_entry(dp->dp_scan,
			    ddt->ddt_checksum, dde, tx);
		}
	}
}

static void
ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
{
	spa_t *spa = ddt->ddt_spa;
	ddt_entry_t *dde;
	void *cookie = NULL;

	if (avl_numnodes(&ddt->ddt_tree) == 0)
		return;

	ASSERT(spa->spa_uberblock.ub_version >= SPA_VERSION_DEDUP);

	if (spa->spa_ddt_stat_object == 0) {
		spa->spa_ddt_stat_object = zap_create_link(ddt->ddt_os,
		    DMU_OT_DDT_STATS, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_DDT_STATS, tx);
	}

	while ((dde = avl_destroy_nodes(&ddt->ddt_tree, &cookie)) != NULL) {
		ddt_sync_entry(ddt, dde, tx, txg);
		ddt_free(dde);
	}

	for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
		uint64_t count = 0;
		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
			if (ddt_object_exists(ddt, type, class)) {
				ddt_object_sync(ddt, type, class, tx);
				count += ddt_object_count(ddt, type, class);
			}
		}
		for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
			if (count == 0 && ddt_object_exists(ddt, type, class))
				ddt_object_destroy(ddt, type, class, tx);
		}
	}

	bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
	    sizeof (ddt->ddt_histogram));
}

void
ddt_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	zio_t *rio = zio_root(spa, NULL, NULL,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	ASSERT(spa_syncing_txg(spa) == txg);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
		ddt_t *ddt = spa->spa_ddt[c];
		if (ddt == NULL)
			continue;
		ddt_sync_table(ddt, tx, txg);
		ddt_repair_table(ddt, rio);
	}

	(void) zio_wait(rio);

	dmu_tx_commit(tx);
}

int
ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)
{
	do {
		do {
			do {
				ddt_t *ddt = spa->spa_ddt[ddb->ddb_checksum];
				int error = ENOENT;
				if (ddt_object_exists(ddt, ddb->ddb_type,
				    ddb->ddb_class)) {
					error = ddt_object_walk(ddt,
					    ddb->ddb_type, ddb->ddb_class,
					    &ddb->ddb_cursor, dde);
				}
				dde->dde_type = ddb->ddb_type;
				dde->dde_class = ddb->ddb_class;
				if (error == 0)
					return (0);
				if (error != ENOENT)
					return (error);
				ddb->ddb_cursor = 0;
			} while (++ddb->ddb_checksum < ZIO_CHECKSUM_FUNCTIONS);
			ddb->ddb_checksum = 0;
		} while (++ddb->ddb_type < DDT_TYPES);
		ddb->ddb_type = 0;
	} while (++ddb->ddb_class < DDT_CLASSES);

	return (SET_ERROR(ENOENT));
}

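/*
 * ddt_walk() resumes from an arbitrary ddt_bookmark_t, advancing the
 * innermost checksum cursor first, then type, then class.  The per-object
 * ddb_cursor is only reset once that object is exhausted, which is what
 * makes the traversal restartable mid-table across txgs.
 */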