/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
	dnode_t *dn = NULL;

	mutex_enter(&os->os_obj_lock);
	for (;;) {
		object = os->os_obj_next;
		/*
		 * Each time we polish off a L1 bp worth of dnodes (2^12
		 * objects), move to another L1 bp that's still reasonably
		 * sparse (at most 1/4 full).  Look from the beginning at most
		 * once per txg, but after that keep looking from here.
		 * os_rescan_dnodes is set during txg sync if enough objects
		 * have been freed since the previous rescan to justify
		 * backfilling again.  If we can't find a suitable block, just
		 * keep going from here.
		 *
		 * Note that dmu_traverse depends on the behavior that we use
		 * multiple blocks of the dnode object before going back to
		 * reuse objects.  Any change to this algorithm should preserve
		 * that property or find another solution to the issues
		 * described in traverse_visitbp.
		 */
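		/*
		 * Worked example (editor's note, assuming the common
		 * defaults): with 512-byte dnodes (DNODE_SHIFT == 9),
		 * 16K dnode blocks (DNODES_PER_BLOCK == 32), a 16K
		 * meta-dnode indirect block (dn_indblkshift == 14), and
		 * 128-byte block pointers (SPA_BLKPTRSHIFT == 7),
		 * L1_dnode_count = 32 << (14 - 7) = 4096 = 2^12, which
		 * is where the "2^12 objects" figure above comes from.
		 */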
		if (P2PHASE(object, L1_dnode_count) == 0) {
			uint64_t offset;
			int error;

			if (os->os_rescan_dnodes) {
				offset = 0;
				os->os_rescan_dnodes = B_FALSE;
			} else {
				offset = object << DNODE_SHIFT;
			}

			error = dnode_next_offset(DMU_META_DNODE(os),
			    DNODE_FIND_HOLE,
			    &offset, 2, DNODES_PER_BLOCK >> 2, 0);
			if (error == 0)
				object = offset >> DNODE_SHIFT;
		}
		os->os_obj_next = ++object;

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		(void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    FTAG, &dn);
		if (dn)
			break;

		if (dmu_object_next(os, &object, B_TRUE, 0) == 0)
			os->os_obj_next = object - 1;
	}

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);
	mutex_exit(&os->os_obj_lock);

	dmu_tx_add_new_object(tx, dn);
	dnode_rele(dn, FTAG);

	return (object);
}
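/*
 * Illustrative usage (editor's sketch, not part of this interface): a DMU
 * consumer typically allocates a new object from an assigned transaction
 * that holds the bonus of DMU_NEW_OBJECT, e.g.:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	if (dmu_tx_assign(tx, TXG_WAIT) == 0) {
 *		uint64_t obj = dmu_object_alloc(os,
 *		    DMU_OT_PLAIN_FILE_CONTENTS, 0, DMU_OT_NONE, 0, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */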
int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, FTAG, &dn);
	if (err)
		return (err);

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);
	dmu_tx_add_new_object(tx, dn);

	dnode_rele(dn, FTAG);

	return (0);
}
int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, tx);

	dnode_rele(dn, FTAG);
	return (err);
}
int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
	    FTAG, &dn);
	if (err)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);
	dnode_rele(dn, FTAG);

	return (0);
}
/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *object, taking into account only objects that may have been modified
 * after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset = (*objectp + 1) << DNODE_SHIFT;
	int error;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;

	return (error);
}
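/*
 * Illustrative usage (editor's sketch): walking every allocated object in
 * an objset, in the style of zdb; process() is a hypothetical callback:
 *
 *	uint64_t object = 0;
 *	int error;
 *
 *	while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0)
 *		process(os, object);
 */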
/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);
	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	mzap_create_impl(mos, object, 0, 0, tx);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}
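/*
 * Illustrative usage (editor's sketch): DSL code zapifies an existing MOS
 * object during sync so extra attributes can be stored in it; dsobj here
 * is a hypothetical dataset object number:
 *
 *	dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
 */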
void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}
	VERIFY0(dmu_object_free(mos, object, tx));
}