/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2013 DEY Storage Systems, Inc.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

/* Portions Copyright 2010 Robert Milkowski */
#ifndef	_SYS_DMU_H
#define	_SYS_DMU_H

/*
 * This file describes the interface that the DMU provides for its
 * consumers.
 *
 * The DMU also interacts with the SPA. That interface is described in
 * dmu_spa.h.
 */
#include <sys/zfs_context.h>
#include <sys/inttypes.h>
#include <sys/fs/zfs.h>
#include <sys/zio_compress.h>
#include <sys/zio_priority.h>
struct zbookmark_phys;

typedef struct objset objset_t;
typedef struct dmu_tx dmu_tx_t;
typedef struct dsl_dir dsl_dir_t;
typedef struct dnode dnode_t;
typedef enum dmu_object_byteswap {
	DMU_BSWAP_UINT8,
	DMU_BSWAP_UINT16,
	DMU_BSWAP_UINT32,
	DMU_BSWAP_UINT64,
	DMU_BSWAP_ZAP,
	DMU_BSWAP_DNODE,
	DMU_BSWAP_OBJSET,
	DMU_BSWAP_ZNODE,
	DMU_BSWAP_OLDACL,
	DMU_BSWAP_ACL,
	/*
	 * Allocating a new byteswap type number makes the on-disk format
	 * incompatible with any other format that uses the same number.
	 *
	 * Data can usually be structured to work with one of the
	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
	 */
	DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;
#define	DMU_OT_NEWTYPE 0x80
#define	DMU_OT_METADATA 0x40
#define	DMU_OT_BYTESWAP_MASK 0x3f
/*
 * Defines a uint8_t object type. Object types specify if the data
 * in the object is metadata (boolean) and how to byteswap the data
 * (dmu_object_byteswap_t). All of the types created by this method
 * are cached in the dbuf metadata cache.
 */
#define	DMU_OT(byteswap, metadata) \
	(DMU_OT_NEWTYPE | \
	((metadata) ? DMU_OT_METADATA : 0) | \
	((byteswap) & DMU_OT_BYTESWAP_MASK))
#define	DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
	(ot) < DMU_OT_NUMTYPES)

#define	DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_METADATA) : \
	dmu_ot[(ot)].ot_metadata)

#define	DMU_OT_IS_METADATA_CACHED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	B_TRUE : dmu_ot[(ot)].ot_dbuf_metadata_cache)
/*
 * These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
 * have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
 * is repurposed for embedded BPs.
 */
#define	DMU_OT_HAS_FILL(ot) \
	((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)

#define	DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) : \
	dmu_ot[(ot)].ot_byteswap)
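
/*
 * Example (illustrative only, not part of the interface): a DMU_OTN_*
 * value declared with DMU_OT() below is just a bit-packed uint8_t, e.g.
 *
 *	DMU_OTN_UINT64_METADATA
 *	    == (DMU_OT_NEWTYPE | DMU_OT_METADATA | DMU_BSWAP_UINT64)
 *
 * so the accessor macros above decode it without a dmu_ot[] table entry:
 *
 *	DMU_OT_BYTESWAP(DMU_OTN_UINT64_METADATA) == DMU_BSWAP_UINT64
 *	DMU_OT_IS_METADATA(DMU_OTN_UINT64_METADATA) != 0
 *	DMU_OT_IS_VALID(DMU_OTN_UINT64_METADATA) != 0
 */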
typedef enum dmu_object_type {
	DMU_OT_NONE,
	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
	DMU_OT_BPOBJ,			/* UINT64 */
	DMU_OT_BPOBJ_HDR,		/* UINT64 */
	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
	DMU_OT_SPACE_MAP,		/* UINT64 */
	DMU_OT_INTENT_LOG,		/* UINT64 */
	DMU_OT_DNODE,			/* DNODE */
	DMU_OT_OBJSET,			/* OBJSET */
	DMU_OT_DSL_DIR,			/* UINT64 */
	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
	DMU_OT_DSL_PROPS,		/* ZAP */
	DMU_OT_DSL_DATASET,		/* UINT64 */
	DMU_OT_ZNODE,			/* ZNODE */
	DMU_OT_OLDACL,			/* Old ACL */
	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
	DMU_OT_MASTER_NODE,		/* ZAP */
	DMU_OT_UNLINKED_SET,		/* ZAP */
	DMU_OT_ZVOL,			/* UINT8 */
	DMU_OT_ZVOL_PROP,		/* ZAP */
	/* other; for testing only! */
	DMU_OT_PLAIN_OTHER,		/* UINT8 */
	DMU_OT_UINT64_OTHER,		/* UINT64 */
	DMU_OT_ZAP_OTHER,		/* ZAP */
	/* new object types: */
	DMU_OT_ERROR_LOG,		/* ZAP */
	DMU_OT_SPA_HISTORY,		/* UINT8 */
	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
	DMU_OT_POOL_PROPS,		/* ZAP */
	DMU_OT_DSL_PERMS,		/* ZAP */
	DMU_OT_ACL,			/* ACL */
	DMU_OT_SYSACL,			/* SYSACL */
	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
	DMU_OT_NEXT_CLONES,		/* ZAP */
	DMU_OT_SCAN_QUEUE,		/* ZAP */
	DMU_OT_USERGROUP_USED,		/* ZAP */
	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
	DMU_OT_USERREFS,		/* ZAP */
	DMU_OT_DDT_ZAP,			/* ZAP */
	DMU_OT_DDT_STATS,		/* ZAP */
	DMU_OT_SA,			/* System attr */
	DMU_OT_SA_MASTER_NODE,		/* ZAP */
	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
	DMU_OT_SCAN_XLATE,		/* ZAP */
	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
	DMU_OT_DEADLIST,		/* ZAP */
	DMU_OT_DEADLIST_HDR,		/* UINT64 */
	DMU_OT_DSL_CLONES,		/* ZAP */
	DMU_OT_BPOBJ_SUBOBJ,		/* UINT64 */
	/*
	 * Do not allocate new object types here. Doing so makes the on-disk
	 * format incompatible with any other format that uses the same object
	 * type number.
	 *
	 * When creating an object which does not have one of the above types
	 * use the DMU_OTN_* type with the correct byteswap and metadata
	 * values.
	 *
	 * The DMU_OTN_* types do not have entries in the dmu_ot table, so
	 * use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
	 * of indexing into dmu_ot directly (this works for both DMU_OT_* types
	 * and DMU_OTN_* types).
	 */
	DMU_OT_NUMTYPES,

	/*
	 * Names for valid types declared with DMU_OT().
	 */
	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE),
} dmu_object_type_t;
/*
 * These flags are intended to be used to specify the "txg_how"
 * parameter when calling the dmu_tx_assign() function. See the comment
 * above dmu_tx_assign() for more details on the meaning of these flags.
 */
#define	TXG_NOWAIT	(0ULL)
#define	TXG_WAIT	(1ULL<<0)
#define	TXG_NOTHROTTLE	(1ULL<<1)
void byteswap_uint64_array(void *buf, size_t size);
void byteswap_uint32_array(void *buf, size_t size);
void byteswap_uint16_array(void *buf, size_t size);
void byteswap_uint8_array(void *buf, size_t size);
void zap_byteswap(void *buf, size_t size);
void zfs_oldacl_byteswap(void *buf, size_t size);
void zfs_acl_byteswap(void *buf, size_t size);
void zfs_znode_byteswap(void *buf, size_t size);
#define	DS_FIND_SNAPSHOTS	(1<<0)
#define	DS_FIND_CHILDREN	(1<<1)
#define	DS_FIND_SERIALIZE	(1<<2)
/*
 * The maximum number of bytes that can be accessed as part of one
 * operation, including metadata.
 */
#define	DMU_MAX_ACCESS (32 * 1024 * 1024)	/* 32MB */
#define	DMU_MAX_DELETEBLKCNT (20480)	/* ~5MB of indirect blocks */
#define	DMU_USERUSED_OBJECT	(-1ULL)
#define	DMU_GROUPUSED_OBJECT	(-2ULL)

/*
 * artificial blkids for bonus buffer and spill blocks
 */
#define	DMU_BONUS_BLKID		(-1ULL)
#define	DMU_SPILL_BLKID		(-2ULL)
/*
 * Public routines to create, destroy, open, and close objsets.
 */
int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp);
void dmu_objset_rele(objset_t *os, void *tag);
void dmu_objset_disown(objset_t *os, void *tag);
int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp);

void dmu_objset_evict_dbufs(objset_t *os);
int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg);
int dmu_objset_clone(const char *name, const char *origin);
int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer,
    struct nvlist *errlist);
int dmu_objset_snapshot_one(const char *fsname, const char *snapname);
int dmu_objset_snapshot_tmp(const char *, const char *, int);
int dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags);
void dmu_objset_byteswap(void *buf, size_t size);
int dsl_dataset_rename_snapshot(const char *fsname,
    const char *oldsnapname, const char *newsnapname, boolean_t recursive);
int dmu_objset_remap_indirects(const char *fsname);
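
/*
 * Example (illustrative sketch only; "tank/fs" is a hypothetical dataset
 * name and FTAG is assumed to be the conventional caller tag from
 * zfs_context.h): holding an objset by name for a read-only operation and
 * releasing it with the same tag.
 *
 *	objset_t *os;
 *	int error;
 *
 *	error = dmu_objset_hold("tank/fs", FTAG, &os);
 *	if (error != 0)
 *		return (error);
 *	... read-only use of os ...
 *	dmu_objset_rele(os, FTAG);
 */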
typedef struct dmu_buf {
	uint64_t db_object;	/* object that this buffer is part of */
	uint64_t db_offset;	/* byte offset in this object */
	uint64_t db_size;	/* size of buffer in bytes */
	void *db_data;		/* data in buffer */
} dmu_buf_t;
/*
 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
 */
#define	DMU_POOL_DIRECTORY_OBJECT	1
#define	DMU_POOL_CONFIG			"config"
#define	DMU_POOL_FEATURES_FOR_WRITE	"features_for_write"
#define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
#define	DMU_POOL_FEATURE_DESCRIPTIONS	"feature_descriptions"
#define	DMU_POOL_FEATURE_ENABLED_TXG	"feature_enabled_txg"
#define	DMU_POOL_ROOT_DATASET		"root_dataset"
#define	DMU_POOL_SYNC_BPOBJ		"sync_bplist"
#define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
#define	DMU_POOL_ERRLOG_LAST		"errlog_last"
#define	DMU_POOL_SPARES			"spares"
#define	DMU_POOL_DEFLATE		"deflate"
#define	DMU_POOL_HISTORY		"history"
#define	DMU_POOL_PROPS			"pool_props"
#define	DMU_POOL_L2CACHE		"l2cache"
#define	DMU_POOL_TMP_USERREFS		"tmp_userrefs"
#define	DMU_POOL_DDT			"DDT-%s-%s-%s"
#define	DMU_POOL_DDT_STATS		"DDT-statistics"
#define	DMU_POOL_CREATION_VERSION	"creation_version"
#define	DMU_POOL_SCAN			"scan"
#define	DMU_POOL_FREE_BPOBJ		"free_bpobj"
#define	DMU_POOL_BPTREE_OBJ		"bptree_obj"
#define	DMU_POOL_EMPTY_BPOBJ		"empty_bpobj"
#define	DMU_POOL_CHECKSUM_SALT		"org.illumos:checksum_salt"
#define	DMU_POOL_VDEV_ZAP_MAP		"com.delphix:vdev_zap_map"
#define	DMU_POOL_REMOVING		"com.delphix:removing"
#define	DMU_POOL_OBSOLETE_BPOBJ		"com.delphix:obsolete_bpobj"
#define	DMU_POOL_CONDENSING_INDIRECT	"com.delphix:condensing_indirect"
#define	DMU_POOL_ZPOOL_CHECKPOINT	"com.delphix:zpool_checkpoint"
/*
 * Allocate an object from this objset. The range of object numbers
 * available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode.
 *
 * The transaction must be assigned to a txg. The newly allocated
 * object will be "held" in the transaction (ie. you can modify the
 * newly allocated object in this transaction).
 *
 * dmu_object_alloc() chooses an object and returns it in *objectp.
 *
 * dmu_object_claim() allocates a specific object number. If that
 * number is already allocated, it fails and returns EEXIST.
 *
 * Return 0 on success, or ENOSPC or EEXIST as specified above.
 */
uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
uint64_t dmu_object_alloc_ibs(objset_t *os, dmu_object_type_t ot, int blocksize,
    int indirect_blockshift,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx);
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
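
/*
 * Example (illustrative sketch only, error handling abbreviated):
 * allocating a new object inside an assigned transaction. DMU_NEW_OBJECT
 * and TXG_WAIT are defined later in this header.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	uint64_t object;
 *	int error;
 *
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
 *	    DMU_OT_NONE, 0, tx);
 *	dmu_tx_commit(tx);
 */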
/*
 * Free an object from this objset.
 *
 * The object's data will be freed as well (ie. you don't need to call
 * dmu_free(object, 0, -1, tx)).
 *
 * The object need not be held in the transaction.
 *
 * If there are any holds on this object's buffers (via dmu_buf_hold()),
 * or tx holds on the object (via dmu_tx_hold_object()), you can not
 * free it; it fails and returns EBUSY.
 *
 * If the object is not allocated, it fails and returns ENOENT.
 *
 * Return 0 on success, or EBUSY or ENOENT as specified above.
 */
int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);
/*
 * Find the next allocated or free object.
 *
 * The objectp parameter is in-out. It will be updated to be the next
 * object which is allocated. Ignore objects which have not been
 * modified since txg.
 *
 * XXX Can only be called on an objset with no dirty data.
 *
 * Returns 0 on success, or ENOENT if there are no more objects.
 */
int dmu_object_next(objset_t *os, uint64_t *objectp,
    boolean_t hole, uint64_t txg);
/*
 * Set the data blocksize for an object.
 *
 * The object cannot have any blocks allocated beyond the first. If
 * the first block is allocated already, the new size must be greater
 * than the current block size. If these conditions are not met,
 * ENOTSUP will be returned.
 *
 * Returns 0 on success, or EBUSY if there are any holds on the object
 * contents, or ENOTSUP as described above.
 */
int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size,
    int ibs, dmu_tx_t *tx);
/*
 * Set the checksum property on a dnode. The new checksum algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx);

/*
 * Set the compress property on a dnode. The new compression algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx);

int dmu_object_remap_indirects(objset_t *os, uint64_t object, uint64_t txg);
void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx);
/*
 * Decide how to write a block: checksum, compression, number of copies, etc.
 */
#define	WP_NOFILL	0x1
#define	WP_DMU_SYNC	0x2

void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
    struct zio_prop *zp);
/*
 * The bonus data is accessed more or less like a regular buffer.
 * You must dmu_bonus_hold() to get the buffer, which will give you a
 * dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus
 * data. As with any normal buffer, you must call dmu_buf_will_dirty()
 * before modifying it, and the object must be held in an assigned
 * transaction before calling dmu_buf_will_dirty. You may use
 * dmu_buf_set_user() on the bonus buffer as well. You must release
 * your hold with dmu_buf_rele().
 *
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **);
int dmu_bonus_max(void);
int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
dmu_object_type_t dmu_get_bonustype(dmu_buf_t *);
int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
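
/*
 * Example (illustrative sketch only): updating bonus data. The tx is
 * assumed to already hold this object's bonus (dmu_tx_hold_bonus()) and
 * to be assigned to a txg; FTAG is the conventional caller tag.
 *
 *	dmu_buf_t *db;
 *	int error;
 *
 *	error = dmu_bonus_hold(os, object, FTAG, &db);
 *	if (error != 0)
 *		return (error);
 *	dmu_buf_will_dirty(db, tx);
 *	... modify the db->db_size bytes at db->db_data ...
 *	dmu_buf_rele(db, FTAG);
 */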
/*
 * Special spill buffer support used by "SA" framework
 */
int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags,
    void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
/*
 * Obtain the DMU buffer from the specified object which contains the
 * specified offset. dmu_buf_hold() puts a "hold" on the buffer, so
 * that it will remain in memory. You must release the hold with
 * dmu_buf_rele(). You must not access the dmu_buf_t after releasing your
 * hold. You must have a hold on any dmu_buf_t* you pass to the DMU.
 *
 * You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill
 * on the returned buffer before reading or writing the buffer's
 * db_data. The comments for those routines describe what particular
 * operations are valid after calling them.
 *
 * The object number must be a valid, allocated object number.
 */
int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **, int flags);
int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    void *tag, dmu_buf_t **dbp, int flags);
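
/*
 * Example (illustrative sketch only): holding the buffer that covers a
 * byte offset, dirtying it in an assigned tx that holds this object, and
 * dropping the hold. DMU_READ_PREFETCH is defined with the convenience
 * functions below.
 *
 *	dmu_buf_t *db;
 *	int error;
 *
 *	error = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (error != 0)
 *		return (error);
 *	dmu_buf_will_dirty(db, tx);
 *	... modify db->db_data ...
 *	dmu_buf_rele(db, FTAG);
 */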
/*
 * Add a reference to a dmu buffer that has already been held via
 * dmu_buf_hold() in the current context.
 */
void dmu_buf_add_ref(dmu_buf_t *db, void *tag);

/*
 * Attempt to add a reference to a dmu buffer that is in an unknown state,
 * using a pointer that may have been invalidated by eviction processing.
 * The request will succeed if the passed in dbuf still represents the
 * same os/object/blkid, is ineligible for eviction, and has at least
 * one hold by a user other than the syncer.
 */
boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object,
    uint64_t blkid, void *tag);

void dmu_buf_rele(dmu_buf_t *db, void *tag);
uint64_t dmu_buf_refcount(dmu_buf_t *db);
/*
 * dmu_buf_hold_array holds the DMU buffers which contain all bytes in a
 * range of an object. A pointer to an array of dmu_buf_t*'s is
 * returned (in *dbpp).
 *
 * dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and
 * frees the array. The hold on the array of buffers MUST be released
 * with dmu_buf_rele_array. You can NOT release the hold on each buffer
 * individually with dmu_buf_rele.
 */
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, boolean_t read, void *tag,
    int *numbufsp, dmu_buf_t ***dbpp);
int dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp,
    uint32_t flags);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag);
typedef void dmu_buf_evict_func_t(void *user_ptr);

/*
 * A DMU buffer user object may be associated with a dbuf for the
 * duration of its lifetime. This allows the user of a dbuf (client)
 * to attach private data to a dbuf (e.g. in-core only data such as a
 * dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified
 * when that dbuf has been evicted. Clients typically respond to the
 * eviction notification by freeing their private data, thus ensuring
 * the same lifetime for both dbuf and private data.
 *
 * The mapping from a dmu_buf_user_t to any client private data is the
 * client's responsibility. All current consumers of the API with private
 * data embed a dmu_buf_user_t as the first member of the structure for
 * their private data. This allows conversions between the two types
 * with a simple cast. Since the DMU buf user API never needs access
 * to the private data, other strategies can be employed if necessary
 * or convenient for the client (e.g. using container_of() to do the
 * conversion for private data that cannot have the dmu_buf_user_t as
 * its first member).
 *
 * Eviction callbacks are executed without the dbuf mutex held or any
 * other type of mechanism to guarantee that the dbuf is still available.
 * For this reason, users must assume the dbuf has already been freed
 * and not reference the dbuf from the callback context.
 *
 * Users requesting "immediate eviction" are notified as soon as the dbuf
 * is only referenced by dirty records (dirties == holds). Otherwise the
 * notification occurs after eviction processing for the dbuf begins.
 */
typedef struct dmu_buf_user {
	/*
	 * Asynchronous user eviction callback state.
	 */
	taskq_ent_t	dbu_tqent;

	/*
	 * This instance's eviction function pointers.
	 *
	 * dbu_evict_func_sync is called synchronously and then
	 * dbu_evict_func_async is executed asynchronously on a taskq.
	 */
	dmu_buf_evict_func_t *dbu_evict_func_sync;
	dmu_buf_evict_func_t *dbu_evict_func_async;

	/*
	 * Pointer to user's dbuf pointer. NULL for clients that do
	 * not associate a dbuf with their user data.
	 *
	 * The dbuf pointer is cleared upon eviction so as to catch
	 * use-after-evict bugs in clients.
	 */
	dmu_buf_t **dbu_clear_on_evict_dbufp;
} dmu_buf_user_t;
/*
 * Initialize the given dmu_buf_user_t instance with the eviction function
 * evict_func, to be called when the user is evicted.
 *
 * NOTE: This function should only be called once on a given dmu_buf_user_t.
 * To allow enforcement of this, dbu must already be zeroed on entry.
 */
static inline void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async, dmu_buf_t **clear_on_evict_dbufp)
{
	ASSERT(dbu->dbu_evict_func_sync == NULL);
	ASSERT(dbu->dbu_evict_func_async == NULL);

	/* must have at least one evict func */
	IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
	dbu->dbu_evict_func_sync = evict_func_sync;
	dbu->dbu_evict_func_async = evict_func_async;
	dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp;
}
/*
 * Attach user data to a dbuf and mark it for normal (when the dbuf's
 * data is cleared or its reference count goes to zero) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Attach user data to a dbuf and mark it for immediate (its dirty and
 * reference counts are equal) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);
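
/*
 * Example (illustrative sketch only; my_node_t and its fields are
 * hypothetical): embedding a dmu_buf_user_t as the first member of the
 * client's private data, as described above, so the user_ptr passed to
 * the eviction callback can simply be cast back.
 *
 *	typedef struct my_node {
 *		dmu_buf_user_t	mn_dbu;		(must be first member)
 *		void		*mn_private;
 *	} my_node_t;
 *
 *	static void
 *	my_node_evict_sync(void *user_ptr)
 *	{
 *		my_node_t *mn = user_ptr;
 *		kmem_free(mn, sizeof (my_node_t));
 *	}
 *
 *	my_node_t *mn = kmem_zalloc(sizeof (my_node_t), KM_SLEEP);
 *	dmu_buf_init_user(&mn->mn_dbu, my_node_evict_sync, NULL, NULL);
 *	if (dmu_buf_set_user(db, &mn->mn_dbu) != NULL) {
 *		... another user won the race; free mn and use the winner ...
 *	}
 */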
/*
 * Replace the current user of a dbuf.
 *
 * If given the current user of a dbuf, replaces the dbuf's user with
 * "new_user" and returns the user data pointer that was replaced.
 * Otherwise returns the current, and unmodified, dbuf user pointer.
 */
void *dmu_buf_replace_user(dmu_buf_t *db,
    dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);

/*
 * Remove the specified user data for a DMU buffer.
 *
 * Returns the user that was removed on success, or the current user if
 * another user currently owns the buffer.
 */
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Returns the user data (dmu_buf_user_t *) associated with this dbuf.
 */
void *dmu_buf_get_user(dmu_buf_t *db);
objset_t *dmu_buf_get_objset(dmu_buf_t *db);
dnode_t *dmu_buf_dnode_enter(dmu_buf_t *db);
void dmu_buf_dnode_exit(dmu_buf_t *db);

/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);
/*
 * Returns the blkptr associated with this dbuf, or NULL if not set.
 */
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);

/*
 * Indicate that you are going to modify the buffer's data (db_data).
 *
 * The transaction (tx) must be assigned to a txg (ie. you've called
 * dmu_tx_assign()). The buffer's object must be held in the tx
 * (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
 */
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
/*
 * You must create a transaction, then hold the objects which you will
 * (or might) modify as part of this transaction. Then you must assign
 * the transaction to a transaction group. Once the transaction has
 * been assigned, you can modify buffers which belong to held objects as
 * part of this transaction. You can't modify buffers before the
 * transaction has been assigned; you can't modify buffers which don't
 * belong to objects which this transaction holds; you can't hold
 * objects once the transaction has been assigned. You may hold an
 * object which you are going to free (with dmu_object_free()), but you
 * don't have to.
 *
 * You can abort the transaction before it has been assigned.
 *
 * Note that you may hold buffers (with dmu_buf_hold) at any time,
 * regardless of transaction state. A sketch of the full cycle follows
 * the prototypes below.
 */

#define	DMU_NEW_OBJECT	(-1ULL)
#define	DMU_OBJECT_END	(-1ULL)
dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
    int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
    uint64_t len);
void dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
    uint64_t len);
void dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add,
    const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
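
/*
 * Example (illustrative sketch only, error handling abbreviated): the
 * create/hold/assign/modify/commit cycle described above, writing "size"
 * bytes at "offset" of an existing object.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	int error;
 *
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 *
 * With TXG_NOWAIT, dmu_tx_assign() may instead return ERESTART; the usual
 * response is dmu_tx_wait(), dmu_tx_abort(), and a retry with a new tx.
 */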
/*
 * To register a commit callback, dmu_tx_callback_register() must be called.
 *
 * dcb_data is a pointer to caller private data that is passed on as a
 * callback parameter. The caller is responsible for properly allocating and
 * freeing it.
 *
 * When registering a callback, the transaction must be already created, but
 * it cannot be committed or aborted. It can be assigned to a txg or not.
 *
 * The callback will be called after the transaction has been safely written
 * to stable storage and will also be called if the dmu_tx is aborted.
 * If there is any error which prevents the transaction from being committed to
 * disk, the callback will be called with a value of error != 0.
 */
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);

void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
    void *dcb_data);
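
/*
 * Example (illustrative sketch only; my_done_cb and my_state_t are
 * hypothetical): the callback frees caller-allocated state once the txg
 * reaches stable storage, or notes the failure if error is nonzero.
 *
 *	static void
 *	my_done_cb(void *dcb_data, int error)
 *	{
 *		my_state_t *ms = dcb_data;
 *
 *		if (error != 0)
 *			... the transaction was not committed ...
 *		kmem_free(ms, sizeof (my_state_t));
 *	}
 *
 *	dmu_tx_callback_register(tx, my_done_cb, ms);
 */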
/*
 * Free up the data blocks for a defined range of a file. If size is
 * -1, the range from offset to end-of-file is freed.
 */
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size);
int dmu_free_long_object(objset_t *os, uint64_t object);
/*
 * Convenience functions.
 *
 * Canfail routines will return 0 on success, or an errno if there is a
 * nonrecoverable I/O error.
 */
#define	DMU_READ_PREFETCH	0 /* prefetch */
#define	DMU_READ_NO_PREFETCH	1 /* don't prefetch */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags);
int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx);
void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_read_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, struct page *pp, dmu_tx_t *tx);
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
void dmu_assign_arcbuf_dnode(dnode_t *handle, uint64_t offset,
    struct arc_buf *buf, dmu_tx_t *tx);
void dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf,
    dmu_tx_t *tx);
int dmu_xuio_init(struct xuio *uio, int niov);
void dmu_xuio_fini(struct xuio *uio);
int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
    size_t n);
int dmu_xuio_cnt(struct xuio *uio);
struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
void dmu_xuio_clear(struct xuio *uio, int i);
void xuio_stat_wbuf_copied(void);
void xuio_stat_wbuf_nocopy(void);
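
/*
 * Example (illustrative sketch only): dmu_read() is a canfail routine, so
 * its return value must be checked; dmu_write() returns void, so there is
 * no error to check once its tx has been assigned.
 *
 *	error = dmu_read(os, object, offset, size, buf, DMU_READ_PREFETCH);
 *	if (error != 0)
 *		return (error);
 */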
extern boolean_t zfs_prefetch_disable;
extern int zfs_max_recordsize;
/*
 * Asynchronously try to read in the data.
 */
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, enum zio_priority pri);
typedef struct dmu_object_info {
	/* All sizes are in bytes unless otherwise indicated. */
	uint32_t doi_data_block_size;
	uint32_t doi_metadata_block_size;
	dmu_object_type_t doi_type;
	dmu_object_type_t doi_bonus_type;
	uint64_t doi_bonus_size;
	uint8_t doi_indirection;		/* 2 = dnode->indirect->data */
	uint8_t doi_checksum;
	uint8_t doi_compress;
	uint64_t doi_physical_blocks_512;	/* data + metadata, 512b blks */
	uint64_t doi_max_offset;
	uint64_t doi_fill_count;		/* number of non-empty blocks */
} dmu_object_info_t;
typedef void arc_byteswap_func_t(void *buf, size_t size);
typedef struct dmu_object_type_info {
	dmu_object_byteswap_t	ot_byteswap;
	boolean_t		ot_metadata;
	boolean_t		ot_dbuf_metadata_cache;
} dmu_object_type_info_t;

typedef struct dmu_object_byteswap_info {
	arc_byteswap_func_t	*ob_func;
} dmu_object_byteswap_info_t;

extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];
/*
 * Get information on a DMU object.
 *
 * Return 0 on success or ENOENT if object is not allocated.
 *
 * If doi is NULL, just indicates whether the object exists.
 */
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
 * Like dmu_object_info_from_db, but faster still when you only care about
 * the size. This is specifically optimized for zfs_getattr().
 */
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
    u_longlong_t *nblk512);
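
/*
 * Example (illustrative sketch only): probing for existence with a NULL
 * doi, then fetching the full info.
 *
 *	dmu_object_info_t doi;
 *
 *	if (dmu_object_info(os, object, NULL) == ENOENT)
 *		... object is not allocated ...
 *
 *	error = dmu_object_info(os, object, &doi);
 *	if (error == 0)
 *		blocksize = doi.doi_data_block_size;
 */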
typedef struct dmu_objset_stats {
	uint64_t dds_num_clones; /* number of clones of this */
	uint64_t dds_creation_txg;
	dmu_objset_type_t dds_type;
	uint8_t dds_is_snapshot;
	uint8_t dds_inconsistent;
	char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;

/*
 * Get stats on a dataset.
 */
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
/*
 * Add entries to the nvlist for all the objset's properties. See
 * zfs_prop_table[] and zfs(1m) for details on the properties.
 */
void dmu_objset_stats(objset_t *os, struct nvlist *nv);
/*
 * Get the space usage statistics for statvfs().
 *
 * refdbytes is the amount of space "referenced" by this objset.
 * availbytes is the amount of space available to this objset, taking
 * into account quotas & reservations, assuming that no other objsets
 * use the space first. These values correspond to the 'referenced' and
 * 'available' properties, described in the zfs(1m) manpage.
 *
 * usedobjs and availobjs are the number of objects currently allocated,
 * and available.
 */
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp);
/*
 * The fsid_guid is a 56-bit ID that can change to avoid collisions.
 * (Contrast with the ds_guid which is a 64-bit ID that will never
 * change, so there is a small probability that it will collide.)
 */
uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
 * Get the [cm]time for an objset's snapshot dir
 */
timestruc_t dmu_objset_snap_cmtime(objset_t *os);

int dmu_objset_is_snapshot(objset_t *os);
extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_realname(objset_t *os, char *name, char *real,
    int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp);
typedef int objset_used_cb_t(dmu_object_type_t bonustype,
    void *bonus, uint64_t *userp, uint64_t *groupp);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
    objset_used_cb_t *cb);
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);
/*
 * Return the txg number for the given assigned transaction.
 */
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);
/*
 * Synchronous write.
 * If a parent zio is provided this function initiates a write on the
 * provided buffer as a child of the parent zio.
 * In the absence of a parent zio, the write is completed synchronously.
 * At write completion, blk is filled with the bp of the written block.
 * Note that while the data covered by this function will be on stable
 * storage when the write completes this new data does not become a
 * permanent part of the file until the associated transaction commits.
 */

/*
 * {zfs,zvol,ztest}_get_done() args
 */
typedef struct zgd {
	struct lwb	*zgd_lwb;
	struct blkptr	*zgd_bp;
	dmu_buf_t	*zgd_db;
	struct rl	*zgd_rl;
	void		*zgd_private;
} zgd_t;

typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);
/*
 * Find the next hole or data block in file starting at *off
 * Return found offset in *off. Return ESRCH for end of file.
 */
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
    uint64_t *off);
/*
 * Check if a DMU object has any dirty blocks. If so, sync out
 * all pending transaction groups. Otherwise, this function
 * does not alter DMU state. This could be improved to only sync
 * out the necessary transaction groups for this particular
 * object.
 */
int dmu_object_wait_synced(objset_t *os, uint64_t object);
/*
 * Initial setup and final teardown.
 */
extern void dmu_init(void);
extern void dmu_fini(void);
typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
    uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
    dmu_traverse_cb_t cb, void *arg);
int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
    struct vnode *vp, offset_t *offp);
#define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];

extern int zfs_mdcomp_disable;

#endif	/* _SYS_DMU_H */