/*-
 * Copyright (c) 2002 McAfee, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and McAfee Research, the Security Research Division of
 * McAfee, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as
 * part of the DARPA CHATS research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2013 by Saso Kiselkov. All rights reserved.
 */
/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#define	MAXNAMELEN	256

#define	_NOTE(s)

typedef enum { B_FALSE, B_TRUE } boolean_t;

/* CRC64 table */
#define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */

/*
 * Macros for various sorts of alignment and rounding when the alignment
 * is known to be a power of 2.
 */
#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2PHASE(x, align)	((x) & ((align) - 1))
#define	P2NPHASE(x, align)	(-(x) & ((align) - 1))
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))
#define	P2END(x, align)		(-(~(x) & -(align)))
#define	P2PHASEUP(x, align, phase)	((phase) - (((phase) - (x)) & -(align)))
#define	P2BOUNDARY(off, len, align)	(((off) ^ ((off) + (len) - 1)) > (align) - 1)
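
/*
 * Illustrative sketch (not part of the original header): how the power-of-2
 * helpers above behave for a 512-byte alignment.  The function name is
 * invented for illustration; uint64_t is assumed to be provided by the
 * including environment, as it is for the rest of this header.
 */
static inline void
p2_macros_example(void)
{
	uint64_t x = 1000;

	/* Round down / up to a 512-byte boundary. */
	uint64_t down = P2ALIGN(x, 512ULL);	/* 512 */
	uint64_t up = P2ROUNDUP(x, 512ULL);	/* 1024 */

	/* Offset within the 512-byte block, and bytes left to the next one. */
	uint64_t phase = P2PHASE(x, 512ULL);	/* 488 */
	uint64_t rest = P2NPHASE(x, 512ULL);	/* 24 */

	(void) down; (void) up; (void) phase; (void) rest;
}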

/*
 * General-purpose 32-bit and 64-bit bitfield encodings.
 */
#define	BF32_DECODE(x, low, len)	P2PHASE((x) >> (low), 1U << (len))
#define	BF64_DECODE(x, low, len)	P2PHASE((x) >> (low), 1ULL << (len))
#define	BF32_ENCODE(x, low, len)	(P2PHASE((x), 1U << (len)) << (low))
#define	BF64_ENCODE(x, low, len)	(P2PHASE((x), 1ULL << (len)) << (low))

#define	BF32_GET(x, low, len)	BF32_DECODE(x, low, len)
#define	BF64_GET(x, low, len)	BF64_DECODE(x, low, len)

#define	BF32_SET(x, low, len, val)	\
	((x) ^= BF32_ENCODE((x >> low) ^ (val), low, len))
#define	BF64_SET(x, low, len, val)	\
	((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len))

#define	BF32_GET_SB(x, low, len, shift, bias)	\
	((BF32_GET(x, low, len) + (bias)) << (shift))
#define	BF64_GET_SB(x, low, len, shift, bias)	\
	((BF64_GET(x, low, len) + (bias)) << (shift))

#define	BF32_SET_SB(x, low, len, shift, bias, val)	\
	BF32_SET(x, low, len, ((val) >> (shift)) - (bias))
#define	BF64_SET_SB(x, low, len, shift, bias, val)	\
	BF64_SET(x, low, len, ((val) >> (shift)) - (bias))
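
/*
 * Illustrative sketch (not part of the original header): round-tripping a
 * value through the shift/bias helpers.  Many on-disk size fields are kept
 * in 512-byte units (shift = 9) and sometimes biased by one so that a stored
 * value of 0 means "one sector".  The function name is invented.
 */
static inline uint64_t
bf64_sb_example(void)
{
	uint64_t word = 0;

	/*
	 * Store 8192 bytes in bits [0,16) with shift 9 and bias 1:
	 * the encoded field becomes 8192/512 - 1 = 15.
	 */
	BF64_SET_SB(word, 0, 16, 9, 1, 8192ULL);

	/* Decoding applies the inverse: (15 + 1) << 9 == 8192. */
	return (BF64_GET_SB(word, 0, 16, 9, 1));
}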

/*
 * Macros to reverse byte order
 */
#define	BSWAP_8(x)	((x) & 0xff)
#define	BSWAP_16(x)	((BSWAP_8(x) << 8) | BSWAP_8((x) >> 8))
#define	BSWAP_32(x)	((BSWAP_16(x) << 16) | BSWAP_16((x) >> 16))
#define	BSWAP_64(x)	((BSWAP_32(x) << 32) | BSWAP_32((x) >> 32))

#define	SPA_MINBLOCKSHIFT	9
#define	SPA_OLDMAXBLOCKSHIFT	17
#define	SPA_MAXBLOCKSHIFT	24
#define	SPA_MINBLOCKSIZE	(1ULL << SPA_MINBLOCKSHIFT)
#define	SPA_OLDMAXBLOCKSIZE	(1ULL << SPA_OLDMAXBLOCKSHIFT)
#define	SPA_MAXBLOCKSIZE	(1ULL << SPA_MAXBLOCKSHIFT)

/*
 * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
 * The ASIZE encoding should be at least 64 times larger (6 more bits)
 * to support up to 4-way RAID-Z mirror mode with worst-case gang block
 * overhead, three DVAs per bp, plus one more bit in case we do anything
 * else that expands the ASIZE.
 */
#define	SPA_LSIZEBITS	16	/* LSIZE up to 32M (2^16 * 512) */
#define	SPA_PSIZEBITS	16	/* PSIZE up to 32M (2^16 * 512) */
#define	SPA_ASIZEBITS	24	/* ASIZE up to 64 times larger */

/*
 * All SPA data is represented by 128-bit data virtual addresses (DVAs).
 * The members of the dva_t should be considered opaque outside the SPA.
 */
typedef struct dva {
	uint64_t	dva_word[2];
} dva_t;

/*
 * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
 */
typedef struct zio_cksum {
	uint64_t	zc_word[4];
} zio_cksum_t;

/*
 * Some checksums/hashes need a 256-bit initialization salt. This salt is kept
 * secret and is suitable for use in MAC algorithms as the key.
 */
typedef struct zio_cksum_salt {
	uint8_t		zcs_bytes[32];
} zio_cksum_salt_t;

/*
 * Each block is described by its DVAs, time of birth, checksum, etc.
 * The word-by-word, bit-by-bit layout of the blkptr is as follows:
 *
 *	64	56	48	40	32	24	16	8	0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 0	| vdev1 | GRID  |	  ASIZE		|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 1	|G|			 offset1				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 2	| vdev2 | GRID  |	  ASIZE		|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 3	|G|			 offset2				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 4	| vdev3 | GRID  |	  ASIZE		|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 5	|G|			 offset3				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 6	|BDX|lvl| type	| cksum |E| comp|     PSIZE	|     LSIZE	|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 7	|			padding					|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 8	|			padding					|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 9	|			physical birth txg			|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * a	|			logical birth txg			|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * b	|			fill count				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * c	|			checksum[0]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * d	|			checksum[1]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * e	|			checksum[2]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * f	|			checksum[3]				|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Legend:
 *
 * vdev		virtual device ID
 * offset	offset into virtual device
 * LSIZE	logical size
 * PSIZE	physical size (after compression)
 * ASIZE	allocated size (including RAID-Z parity and gang block headers)
 * GRID		RAID-Z layout information (reserved for future use)
 * cksum	checksum function
 * B		byteorder (endianness)
 * D		dedup
 * X		encryption (on version 30, which is not supported)
 * E		blkptr_t contains embedded data (see below)
 * lvl		level of indirection
 * type		DMU object type
 * phys birth	txg of block allocation; zero if same as logical birth txg
 * log. birth	transaction group in which the block was logically born
 * fill count	number of non-zero blocks under this bp
 * checksum[4]	256-bit checksum of the data this bp describes
 * comp		compression function
 * G		gang block indicator
 */

/*
 * "Embedded" blkptr_t's don't actually point to a block, instead they
 * have a data payload embedded in the blkptr_t itself.  See the comment
 * in blkptr.c for more details.
 *
 * The blkptr_t is laid out as follows:
 *
 *	64	56	48	40	32	24	16	8	0
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 0	|			payload					|
 * 1	|			payload					|
 * 2	|			payload					|
 * 3	|			payload					|
 * 4	|			payload					|
 * 5	|			payload					|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 6	|BDX|lvl| type	| etype |E| comp|    PSIZE|	LSIZE		|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * 7	|			payload					|
 * 8	|			payload					|
 * 9	|			payload					|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * a	|			logical birth txg			|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 * b	|			payload					|
 * c	|			payload					|
 * d	|			payload					|
 * e	|			payload					|
 * f	|			payload					|
 *	+-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Legend:
 *
 * payload		contains the embedded data
 * B (byteorder)	byteorder (endianness)
 * D (dedup)		padding (set to zero)
 * X			encryption (set to zero; see above)
 * E (embedded)		set to one
 * lvl			indirection level
 * type			DMU object type
 * etype		how to interpret embedded data (BP_EMBEDDED_TYPE_*)
 * comp			compression function of payload
 * PSIZE		size of payload after compression, in bytes
 * LSIZE		logical size of payload, in bytes
 *			note that 25 bits is enough to store the largest
 *			"normal" BP's LSIZE (2^16 * 2^9) in bytes
 * log. birth		transaction group in which the block was logically born
 *
 * Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
 * bp's they are stored in units of SPA_MINBLOCKSHIFT.
 * Generally, the generic BP_GET_*() macros can be used on embedded BP's.
 * The B, D, X, lvl, type, and comp fields are stored the same as with normal
 * BP's so the BP_SET_* macros can be used with them.  etype, PSIZE, LSIZE must
 * be set with the BPE_SET_* macros.  BP_SET_EMBEDDED() should be called before
 * other macros, as they assert that they are only used on BP's of the correct
 * "embedded-ness".
 */

#define	BPE_GET_ETYPE(bp)	\
	(ASSERT(BP_IS_EMBEDDED(bp)), \
	BF64_GET((bp)->blk_prop, 40, 8))
#define	BPE_SET_ETYPE(bp, t)	do { \
	ASSERT(BP_IS_EMBEDDED(bp)); \
	BF64_SET((bp)->blk_prop, 40, 8, t); \
_NOTE(CONSTCOND) } while (0)

#define	BPE_GET_LSIZE(bp)	\
	(ASSERT(BP_IS_EMBEDDED(bp)), \
	BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
#define	BPE_SET_LSIZE(bp, x)	do { \
	ASSERT(BP_IS_EMBEDDED(bp)); \
	BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)

#define	BPE_GET_PSIZE(bp)	\
	(ASSERT(BP_IS_EMBEDDED(bp)), \
	BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
#define	BPE_SET_PSIZE(bp, x)	do { \
	ASSERT(BP_IS_EMBEDDED(bp)); \
	BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)

typedef enum bp_embedded_type {
	BP_EMBEDDED_TYPE_DATA,
	BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */
	NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED
} bp_embedded_type_t;

#define	BPE_NUM_WORDS	14
#define	BPE_PAYLOAD_SIZE	(BPE_NUM_WORDS * sizeof (uint64_t))
#define	BPE_IS_PAYLOADWORD(bp, wp) \
	((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)

#define	SPA_BLKPTRSHIFT	7	/* blkptr_t is 128 bytes */
#define	SPA_DVAS_PER_BP	3	/* Number of DVAs in a bp */

typedef struct blkptr {
	dva_t		blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
	uint64_t	blk_prop;	/* size, compression, type, etc */
	uint64_t	blk_pad[2];	/* Extra space for the future */
	uint64_t	blk_phys_birth;	/* txg when block was allocated */
	uint64_t	blk_birth;	/* transaction group at birth */
	uint64_t	blk_fill;	/* fill count */
	zio_cksum_t	blk_cksum;	/* 256-bit checksum */
} blkptr_t;
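
/*
 * Illustrative sketch (not part of the original header) of how the payload of
 * an embedded blkptr_t can be gathered into a flat buffer: every 64-bit word
 * of the bp except blk_prop and blk_birth carries payload bytes, low byte
 * first.  This mirrors what decode_embedded_bp_compressed() (declared at the
 * end of this file) does; the function name and the open-coded PSIZE field
 * extraction below are for illustration only.
 */
static inline void
bpe_copy_payload_sketch(const blkptr_t *bp, uint8_t *buf)
{
	/* Same bit field as BPE_GET_PSIZE(), without the ASSERT(). */
	uint64_t psize = BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1);
	const uint64_t *bp64 = (const uint64_t *)bp;
	uint64_t w = 0;
	uint64_t i;

	for (i = 0; i < psize; i++) {
		if (i % sizeof (w) == 0) {
			/* Skip the non-payload words (blk_prop, blk_birth). */
			while (!BPE_IS_PAYLOADWORD(bp, bp64))
				bp64++;
			w = *bp64++;
		}
		buf[i] = (uint8_t)BF64_GET(w, (i % sizeof (w)) * 8, 8);
	}
}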

/*
 * Macros to get and set fields in a bp or DVA.
 */
#define	DVA_GET_ASIZE(dva)	\
	BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
#define	DVA_SET_ASIZE(dva, x)	\
	BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
	SPA_MINBLOCKSHIFT, 0, x)

#define	DVA_GET_GRID(dva)	BF64_GET((dva)->dva_word[0], 24, 8)
#define	DVA_SET_GRID(dva, x)	BF64_SET((dva)->dva_word[0], 24, 8, x)

#define	DVA_GET_VDEV(dva)	BF64_GET((dva)->dva_word[0], 32, 32)
#define	DVA_SET_VDEV(dva, x)	BF64_SET((dva)->dva_word[0], 32, 32, x)

#define	DVA_GET_OFFSET(dva)	\
	BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define	DVA_SET_OFFSET(dva, x)	\
	BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)

#define	DVA_GET_GANG(dva)	BF64_GET((dva)->dva_word[1], 63, 1)
#define	DVA_SET_GANG(dva, x)	BF64_SET((dva)->dva_word[1], 63, 1, x)

#define	BP_GET_LSIZE(bp)	\
	(BP_IS_EMBEDDED(bp) ? \
	(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
	BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define	BP_SET_LSIZE(bp, x)	do { \
	ASSERT(!BP_IS_EMBEDDED(bp)); \
	BF64_SET_SB((bp)->blk_prop, \
	    0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
_NOTE(CONSTCOND) } while (0)

#define	BP_GET_PSIZE(bp)	\
	BF64_GET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1)
#define	BP_SET_PSIZE(bp, x)	\
	BF64_SET_SB((bp)->blk_prop, 16, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x)

#define	BP_GET_COMPRESS(bp)	BF64_GET((bp)->blk_prop, 32, 7)
#define	BP_SET_COMPRESS(bp, x)	BF64_SET((bp)->blk_prop, 32, 7, x)

#define	BP_GET_CHECKSUM(bp)	BF64_GET((bp)->blk_prop, 40, 8)
#define	BP_SET_CHECKSUM(bp, x)	BF64_SET((bp)->blk_prop, 40, 8, x)

#define	BP_GET_TYPE(bp)		BF64_GET((bp)->blk_prop, 48, 8)
#define	BP_SET_TYPE(bp, x)	BF64_SET((bp)->blk_prop, 48, 8, x)

#define	BP_GET_LEVEL(bp)	BF64_GET((bp)->blk_prop, 56, 5)
#define	BP_SET_LEVEL(bp, x)	BF64_SET((bp)->blk_prop, 56, 5, x)

#define	BP_IS_EMBEDDED(bp)	BF64_GET((bp)->blk_prop, 39, 1)

#define	BP_GET_DEDUP(bp)	BF64_GET((bp)->blk_prop, 62, 1)
#define	BP_SET_DEDUP(bp, x)	BF64_SET((bp)->blk_prop, 62, 1, x)

#define	BP_GET_BYTEORDER(bp)	BF64_GET((bp)->blk_prop, 63, 1)
#define	BP_SET_BYTEORDER(bp, x)	BF64_SET((bp)->blk_prop, 63, 1, x)

#define	BP_PHYSICAL_BIRTH(bp)	\
	((bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)

#define	BP_GET_ASIZE(bp)	\
	(DVA_GET_ASIZE(&(bp)->blk_dva[0]) + DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
	DVA_GET_ASIZE(&(bp)->blk_dva[2]))

#define	BP_GET_UCSIZE(bp)	\
	((BP_GET_LEVEL(bp) > 0 || dmu_ot[BP_GET_TYPE(bp)].ot_metadata) ? \
	BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))

#define	BP_GET_NDVAS(bp)	\
	(!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
	!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
	!!DVA_GET_ASIZE(&(bp)->blk_dva[2]))

#define	DVA_EQUAL(dva1, dva2)	\
	((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
	(dva1)->dva_word[0] == (dva2)->dva_word[0])

#define	ZIO_CHECKSUM_EQUAL(zc1, zc2) \
	(0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
	((zc1).zc_word[1] - (zc2).zc_word[1]) | \
	((zc1).zc_word[2] - (zc2).zc_word[2]) | \
	((zc1).zc_word[3] - (zc2).zc_word[3])))

#define	DVA_IS_VALID(dva)	(DVA_GET_ASIZE(dva) != 0)

#define	ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)	\
{	\
	(zcp)->zc_word[0] = w0;	\
	(zcp)->zc_word[1] = w1;	\
	(zcp)->zc_word[2] = w2;	\
	(zcp)->zc_word[3] = w3;	\
}

#define	BP_IDENTITY(bp)		(&(bp)->blk_dva[0])
#define	BP_IS_GANG(bp)		DVA_GET_GANG(BP_IDENTITY(bp))
#define	DVA_IS_EMPTY(dva)	((dva)->dva_word[0] == 0ULL && \
				(dva)->dva_word[1] == 0ULL)
#define	BP_IS_HOLE(bp)		DVA_IS_EMPTY(BP_IDENTITY(bp))
#define	BP_IS_OLDER(bp, txg)	(!BP_IS_HOLE(bp) && (bp)->blk_birth < (txg))

#define	BP_ZERO(bp)	\
{	\
	(bp)->blk_dva[0].dva_word[0] = 0;	\
	(bp)->blk_dva[0].dva_word[1] = 0;	\
	(bp)->blk_dva[1].dva_word[0] = 0;	\
	(bp)->blk_dva[1].dva_word[1] = 0;	\
	(bp)->blk_dva[2].dva_word[0] = 0;	\
	(bp)->blk_dva[2].dva_word[1] = 0;	\
	(bp)->blk_prop = 0;	\
	(bp)->blk_pad[0] = 0;	\
	(bp)->blk_pad[1] = 0;	\
	(bp)->blk_phys_birth = 0;	\
	(bp)->blk_birth = 0;	\
	(bp)->blk_fill = 0;	\
	ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0);	\
}
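
/*
 * Illustrative sketch (not part of the original header): classifying a block
 * pointer before issuing I/O, the way any reader of this on-disk format has
 * to.  The function name and its return convention are invented; only the
 * macros used here come from the header.
 */
static inline int
blkptr_describe_sketch(const blkptr_t *bp, uint64_t *vdevp, uint64_t *offsetp)
{
	int d;

	if (BP_IS_HOLE(bp))
		return (0);	/* no data: reads back as zeros */

	if (BP_IS_EMBEDDED(bp))
		return (1);	/* payload lives in the bp itself */

	/*
	 * A regular bp: up to SPA_DVAS_PER_BP copies of the block.
	 * Use the first valid DVA; DVA_GET_GANG() would tell whether it
	 * points at a gang header rather than the data itself.
	 */
	for (d = 0; d < SPA_DVAS_PER_BP; d++) {
		const dva_t *dva = &bp->blk_dva[d];

		if (!DVA_IS_VALID(dva))
			continue;
		*vdevp = DVA_GET_VDEV(dva);
		*offsetp = DVA_GET_OFFSET(dva);	/* byte offset on the vdev */
		return (2);
	}
	return (-1);	/* no usable DVA */
}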

#define	BPE_NUM_WORDS	14
#define	BPE_PAYLOAD_SIZE	(BPE_NUM_WORDS * sizeof (uint64_t))
#define	BPE_IS_PAYLOADWORD(bp, wp) \
	((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)

/*
 * Embedded checksum
 */
#define	ZEC_MAGIC	0x210da7ab10c7a11ULL

typedef struct zio_eck {
	uint64_t	zec_magic;	/* for validation, endianness */
	zio_cksum_t	zec_cksum;	/* 256-bit checksum */
} zio_eck_t;

/*
 * Gang block headers are self-checksumming and contain an array
 * of block pointers.
 */
#define	SPA_GANGBLOCKSIZE	SPA_MINBLOCKSIZE
#define	SPA_GBH_NBLKPTRS	((SPA_GANGBLOCKSIZE - \
	sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define	SPA_GBH_FILLER		((SPA_GANGBLOCKSIZE - \
	sizeof (zio_eck_t) - \
	(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
	sizeof (uint64_t))

typedef struct zio_gbh {
	blkptr_t	zg_blkptr[SPA_GBH_NBLKPTRS];
	uint64_t	zg_filler[SPA_GBH_FILLER];
	zio_eck_t	zg_tail;
} zio_gbh_phys_t;

#define	VDEV_RAIDZ_MAXPARITY	3

#define	VDEV_PAD_SIZE		(8 << 10)
/* 2 padding areas (vl_pad1 and vl_pad2) to skip */
#define	VDEV_SKIP_SIZE		(VDEV_PAD_SIZE * 2)
#define	VDEV_PHYS_SIZE		(112 << 10)
#define	VDEV_UBERBLOCK_RING	(128 << 10)

#define	VDEV_UBERBLOCK_SHIFT(vd)	\
	MAX((vd)->v_top->v_ashift, UBERBLOCK_SHIFT)
#define	VDEV_UBERBLOCK_COUNT(vd)	\
	(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define	VDEV_UBERBLOCK_OFFSET(vd, n)	\
	offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define	VDEV_UBERBLOCK_SIZE(vd)		(1ULL << VDEV_UBERBLOCK_SHIFT(vd))

typedef struct vdev_phys {
	char		vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
	zio_eck_t	vp_zbt;
} vdev_phys_t;

typedef struct vdev_label {
	char		vl_pad1[VDEV_PAD_SIZE];			/*   8K */
	char		vl_pad2[VDEV_PAD_SIZE];			/*   8K */
	vdev_phys_t	vl_vdev_phys;				/* 112K */
	char		vl_uberblock[VDEV_UBERBLOCK_RING];	/* 128K */
} vdev_label_t;							/* 256K total */

/*
 * vdev_dirty() flags
 */
#define	VDD_METASLAB	0x01
#define	VDD_DTL		0x02

/*
 * Size and offset of embedded boot loader region on each label.
 * The total size of the first two labels plus the boot area is 4MB.
 */
#define	VDEV_BOOT_OFFSET	(2 * sizeof (vdev_label_t))
#define	VDEV_BOOT_SIZE		(7ULL << 19)	/* 3.5M */

/*
 * Size of label regions at the start and end of each leaf device.
 */
#define	VDEV_LABEL_START_SIZE	(2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define	VDEV_LABEL_END_SIZE	(2 * sizeof (vdev_label_t))
#define	VDEV_LABELS		4
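
/*
 * Illustrative sketch (not part of the original header): byte offsets of the
 * four vdev labels on a leaf device, derived from the sizes above.  Labels
 * L0 and L1 sit at the front of the device, L2 and L3 at the back; "psize"
 * is the device size in bytes and is assumed here to already be label
 * aligned.  The helper name is invented for illustration.
 */
static inline uint64_t
vdev_label_offset_sketch(uint64_t psize, int l)
{
	if (l < VDEV_LABELS / 2)	/* L0, L1 */
		return ((uint64_t)l * sizeof (vdev_label_t));
	/* L2, L3: the last two labels before the end of the device. */
	return (psize - (uint64_t)(VDEV_LABELS - l) * sizeof (vdev_label_t));
}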

enum zio_checksum {
	ZIO_CHECKSUM_INHERIT = 0,
	ZIO_CHECKSUM_ON,
	ZIO_CHECKSUM_OFF,
	ZIO_CHECKSUM_LABEL,
	ZIO_CHECKSUM_GANG_HEADER,
	ZIO_CHECKSUM_ZILOG,
	ZIO_CHECKSUM_FLETCHER_2,
	ZIO_CHECKSUM_FLETCHER_4,
	ZIO_CHECKSUM_SHA256,
	ZIO_CHECKSUM_ZILOG2,
	ZIO_CHECKSUM_NOPARITY,
	ZIO_CHECKSUM_SHA512,
	ZIO_CHECKSUM_SKEIN,
	ZIO_CHECKSUM_EDONR,
	ZIO_CHECKSUM_FUNCTIONS
};

#define	ZIO_CHECKSUM_ON_VALUE	ZIO_CHECKSUM_FLETCHER_4
#define	ZIO_CHECKSUM_DEFAULT	ZIO_CHECKSUM_ON

enum zio_compress {
	ZIO_COMPRESS_INHERIT = 0,
	ZIO_COMPRESS_ON,
	ZIO_COMPRESS_OFF,
	ZIO_COMPRESS_LZJB,
	ZIO_COMPRESS_EMPTY,
	ZIO_COMPRESS_GZIP_1,
	ZIO_COMPRESS_GZIP_2,
	ZIO_COMPRESS_GZIP_3,
	ZIO_COMPRESS_GZIP_4,
	ZIO_COMPRESS_GZIP_5,
	ZIO_COMPRESS_GZIP_6,
	ZIO_COMPRESS_GZIP_7,
	ZIO_COMPRESS_GZIP_8,
	ZIO_COMPRESS_GZIP_9,
	ZIO_COMPRESS_ZLE,
	ZIO_COMPRESS_LZ4,
	ZIO_COMPRESS_FUNCTIONS
};

#define	ZIO_COMPRESS_ON_VALUE	ZIO_COMPRESS_LZJB
#define	ZIO_COMPRESS_DEFAULT	ZIO_COMPRESS_OFF

/* nvlist pack encoding */
#define	NV_ENCODE_NATIVE	0
#define	NV_ENCODE_XDR		1

typedef enum {
	DATA_TYPE_UNKNOWN = 0,
	DATA_TYPE_BOOLEAN,
	DATA_TYPE_BYTE,
	DATA_TYPE_INT16,
	DATA_TYPE_UINT16,
	DATA_TYPE_INT32,
	DATA_TYPE_UINT32,
	DATA_TYPE_INT64,
	DATA_TYPE_UINT64,
	DATA_TYPE_STRING,
	DATA_TYPE_BYTE_ARRAY,
	DATA_TYPE_INT16_ARRAY,
	DATA_TYPE_UINT16_ARRAY,
	DATA_TYPE_INT32_ARRAY,
	DATA_TYPE_UINT32_ARRAY,
	DATA_TYPE_INT64_ARRAY,
	DATA_TYPE_UINT64_ARRAY,
	DATA_TYPE_STRING_ARRAY,
	DATA_TYPE_HRTIME,
	DATA_TYPE_NVLIST,
	DATA_TYPE_NVLIST_ARRAY,
	DATA_TYPE_BOOLEAN_VALUE,
	DATA_TYPE_INT8,
	DATA_TYPE_UINT8,
	DATA_TYPE_BOOLEAN_ARRAY,
	DATA_TYPE_INT8_ARRAY,
	DATA_TYPE_UINT8_ARRAY
} data_type_t;

/*
 * On-disk version number.
 */
#define	SPA_VERSION_1		1ULL
#define	SPA_VERSION_2		2ULL
#define	SPA_VERSION_3		3ULL
#define	SPA_VERSION_4		4ULL
#define	SPA_VERSION_5		5ULL
#define	SPA_VERSION_6		6ULL
#define	SPA_VERSION_7		7ULL
#define	SPA_VERSION_8		8ULL
#define	SPA_VERSION_9		9ULL
#define	SPA_VERSION_10		10ULL
#define	SPA_VERSION_11		11ULL
#define	SPA_VERSION_12		12ULL
#define	SPA_VERSION_13		13ULL
#define	SPA_VERSION_14		14ULL
#define	SPA_VERSION_15		15ULL
#define	SPA_VERSION_16		16ULL
#define	SPA_VERSION_17		17ULL
#define	SPA_VERSION_18		18ULL
#define	SPA_VERSION_19		19ULL
#define	SPA_VERSION_20		20ULL
#define	SPA_VERSION_21		21ULL
#define	SPA_VERSION_22		22ULL
#define	SPA_VERSION_23		23ULL
#define	SPA_VERSION_24		24ULL
#define	SPA_VERSION_25		25ULL
#define	SPA_VERSION_26		26ULL
#define	SPA_VERSION_27		27ULL
#define	SPA_VERSION_28		28ULL
#define	SPA_VERSION_5000	5000ULL

/*
 * When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
 * format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*},
 * and do the appropriate changes.  Also bump the version number in
 * usr/src/grub/capability.
 */
#define	SPA_VERSION		SPA_VERSION_5000
#define	SPA_VERSION_STRING	"5000"

/*
 * Symbolic names for the changes that caused a SPA_VERSION switch.
 * Used in the code when checking for presence or absence of a feature.
 * Feel free to define multiple symbolic names for each version if there
 * were multiple changes to on-disk structures during that version.
 *
 * NOTE: When checking the current SPA_VERSION in your code, be sure
 *       to use spa_version() since it reports the version of the
 *       last synced uberblock.  Checking the in-flight version can
 *       be dangerous in some cases.
 */
#define	SPA_VERSION_INITIAL		SPA_VERSION_1
#define	SPA_VERSION_DITTO_BLOCKS	SPA_VERSION_2
#define	SPA_VERSION_SPARES		SPA_VERSION_3
#define	SPA_VERSION_RAID6		SPA_VERSION_3
#define	SPA_VERSION_BPLIST_ACCOUNT	SPA_VERSION_3
#define	SPA_VERSION_RAIDZ_DEFLATE	SPA_VERSION_3
#define	SPA_VERSION_DNODE_BYTES		SPA_VERSION_3
#define	SPA_VERSION_ZPOOL_HISTORY	SPA_VERSION_4
#define	SPA_VERSION_GZIP_COMPRESSION	SPA_VERSION_5
#define	SPA_VERSION_BOOTFS		SPA_VERSION_6
#define	SPA_VERSION_SLOGS		SPA_VERSION_7
#define	SPA_VERSION_DELEGATED_PERMS	SPA_VERSION_8
#define	SPA_VERSION_FUID		SPA_VERSION_9
#define	SPA_VERSION_REFRESERVATION	SPA_VERSION_9
#define	SPA_VERSION_REFQUOTA		SPA_VERSION_9
#define	SPA_VERSION_UNIQUE_ACCURATE	SPA_VERSION_9
#define	SPA_VERSION_L2CACHE		SPA_VERSION_10
#define	SPA_VERSION_NEXT_CLONES		SPA_VERSION_11
#define	SPA_VERSION_ORIGIN		SPA_VERSION_11
#define	SPA_VERSION_DSL_SCRUB		SPA_VERSION_11
#define	SPA_VERSION_SNAP_PROPS		SPA_VERSION_12
#define	SPA_VERSION_USED_BREAKDOWN	SPA_VERSION_13
#define	SPA_VERSION_PASSTHROUGH_X	SPA_VERSION_14
#define	SPA_VERSION_USERSPACE		SPA_VERSION_15
#define	SPA_VERSION_STMF_PROP		SPA_VERSION_16
#define	SPA_VERSION_RAIDZ3		SPA_VERSION_17
#define	SPA_VERSION_USERREFS		SPA_VERSION_18
#define	SPA_VERSION_HOLES		SPA_VERSION_19
#define	SPA_VERSION_ZLE_COMPRESSION	SPA_VERSION_20
#define	SPA_VERSION_DEDUP		SPA_VERSION_21
#define	SPA_VERSION_RECVD_PROPS		SPA_VERSION_22
#define	SPA_VERSION_SLIM_ZIL		SPA_VERSION_23
#define	SPA_VERSION_SA			SPA_VERSION_24
#define	SPA_VERSION_SCAN		SPA_VERSION_25
#define	SPA_VERSION_DIR_CLONES		SPA_VERSION_26
#define	SPA_VERSION_DEADLISTS		SPA_VERSION_26
#define	SPA_VERSION_FAST_SNAP		SPA_VERSION_27
#define	SPA_VERSION_MULTI_REPLACE	SPA_VERSION_28
#define	SPA_VERSION_BEFORE_FEATURES	SPA_VERSION_28
#define	SPA_VERSION_FEATURES		SPA_VERSION_5000

#define	SPA_VERSION_IS_SUPPORTED(v) \
	(((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
	((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
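
/*
 * Illustrative sketch (not part of the original header): how a reader would
 * gate pool import on the label's "version" value.  The function name is
 * invented; the check itself is just SPA_VERSION_IS_SUPPORTED() above.
 */
static inline boolean_t
spa_version_usable_sketch(uint64_t ondisk_version)
{
	/*
	 * Legacy pools report 1..28 directly; feature-flag pools report
	 * exactly 5000 and advertise the details via features_for_read.
	 */
	return (SPA_VERSION_IS_SUPPORTED(ondisk_version) ? B_TRUE : B_FALSE);
}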

/*
 * The following are configuration names used in the nvlist describing a pool's
 * configuration.
 */
#define	ZPOOL_CONFIG_VERSION		"version"
#define	ZPOOL_CONFIG_POOL_NAME		"name"
#define	ZPOOL_CONFIG_POOL_STATE		"state"
#define	ZPOOL_CONFIG_POOL_TXG		"txg"
#define	ZPOOL_CONFIG_POOL_GUID		"pool_guid"
#define	ZPOOL_CONFIG_CREATE_TXG		"create_txg"
#define	ZPOOL_CONFIG_TOP_GUID		"top_guid"
#define	ZPOOL_CONFIG_VDEV_TREE		"vdev_tree"
#define	ZPOOL_CONFIG_TYPE		"type"
#define	ZPOOL_CONFIG_CHILDREN		"children"
#define	ZPOOL_CONFIG_ID			"id"
#define	ZPOOL_CONFIG_GUID		"guid"
#define	ZPOOL_CONFIG_PATH		"path"
#define	ZPOOL_CONFIG_DEVID		"devid"
#define	ZPOOL_CONFIG_PHYS_PATH		"phys_path"
#define	ZPOOL_CONFIG_METASLAB_ARRAY	"metaslab_array"
#define	ZPOOL_CONFIG_METASLAB_SHIFT	"metaslab_shift"
#define	ZPOOL_CONFIG_ASHIFT		"ashift"
#define	ZPOOL_CONFIG_ASIZE		"asize"
#define	ZPOOL_CONFIG_DTL		"DTL"
#define	ZPOOL_CONFIG_STATS		"stats"
#define	ZPOOL_CONFIG_WHOLE_DISK		"whole_disk"
#define	ZPOOL_CONFIG_ERRCOUNT		"error_count"
#define	ZPOOL_CONFIG_NOT_PRESENT	"not_present"
#define	ZPOOL_CONFIG_SPARES		"spares"
#define	ZPOOL_CONFIG_IS_SPARE		"is_spare"
#define	ZPOOL_CONFIG_NPARITY		"nparity"
#define	ZPOOL_CONFIG_HOSTID		"hostid"
#define	ZPOOL_CONFIG_HOSTNAME		"hostname"
#define	ZPOOL_CONFIG_IS_LOG		"is_log"
#define	ZPOOL_CONFIG_TIMESTAMP		"timestamp"	/* not stored on disk */
#define	ZPOOL_CONFIG_FEATURES_FOR_READ	"features_for_read"

/*
 * The persistent vdev state is stored as separate values rather than a single
 * 'vdev_state' entry.  This is because a device can be in multiple states, such
 * as offline and degraded.
 */
#define	ZPOOL_CONFIG_OFFLINE		"offline"
#define	ZPOOL_CONFIG_FAULTED		"faulted"
#define	ZPOOL_CONFIG_DEGRADED		"degraded"
#define	ZPOOL_CONFIG_REMOVED		"removed"
#define	ZPOOL_CONFIG_FRU		"fru"
#define	ZPOOL_CONFIG_AUX_STATE		"aux_state"

#define	VDEV_TYPE_ROOT		"root"
#define	VDEV_TYPE_MIRROR	"mirror"
#define	VDEV_TYPE_REPLACING	"replacing"
#define	VDEV_TYPE_RAIDZ		"raidz"
#define	VDEV_TYPE_DISK		"disk"
#define	VDEV_TYPE_FILE		"file"
#define	VDEV_TYPE_MISSING	"missing"
#define	VDEV_TYPE_HOLE		"hole"
#define	VDEV_TYPE_SPARE		"spare"
#define	VDEV_TYPE_LOG		"log"
#define	VDEV_TYPE_L2CACHE	"l2cache"

/*
 * This is needed in userland to report the minimum necessary device size.
 */
#define	SPA_MINDEVSIZE		(64ULL << 20)

/*
 * The location of the pool configuration repository, shared between kernel and
 * userland.
 */
#define	ZPOOL_CACHE		"/boot/zfs/zpool.cache"

/*
 * vdev states are ordered from least to most healthy.
 * A vdev that's CANT_OPEN or below is considered unusable.
 */
typedef enum vdev_state {
	VDEV_STATE_UNKNOWN = 0,	/* Uninitialized vdev */
	VDEV_STATE_CLOSED,	/* Not currently open */
	VDEV_STATE_OFFLINE,	/* Not allowed to open */
	VDEV_STATE_REMOVED,	/* Explicitly removed from system */
	VDEV_STATE_CANT_OPEN,	/* Tried to open, but failed */
	VDEV_STATE_FAULTED,	/* External request to fault device */
	VDEV_STATE_DEGRADED,	/* Replicated vdev with unhealthy kids */
	VDEV_STATE_HEALTHY	/* Presumed good */
} vdev_state_t;

/*
 * vdev aux states.  When a vdev is in the CANT_OPEN state, the aux field
 * of the vdev stats structure uses these constants to distinguish why.
 */
typedef enum vdev_aux {
	VDEV_AUX_NONE,		/* no error */
	VDEV_AUX_OPEN_FAILED,	/* ldi_open_*() or vn_open() failed */
	VDEV_AUX_CORRUPT_DATA,	/* bad label or disk contents */
	VDEV_AUX_NO_REPLICAS,	/* insufficient number of replicas */
	VDEV_AUX_BAD_GUID_SUM,	/* vdev guid sum doesn't match */
	VDEV_AUX_TOO_SMALL,	/* vdev size is too small */
	VDEV_AUX_BAD_LABEL,	/* the label is OK but invalid */
	VDEV_AUX_VERSION_NEWER,	/* on-disk version is too new */
	VDEV_AUX_VERSION_OLDER,	/* on-disk version is too old */
	VDEV_AUX_SPARED		/* hot spare used in another pool */
} vdev_aux_t;

/*
 * pool state.  The following states are written to disk as part of the normal
 * SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE.  The remaining states are
 * software abstractions used at various levels to communicate pool state.
 */
typedef enum pool_state {
	POOL_STATE_ACTIVE = 0,		/* In active use */
	POOL_STATE_EXPORTED,		/* Explicitly exported */
	POOL_STATE_DESTROYED,		/* Explicitly destroyed */
	POOL_STATE_SPARE,		/* Reserved for hot spare use */
	POOL_STATE_UNINITIALIZED,	/* Internal spa_t state */
	POOL_STATE_UNAVAIL,		/* Internal libzfs state */
	POOL_STATE_POTENTIALLY_ACTIVE	/* Internal libzfs state */
} pool_state_t;

/*
 * The uberblock version is incremented whenever an incompatible on-disk
 * format change is made to the SPA, DMU, or ZAP.
 *
 * Note: the first two fields should never be moved.  When a storage pool
 * is opened, the uberblock must be read off the disk before the version
 * can be checked.  If the ub_version field is moved, we may not detect
 * version mismatch.  If the ub_magic field is moved, applications that
 * expect the magic number in the first word won't work.
 */
#define	UBERBLOCK_MAGIC		0x00bab10c	/* oo-ba-bloc! */
#define	UBERBLOCK_SHIFT		10		/* up to 1K */

struct uberblock {
	uint64_t	ub_magic;	/* UBERBLOCK_MAGIC */
	uint64_t	ub_version;	/* SPA_VERSION */
	uint64_t	ub_txg;		/* txg of last sync */
	uint64_t	ub_guid_sum;	/* sum of all vdev guids */
	uint64_t	ub_timestamp;	/* UTC time of last sync */
	blkptr_t	ub_rootbp;	/* MOS objset_phys_t */
};
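
/*
 * Illustrative sketch (not part of the original header): picking the active
 * uberblock out of a label's uberblock ring.  A copy is usable only if its
 * magic matches and its version is supported; among usable copies, the one
 * with the highest txg (ties broken by timestamp) wins.  The comparator name
 * is invented for illustration.
 */
static inline int
uberblock_compare_sketch(const struct uberblock *a, const struct uberblock *b)
{
	if (a->ub_txg != b->ub_txg)
		return (a->ub_txg < b->ub_txg ? -1 : 1);
	if (a->ub_timestamp != b->ub_timestamp)
		return (a->ub_timestamp < b->ub_timestamp ? -1 : 1);
	return (0);
}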

/*
 * Flags.
 */
#define	DNODE_MUST_BE_ALLOCATED	1
#define	DNODE_MUST_BE_FREE	2

/*
 * Fixed constants.
 */
#define	DNODE_SHIFT		9	/* 512 bytes */
#define	DN_MIN_INDBLKSHIFT	12	/* 4k */
#define	DN_MAX_INDBLKSHIFT	14	/* 16k */
#define	DNODE_BLOCK_SHIFT	14	/* 16k */
#define	DNODE_CORE_SIZE		64	/* 64 bytes for dnode sans blkptrs */
#define	DN_MAX_OBJECT_SHIFT	48	/* 256 trillion (zfs_fid_t limit) */
#define	DN_MAX_OFFSET_SHIFT	64	/* 2^64 bytes in a dnode */

/*
 * Derived constants.
 */
#define	DNODE_MIN_SIZE		(1 << DNODE_SHIFT)
#define	DNODE_MAX_SIZE		(1 << DNODE_BLOCK_SHIFT)
#define	DNODE_BLOCK_SIZE	(1 << DNODE_BLOCK_SHIFT)
#define	DNODE_MIN_SLOTS		(DNODE_MIN_SIZE >> DNODE_SHIFT)
#define	DNODE_MAX_SLOTS		(DNODE_MAX_SIZE >> DNODE_SHIFT)
#define	DN_BONUS_SIZE(dnsize)	((dnsize) - DNODE_CORE_SIZE - \
	(1 << SPA_BLKPTRSHIFT))
#define	DN_SLOTS_TO_BONUSLEN(slots)	DN_BONUS_SIZE((slots) << DNODE_SHIFT)
#define	DN_OLD_MAX_BONUSLEN	(DN_BONUS_SIZE(DNODE_MIN_SIZE))
#define	DN_MAX_NBLKPTR	((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> \
	SPA_BLKPTRSHIFT)
#define	DN_MAX_OBJECT	(1ULL << DN_MAX_OBJECT_SHIFT)
#define	DN_ZERO_BONUSLEN	(DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1)

#define	DNODES_PER_BLOCK_SHIFT	(DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define	DNODES_PER_BLOCK	(1ULL << DNODES_PER_BLOCK_SHIFT)
#define	DNODES_PER_LEVEL_SHIFT	(DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)

/* The +2 here is a cheesy way to round up */
#define	DN_MAX_LEVELS	(2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \
	(DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT)))

#define	DN_BONUS(dnp)	((void*)((dnp)->dn_bonus + \
	(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))

#define	DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
	(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)

#define	EPB(blkshift, typeshift)	(1 << (blkshift - typeshift))

/* Is dn_used in bytes?  if not, it's in multiples of SPA_MINBLOCKSIZE */
#define	DNODE_FLAG_USED_BYTES		(1<<0)
#define	DNODE_FLAG_USERUSED_ACCOUNTED	(1<<1)

/* Does dnode have a SA spill blkptr in bonus? */
#define	DNODE_FLAG_SPILL_BLKPTR	(1<<2)

typedef struct dnode_phys {
	uint8_t dn_type;		/* dmu_object_type_t */
	uint8_t dn_indblkshift;		/* ln2(indirect block size) */
	uint8_t dn_nlevels;		/* 1=dn_blkptr->data blocks */
	uint8_t dn_nblkptr;		/* length of dn_blkptr */
	uint8_t dn_bonustype;		/* type of data in bonus buffer */
	uint8_t dn_checksum;		/* ZIO_CHECKSUM type */
	uint8_t dn_compress;		/* ZIO_COMPRESS type */
	uint8_t dn_flags;		/* DNODE_FLAG_* */
	uint16_t dn_datablkszsec;	/* data block size in 512b sectors */
	uint16_t dn_bonuslen;		/* length of dn_bonus */
	uint8_t dn_extra_slots;		/* # of subsequent slots consumed */
	uint8_t dn_pad2[3];

	/* accounting is protected by dn_dirty_mtx */
	uint64_t dn_maxblkid;		/* largest allocated block ID */
	uint64_t dn_used;		/* bytes (or sectors) of disk space */

	uint64_t dn_pad3[4];

	/*
	 * The tail region is 448 bytes for a 512 byte dnode, and
	 * correspondingly larger for larger dnode sizes. The spill
	 * block pointer, when present, is always at the end of the tail
	 * region. There are three ways this space may be used, using
	 * a 512 byte dnode for this diagram:
	 *
	 * 0       64      128     192     256     320     384     448 (offset)
	 * +---------------+---------------+---------------+-------+
	 * | dn_blkptr[0]  | dn_blkptr[1]  | dn_blkptr[2]  | /     |
	 * +---------------+---------------+---------------+-------+
	 * | dn_blkptr[0]  | dn_bonus[0..319]                      |
	 * +---------------+-----------------------+---------------+
	 * | dn_blkptr[0]  | dn_bonus[0..191]      | dn_spill      |
	 * +---------------+-----------------------+---------------+
	 */
	union {
		blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)];
		struct {
			blkptr_t __dn_ignore1;
			uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN];
		};
		struct {
			blkptr_t __dn_ignore2;
			uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN -
			    sizeof (blkptr_t)];
			blkptr_t dn_spill;
		};
	};
} dnode_phys_t;

#define	DN_SPILL_BLKPTR(dnp)	(blkptr_t *)((char *)(dnp) + \
	(((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT))
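
/*
 * Illustrative sketch (not part of the original header): locating a dnode's
 * bonus buffer and optional spill block pointer.  DN_BONUS() points just past
 * the block pointers actually used by the dnode, and DN_SPILL_BLKPTR() is
 * meaningful only when DNODE_FLAG_SPILL_BLKPTR is set.  The function name is
 * invented for illustration; NULL is assumed to come from the including
 * environment.
 */
static inline void *
dnode_bonus_sketch(dnode_phys_t *dnp, blkptr_t **spillp)
{
	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
		*spillp = DN_SPILL_BLKPTR(dnp);	/* overflow attributes */
	else
		*spillp = NULL;

	/* dn_bonuslen bytes of type dn_bonustype live here. */
	return (DN_BONUS(dnp));
}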

typedef enum dmu_object_byteswap {
	DMU_BSWAP_UINT8,
	DMU_BSWAP_UINT16,
	DMU_BSWAP_UINT32,
	DMU_BSWAP_UINT64,
	DMU_BSWAP_ZAP,
	DMU_BSWAP_DNODE,
	DMU_BSWAP_OBJSET,
	DMU_BSWAP_ZNODE,
	DMU_BSWAP_OLDACL,
	DMU_BSWAP_ACL,
	/*
	 * Allocating a new byteswap type number makes the on-disk format
	 * incompatible with any other format that uses the same number.
	 *
	 * Data can usually be structured to work with one of the
	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
	 */
	DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;

#define	DMU_OT_NEWTYPE	0x80
#define	DMU_OT_METADATA	0x40
#define	DMU_OT_BYTESWAP_MASK	0x3f

/*
 * Defines a uint8_t object type. Object types specify if the data
 * in the object is metadata (boolean) and how to byteswap the data
 * (dmu_object_byteswap_t).
 */
#define	DMU_OT(byteswap, metadata) \
	(DMU_OT_NEWTYPE | \
	((metadata) ? DMU_OT_METADATA : 0) | \
	((byteswap) & DMU_OT_BYTESWAP_MASK))

typedef enum dmu_object_type {
	DMU_OT_NONE,
	/* general: */
	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
	DMU_OT_BPLIST,			/* UINT64 */
	DMU_OT_BPLIST_HDR,		/* UINT64 */
	/* spa: */
	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
	DMU_OT_SPACE_MAP,		/* UINT64 */
	/* zil: */
	DMU_OT_INTENT_LOG,		/* UINT64 */
	/* dmu: */
	DMU_OT_DNODE,			/* DNODE */
	DMU_OT_OBJSET,			/* OBJSET */
	/* dsl: */
	DMU_OT_DSL_DIR,			/* UINT64 */
	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
	DMU_OT_DSL_PROPS,		/* ZAP */
	DMU_OT_DSL_DATASET,		/* UINT64 */
	/* zpl: */
	DMU_OT_ZNODE,			/* ZNODE */
	DMU_OT_OLDACL,			/* Old ACL */
	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
	DMU_OT_MASTER_NODE,		/* ZAP */
	DMU_OT_UNLINKED_SET,		/* ZAP */
	/* zvol: */
	DMU_OT_ZVOL,			/* UINT8 */
	DMU_OT_ZVOL_PROP,		/* ZAP */
	/* other; for testing only! */
	DMU_OT_PLAIN_OTHER,		/* UINT8 */
	DMU_OT_UINT64_OTHER,		/* UINT64 */
	DMU_OT_ZAP_OTHER,		/* ZAP */
	/* new object types: */
	DMU_OT_ERROR_LOG,		/* ZAP */
	DMU_OT_SPA_HISTORY,		/* UINT8 */
	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
	DMU_OT_POOL_PROPS,		/* ZAP */
	DMU_OT_DSL_PERMS,		/* ZAP */
	DMU_OT_ACL,			/* ACL */
	DMU_OT_SYSACL,			/* SYSACL */
	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
	DMU_OT_NEXT_CLONES,		/* ZAP */
	DMU_OT_SCAN_QUEUE,		/* ZAP */
	DMU_OT_USERGROUP_USED,		/* ZAP */
	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
	DMU_OT_USERREFS,		/* ZAP */
	DMU_OT_DDT_ZAP,			/* ZAP */
	DMU_OT_DDT_STATS,		/* ZAP */
	DMU_OT_SA,			/* System attr */
	DMU_OT_SA_MASTER_NODE,		/* ZAP */
	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
	DMU_OT_SCAN_XLATE,		/* ZAP */
	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
	DMU_OT_NUMTYPES,

	/*
	 * Names for valid types declared with DMU_OT().
	 */
	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE)
} dmu_object_type_t;

typedef enum dmu_objset_type {
	DMU_OST_NONE,
	DMU_OST_META,
	DMU_OST_ZFS,
	DMU_OST_ZVOL,
	DMU_OST_OTHER,			/* For testing only! */
	DMU_OST_ANY,			/* Be careful! */
	DMU_OST_NUMTYPES
} dmu_objset_type_t;

/*
 * header for all bonus and spill buffers.
 * The header has a fixed portion with a variable number
 * of "lengths" depending on the number of variable sized
 * attributes which are determined by the "layout number"
 */

#define	SA_MAGIC	0x2F505A /* ZFS SA */
typedef struct sa_hdr_phys {
	uint32_t sa_magic;
	uint16_t sa_layout_info;  /* Encoded with hdrsize and layout number */
	uint16_t sa_lengths[1];	/* optional sizes for variable length attrs */
	/* ... Data follows the lengths. */
} sa_hdr_phys_t;

/*
 * sa_hdr_phys -> sa_layout_info
 *
 * 16      10       0
 * +--------+-------+
 * | hdrsz  |layout |
 * +--------+-------+
 *
 * Bits 0-10 are the layout number
 * Bits 11-16 are the size of the header.
 * The hdrsize is the number * 8
 *
 * For example.
 * hdrsz of 1 ==> 8 byte header
 *          2 ==> 16 byte header
 *
 */

#define	SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10)
#define	SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 16, 3, 0)
#define	SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \
{ \
	BF32_SET_SB(x, 10, 6, 3, 0, size); \
	BF32_SET(x, 0, 10, num); \
}

#define	SA_MODE_OFFSET		0
#define	SA_SIZE_OFFSET		8
#define	SA_GEN_OFFSET		16
#define	SA_UID_OFFSET		24
#define	SA_GID_OFFSET		32
#define	SA_PARENT_OFFSET	40
#define	SA_SYMLINK_OFFSET	160
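
/*
 * Illustrative sketch (not part of the original header): reading fixed-offset
 * attributes out of an SA bonus buffer.  The attribute data starts right
 * after the variable-sized SA header, and for the layouts a boot-time reader
 * cares about, mode/size/uid/gid live at the SA_*_OFFSETs above.  The
 * function name is invented; a real reader must also handle spill blocks and
 * verify sa_magic.
 */
static inline void
sa_read_basic_attrs_sketch(sa_hdr_phys_t *sahdrp, uint64_t *modep,
    uint64_t *sizep, uint64_t *uidp, uint64_t *gidp)
{
	char *attrs = (char *)sahdrp + SA_HDR_SIZE(sahdrp);

	*modep = *(uint64_t *)(attrs + SA_MODE_OFFSET);
	*sizep = *(uint64_t *)(attrs + SA_SIZE_OFFSET);
	*uidp = *(uint64_t *)(attrs + SA_UID_OFFSET);
	*gidp = *(uint64_t *)(attrs + SA_GID_OFFSET);
}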

/*
 * Intent log header - this on disk structure holds fields to manage
 * the log.  All fields are 64 bit to easily handle cross architectures.
 */
typedef struct zil_header {
	uint64_t zh_claim_txg;	/* txg in which log blocks were claimed */
	uint64_t zh_replay_seq;	/* highest replayed sequence number */
	blkptr_t zh_log;	/* log chain */
	uint64_t zh_claim_seq;	/* highest claimed sequence number */
	uint64_t zh_pad[5];
} zil_header_t;

#define	OBJSET_PHYS_SIZE 2048

typedef struct objset_phys {
	dnode_phys_t os_meta_dnode;
	zil_header_t os_zil_header;
	uint64_t os_type;
	uint64_t os_flags;
	char os_pad[OBJSET_PHYS_SIZE - sizeof (dnode_phys_t)*3 -
	    sizeof (zil_header_t) - sizeof (uint64_t)*2];
	dnode_phys_t os_userused_dnode;
	dnode_phys_t os_groupused_dnode;
} objset_phys_t;

typedef struct dsl_dir_phys {
	uint64_t dd_creation_time;	/* not actually used */
	uint64_t dd_head_dataset_obj;
	uint64_t dd_parent_obj;
	uint64_t dd_clone_parent_obj;
	uint64_t dd_child_dir_zapobj;
	/*
	 * how much space our children are accounting for; for leaf
	 * datasets, == physical space used by fs + snaps
	 */
	uint64_t dd_used_bytes;
	uint64_t dd_compressed_bytes;
	uint64_t dd_uncompressed_bytes;
	/* Administrative quota setting */
	uint64_t dd_quota;
	/* Administrative reservation setting */
	uint64_t dd_reserved;
	uint64_t dd_props_zapobj;
	uint64_t dd_pad[21];	/* pad out to 256 bytes for good measure */
} dsl_dir_phys_t;

typedef struct dsl_dataset_phys {
	uint64_t ds_dir_obj;
	uint64_t ds_prev_snap_obj;
	uint64_t ds_prev_snap_txg;
	uint64_t ds_next_snap_obj;
	uint64_t ds_snapnames_zapobj;	/* zap obj of snaps; ==0 for snaps */
	uint64_t ds_num_children;	/* clone/snap children; ==0 for head */
	uint64_t ds_creation_time;	/* seconds since 1970 */
	uint64_t ds_creation_txg;
	uint64_t ds_deadlist_obj;
	uint64_t ds_used_bytes;
	uint64_t ds_compressed_bytes;
	uint64_t ds_uncompressed_bytes;
	uint64_t ds_unique_bytes;	/* only relevant to snapshots */
	/*
	 * The ds_fsid_guid is a 56-bit ID that can change to avoid
	 * collisions.  The ds_guid is a 64-bit ID that will never
	 * change, so there is a small probability that it will collide.
	 */
	uint64_t ds_fsid_guid;
	uint64_t ds_guid;
	uint64_t ds_flags;
	blkptr_t ds_bp;
	uint64_t ds_pad[8];	/* pad out to 320 bytes for good measure */
} dsl_dataset_phys_t;

/*
 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
 */
#define	DMU_POOL_DIRECTORY_OBJECT	1
#define	DMU_POOL_CONFIG			"config"
#define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
#define	DMU_POOL_ROOT_DATASET		"root_dataset"
#define	DMU_POOL_SYNC_BPLIST		"sync_bplist"
#define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
#define	DMU_POOL_ERRLOG_LAST		"errlog_last"
#define	DMU_POOL_SPARES			"spares"
#define	DMU_POOL_DEFLATE		"deflate"
#define	DMU_POOL_HISTORY		"history"
#define	DMU_POOL_PROPS			"pool_props"

#define	ZAP_MAGIC 0x2F52AB2ABULL

#define	FZAP_BLOCK_SHIFT(zap)	((zap)->zap_block_shift)

#define	ZAP_MAXCD		(uint32_t)(-1)
#define	ZAP_HASHBITS		28
#define	MZAP_ENT_LEN		64
#define	MZAP_NAME_LEN		(MZAP_ENT_LEN - 8 - 4 - 2)
#define	MZAP_MAX_BLKSHIFT	SPA_MAXBLOCKSHIFT
#define	MZAP_MAX_BLKSZ		(1 << MZAP_MAX_BLKSHIFT)

typedef struct mzap_ent_phys {
	uint64_t mze_value;
	uint32_t mze_cd;
	uint16_t mze_pad;	/* in case we want to chain them someday */
	char mze_name[MZAP_NAME_LEN];
} mzap_ent_phys_t;

typedef struct mzap_phys {
	uint64_t mz_block_type;	/* ZBT_MICRO */
	uint64_t mz_salt;
	uint64_t mz_pad[6];
	mzap_ent_phys_t mz_chunk[1];
	/* actually variable size depending on block size */
} mzap_phys_t;
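
/*
 * Illustrative sketch (not part of the original header): looking up a name in
 * a microzap block.  A microzap is just a flat array of mzap_ent_phys_t
 * chunks after the 64-byte header; unused slots have an empty name.  "size"
 * is the byte size of the ZAP block.  The helper and its inline string
 * compare are written out only to keep the sketch self-contained.
 */
static inline int
mzap_lookup_sketch(const mzap_phys_t *mz, size_t size, const char *name,
    uint64_t *valuep)
{
	int chunks = size / MZAP_ENT_LEN - 1;	/* first chunk is the header */
	int i, j;

	for (i = 0; i < chunks; i++) {
		const mzap_ent_phys_t *mze = &mz->mz_chunk[i];

		for (j = 0; j < MZAP_NAME_LEN; j++) {
			if (mze->mze_name[j] != name[j])
				break;
			if (name[j] == '\0') {
				*valuep = mze->mze_value;
				return (0);
			}
		}
	}
	return (-1);	/* not found (ENOENT in the real code) */
}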

/*
 * The (fat) zap is stored in one object. It is an array of
 * 1<<FZAP_BLOCK_SHIFT byte blocks. The layout looks like one of:
 *
 * ptrtbl fits in first block:
 *	[zap_phys_t zap_ptrtbl_shift < 6] [zap_leaf_t] ...
 *
 * ptrtbl too big for first block:
 *	[zap_phys_t zap_ptrtbl_shift >= 6] [zap_leaf_t] [ptrtbl] ...
 *
 */

#define	ZBT_LEAF		((1ULL << 63) + 0)
#define	ZBT_HEADER		((1ULL << 63) + 1)
#define	ZBT_MICRO		((1ULL << 63) + 3)
/* any other values are ptrtbl blocks */

/*
 * the embedded pointer table takes up half a block:
 * block size / entry size (2^3) / 2
 */
#define	ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1)

/*
 * The embedded pointer table starts half-way through the block.  Since
 * the pointer table itself is half the block, it starts at (64-bit)
 * word number (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)).
 */
#define	ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) \
	((uint64_t *)(zap)->zap_phys) \
	[(idx) + (1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap))]

/*
 * TAKE NOTE:
 * If zap_phys_t is modified, zap_byteswap() must be modified.
 */
typedef struct zap_phys {
	uint64_t zap_block_type;	/* ZBT_HEADER */
	uint64_t zap_magic;		/* ZAP_MAGIC */

	struct zap_table_phys {
		uint64_t zt_blk;	/* starting block number */
		uint64_t zt_numblks;	/* number of blocks */
		uint64_t zt_shift;	/* bits to index it */
		uint64_t zt_nextblk;	/* next (larger) copy start block */
		uint64_t zt_blks_copied; /* number source blocks copied */
	} zap_ptrtbl;

	uint64_t zap_freeblk;		/* the next free block */
	uint64_t zap_num_leafs;		/* number of leafs */
	uint64_t zap_num_entries;	/* number of entries */
	uint64_t zap_salt;		/* salt to stir into hash function */
	/*
	 * This structure is followed by padding, and then the embedded
	 * pointer table.  The embedded pointer table takes up second
	 * half of the block.  It is accessed using the
	 * ZAP_EMBEDDED_PTRTBL_ENT() macro.
	 */
} zap_phys_t;

typedef struct zap_table_phys zap_table_phys_t;

typedef struct fat_zap {
	int zap_block_shift;		/* block size shift */
	zap_phys_t *zap_phys;
} fat_zap_t;

#define	ZAP_LEAF_MAGIC 0x2AB1EAF

/* chunk size = 24 bytes */
#define	ZAP_LEAF_CHUNKSIZE 24

/*
 * The amount of space available for chunks is:
 * block size (1<<l->l_bs) - hash entry size (2) * number of hash
 * entries - header space (2*chunksize)
 */
#define	ZAP_LEAF_NUMCHUNKS(l) \
	(((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \
	ZAP_LEAF_CHUNKSIZE - 2)

/*
 * The amount of space within the chunk available for the array is:
 * chunk size - space for type (1) - space for next pointer (2)
 */
#define	ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)

#define	ZAP_LEAF_ARRAY_NCHUNKS(bytes) \
	(((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES)

/*
 * Low water mark:  when there are only this many chunks free, start
 * growing the ptrtbl.  Ideally, this should be larger than a
 * "reasonably-sized" entry.  20 chunks is more than enough for the
 * largest directory entry (MAXNAMELEN (256) byte name, 8-byte value),
 * while still being only around 3% for 16k blocks.
 */
#define	ZAP_LEAF_LOW_WATER (20)

/*
 * The leaf hash table has block size / 2^5 (32) number of entries,
 * which should be more than enough for the maximum number of entries,
 * which is less than block size / CHUNKSIZE (24) / minimum number of
 * chunks per entry (3).
 */
#define	ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5)
#define	ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l))

/*
 * The chunks start immediately after the hash table.  The end of the
 * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
 * chunk_t.
 */
#define	ZAP_LEAF_CHUNK(l, idx) \
	((zap_leaf_chunk_t *) \
	((l)->l_phys->l_hash + ZAP_LEAF_HASH_NUMENTRIES(l)))[idx]
#define	ZAP_LEAF_ENTRY(l, idx) (&ZAP_LEAF_CHUNK(l, idx).l_entry)

typedef enum zap_chunk_type {
	ZAP_CHUNK_FREE = 253,
	ZAP_CHUNK_ENTRY = 252,
	ZAP_CHUNK_ARRAY = 251,
	ZAP_CHUNK_TYPE_MAX = 250
} zap_chunk_type_t;

/*
 * TAKE NOTE:
 * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified.
 */
typedef struct zap_leaf_phys {
	struct zap_leaf_header {
		uint64_t lh_block_type;	/* ZBT_LEAF */
		uint64_t lh_pad1;
		uint64_t lh_prefix;	/* hash prefix of this leaf */
		uint32_t lh_magic;	/* ZAP_LEAF_MAGIC */
		uint16_t lh_nfree;	/* number free chunks */
		uint16_t lh_nentries;	/* number of entries */
		uint16_t lh_prefix_len;	/* num bits used to id this */

		/* above is accessible to zap, below is zap_leaf private */

		uint16_t lh_freelist;	/* chunk head of free list */
		uint8_t lh_pad2[12];
	} l_hdr; /* 2 24-byte chunks */

	/*
	 * The header is followed by a hash table with
	 * ZAP_LEAF_HASH_NUMENTRIES(zap) entries.  The hash table is
	 * followed by an array of ZAP_LEAF_NUMCHUNKS(zap)
	 * zap_leaf_chunk structures.  These structures are accessed
	 * with the ZAP_LEAF_CHUNK() macro.
	 */

	uint16_t l_hash[1];
} zap_leaf_phys_t;

typedef union zap_leaf_chunk {
	struct zap_leaf_entry {
		uint8_t le_type;		/* always ZAP_CHUNK_ENTRY */
		uint8_t le_value_intlen;	/* size of ints */
		uint16_t le_next;		/* next entry in hash chain */
		uint16_t le_name_chunk;		/* first chunk of the name */
		uint16_t le_name_numints;	/* bytes in name, incl null */
		uint16_t le_value_chunk;	/* first chunk of the value */
		uint16_t le_value_numints;	/* value length in ints */
		uint32_t le_cd;			/* collision differentiator */
		uint64_t le_hash;		/* hash value of the name */
	} l_entry;
	struct zap_leaf_array {
		uint8_t la_type;		/* always ZAP_CHUNK_ARRAY */
		uint8_t la_array[ZAP_LEAF_ARRAY_BYTES];
		uint16_t la_next;		/* next blk or CHAIN_END */
	} l_array;
	struct zap_leaf_free {
		uint8_t lf_type;		/* always ZAP_CHUNK_FREE */
		uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES];
		uint16_t lf_next;	/* next in free list, or CHAIN_END */
	} l_free;
} zap_leaf_chunk_t;

typedef struct zap_leaf {
	int l_bs;			/* block size shift */
	zap_leaf_phys_t *l_phys;
} zap_leaf_t;

/*
 * Define special zfs pflags
 */
#define	ZFS_XATTR	0x1	/* is an extended attribute */
#define	ZFS_INHERIT_ACE	0x2	/* ace has inheritable ACEs */
#define	ZFS_ACL_TRIVIAL	0x4	/* files ACL is trivial */

#define	MASTER_NODE_OBJ	1

/*
 * special attributes for master node.
 */

#define	ZFS_FSID		"FSID"
#define	ZFS_UNLINKED_SET	"DELETE_QUEUE"
#define	ZFS_ROOT_OBJ		"ROOT"
#define	ZPL_VERSION_OBJ		"VERSION"
#define	ZFS_PROP_BLOCKPERPAGE	"BLOCKPERPAGE"
#define	ZFS_PROP_NOGROWBLOCKS	"NOGROWBLOCKS"

#define	ZFS_FLAG_BLOCKPERPAGE	0x1
#define	ZFS_FLAG_NOGROWBLOCKS	0x2

/*
 * ZPL version - rev'd whenever an incompatible on-disk format change
 * occurs.  Independent of SPA/DMU/ZAP versioning.
 */

#define	ZPL_VERSION		1ULL

/*
 * The directory entry has the type (currently unused on Solaris) in the
 * top 4 bits, and the object number in the low 48 bits.  The "middle"
 * 12 bits are unused.
 */
#define	ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4)
#define	ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48)
#define	ZFS_DIRENT_MAKE(type, obj) (((uint64_t)type << 60) | obj)
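
/*
 * Illustrative sketch (not part of the original header): a directory ZAP
 * entry's 64-bit value packs the entry type and the object number; splitting
 * it back apart is just the two macros above.  The struct and function names
 * are invented for illustration.
 */
struct zfs_dirent_sketch {
	uint64_t de_obj;	/* object number of the child */
	uint64_t de_type;	/* type hint from the top 4 bits, may be 0 */
};

static inline struct zfs_dirent_sketch
zfs_dirent_decode_sketch(uint64_t de)
{
	struct zfs_dirent_sketch d;

	d.de_obj = ZFS_DIRENT_OBJ(de);
	d.de_type = ZFS_DIRENT_TYPE(de);
	return (d);
}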

typedef struct ace {
	uid_t		a_who;		/* uid or gid */
	uint32_t	a_access_mask;	/* read,write,... */
	uint16_t	a_flags;	/* see below */
	uint16_t	a_type;		/* allow or deny */
} ace_t;

#define	ACE_SLOT_CNT	6

typedef struct zfs_znode_acl {
	uint64_t	z_acl_extern_obj;	/* ext acl pieces */
	uint32_t	z_acl_count;		/* Number of ACEs */
	uint16_t	z_acl_version;		/* acl version */
	uint16_t	z_acl_pad;		/* pad */
	ace_t		z_ace_data[ACE_SLOT_CNT]; /* 6 standard ACEs */
} zfs_znode_acl_t;

/*
 * This is the persistent portion of the znode.  It is stored
 * in the "bonus buffer" of the file.  Short symbolic links
 * are also stored in the bonus buffer.
 */
typedef struct znode_phys {
	uint64_t zp_atime[2];	/*  0 - last file access time */
	uint64_t zp_mtime[2];	/* 16 - last file modification time */
	uint64_t zp_ctime[2];	/* 32 - last file change time */
	uint64_t zp_crtime[2];	/* 48 - creation time */
	uint64_t zp_gen;	/* 64 - generation (txg of creation) */
	uint64_t zp_mode;	/* 72 - file mode bits */
	uint64_t zp_size;	/* 80 - size of file */
	uint64_t zp_parent;	/* 88 - directory parent (`..') */
	uint64_t zp_links;	/* 96 - number of links to file */
	uint64_t zp_xattr;	/* 104 - DMU object for xattrs */
	uint64_t zp_rdev;	/* 112 - dev_t for VBLK & VCHR files */
	uint64_t zp_flags;	/* 120 - persistent flags */
	uint64_t zp_uid;	/* 128 - file owner */
	uint64_t zp_gid;	/* 136 - owning group */
	uint64_t zp_pad[4];	/* 144 - future */
	zfs_znode_acl_t zp_acl;	/* 176 - 263 ACL */
	/*
	 * Data may pad out any remaining bytes in the znode buffer, eg:
	 *
	 * |<---------------------- dnode_phys (512) ------------------------>|
	 * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->|
	 *			|<---- znode (264) ---->|<---- data (56) ---->|
	 *
	 * At present, we only use this space to store symbolic links.
	 */
} znode_phys_t;

/*
 * In-core vdev representation.
 */
struct vdev;
typedef int vdev_phys_read_t(struct vdev *vdev, void *priv,
    off_t offset, void *buf, size_t bytes);
typedef int vdev_read_t(struct vdev *vdev, const blkptr_t *bp,
    void *buf, off_t offset, size_t bytes);

typedef STAILQ_HEAD(vdev_list, vdev) vdev_list_t;

typedef struct vdev {
	STAILQ_ENTRY(vdev) v_childlink;	/* link in parent's child list */
	STAILQ_ENTRY(vdev) v_alllink;	/* link in global vdev list */
	vdev_list_t	v_children;	/* children of this vdev */
	const char	*v_name;	/* vdev name */
	const char	*v_phys_path;	/* vdev bootpath */
	const char	*v_devid;	/* vdev devid */
	uint64_t	v_guid;		/* vdev guid */
	int		v_id;		/* index in parent */
	int		v_ashift;	/* offset to block shift */
	int		v_nparity;	/* # parity for raidz */
	struct vdev	*v_top;		/* parent vdev */
	int		v_nchildren;	/* # children */
	vdev_state_t	v_state;	/* current state */
	vdev_phys_read_t *v_phys_read;	/* read from raw leaf vdev */
	vdev_read_t	*v_read;	/* read from vdev */
	void		*v_read_priv;	/* private data for read function */
} vdev_t;

/*
 * In-core pool representation.
 */
typedef STAILQ_HEAD(spa_list, spa) spa_list_t;

typedef struct spa {
	STAILQ_ENTRY(spa) spa_link;	/* link in global pool list */
	char		*spa_name;	/* pool name */
	uint64_t	spa_guid;	/* pool guid */
	uint64_t	spa_txg;	/* most recent transaction */
	struct uberblock spa_uberblock;	/* best uberblock so far */
	vdev_list_t	spa_vdevs;	/* list of all toplevel vdevs */
	objset_phys_t	spa_mos;	/* MOS for this pool */
	int		spa_inited;	/* initialized */
	vdev_t		*spa_boot_vdev;	/* boot device for kernel */
} spa_t;

static void decode_embedded_bp_compressed(const blkptr_t *, void *);