/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright 2017 Nexenta Systems, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>
#include <sys/dmu_objset.h>
#include <sys/sunddi.h>

extern inline mzap_phys_t *zap_m_phys(zap_t *zap);

static int mzap_upgrade(zap_t **zapp,
    void *tag, dmu_tx_t *tx, zap_flags_t flags);

uint64_t
zap_getflags(zap_t *zap)
{
        if (zap->zap_ismicro)
                return (0);
        return (zap_f_phys(zap)->zap_flags);
}

int
zap_hashbits(zap_t *zap)
{
        if (zap_getflags(zap) & ZAP_FLAG_HASH64)
                return (48);
        else
                return (28);
}

uint32_t
zap_maxcd(zap_t *zap)
{
        if (zap_getflags(zap) & ZAP_FLAG_HASH64)
                return ((1<<16)-1);
        else
                return (-1U);
}

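/*
 * zap_hash() below folds the (normalized) key through the salted
 * ZFS CRC-64 table and then keeps only the top zap_hashbits() bits of
 * the result.  Worked example: with 28 hash bits the final mask is
 * ~((1ULL << 36) - 1), i.e. the low 36 bits are cleared and only the
 * high-order 28 bits (which choose the bucket) survive.
 */
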
uint64_t
zap_hash(zap_name_t *zn)
{
        zap_t *zap = zn->zn_zap;
        uint64_t h = 0;

        if (zap_getflags(zap) & ZAP_FLAG_PRE_HASHED_KEY) {
                ASSERT(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY);
                h = *(uint64_t *)zn->zn_key_orig;
        } else {
                h = zap->zap_salt;
                ASSERT(h != 0);
                ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

                if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
                        int i;
                        const uint64_t *wp = zn->zn_key_norm;

                        ASSERT(zn->zn_key_intlen == 8);
                        for (i = 0; i < zn->zn_key_norm_numints;
                            wp++, i++) {
                                int j;
                                uint64_t word = *wp;

                                for (j = 0; j < zn->zn_key_intlen; j++) {
                                        h = (h >> 8) ^
                                            zfs_crc64_table[(h ^ word) & 0xFF];
                                        word >>= NBBY;
                                }
                        }
                } else {
                        int i, len;
                        const uint8_t *cp = zn->zn_key_norm;

                        /*
                         * We previously stored the terminating null on
                         * disk, but didn't hash it, so we need to
                         * continue to not hash it.  (The
                         * zn_key_*_numints includes the terminating
                         * null for non-binary keys.)
                         */
                        len = zn->zn_key_norm_numints - 1;

                        ASSERT(zn->zn_key_intlen == 1);
                        for (i = 0; i < len; cp++, i++) {
                                h = (h >> 8) ^
                                    zfs_crc64_table[(h ^ *cp) & 0xFF];
                        }
                }
        }
        /*
         * Don't use all 64 bits, since we need some in the cookie for
         * the collision differentiator.  We MUST use the high bits,
         * since those are the ones that we first pay attention to when
         * choosing the bucket.
         */
        h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);

        return (h);
}

static int
zap_normalize(zap_t *zap, const char *name, char *namenorm, int normflags)
{
        size_t inlen, outlen;
        int err;

        ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));

        err = 0;
        inlen = strlen(name) + 1;
        outlen = ZAP_MAXNAMELEN;

        (void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen,
            normflags | U8_TEXTPREP_IGNORE_NULL | U8_TEXTPREP_IGNORE_INVALID,
            U8_UNICODE_LATEST, &err);

        return (err);
}

boolean_t
zap_match(zap_name_t *zn, const char *matchname)
{
        ASSERT(!(zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY));

        if (zn->zn_matchtype & MT_NORMALIZE) {
                char norm[ZAP_MAXNAMELEN];

                if (zap_normalize(zn->zn_zap, matchname, norm,
                    zn->zn_normflags) != 0)
                        return (B_FALSE);

                return (strcmp(zn->zn_key_norm, norm) == 0);
        } else {
                return (strcmp(zn->zn_key_orig, matchname) == 0);
        }
}

void
zap_name_free(zap_name_t *zn)
{
        kmem_free(zn, sizeof (zap_name_t));
}

zap_name_t *
zap_name_alloc(zap_t *zap, const char *key, matchtype_t mt)
{
        zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);

        zn->zn_zap = zap;
        zn->zn_key_intlen = sizeof (*key);
        zn->zn_key_orig = key;
        zn->zn_key_orig_numints = strlen(zn->zn_key_orig) + 1;
        zn->zn_matchtype = mt;
        zn->zn_normflags = zap->zap_normflags;

        /*
         * If we're dealing with a case-sensitive lookup on a mixed or
         * case-insensitive fs, remove U8_TEXTPREP_TOUPPER, or the lookup
         * will fold case to all caps and override the lookup request.
         */
        if (mt & MT_MATCH_CASE)
                zn->zn_normflags &= ~U8_TEXTPREP_TOUPPER;

        if (zap->zap_normflags) {
                /*
                 * We *must* use zap_normflags because this normalization is
                 * what the hash is computed from.
                 */
                if (zap_normalize(zap, key, zn->zn_normbuf,
                    zap->zap_normflags) != 0) {
                        zap_name_free(zn);
                        return (NULL);
                }
                zn->zn_key_norm = zn->zn_normbuf;
                zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
        } else {
                zn->zn_key_norm = zn->zn_key_orig;
                zn->zn_key_norm_numints = zn->zn_key_orig_numints;
        }

        zn->zn_hash = zap_hash(zn);

        if (zap->zap_normflags != zn->zn_normflags) {
                /*
                 * We *must* use zn_normflags because this normalization is
                 * what the matching is based on.  (Not the hash!)
                 */
                if (zap_normalize(zap, key, zn->zn_normbuf,
                    zn->zn_normflags) != 0) {
                        zap_name_free(zn);
                        return (NULL);
                }
                zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
        }

        return (zn);
}

zap_name_t *
zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
{
        zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);

        ASSERT(zap->zap_normflags == 0);
        zn->zn_zap = zap;
        zn->zn_key_intlen = sizeof (*key);
        zn->zn_key_orig = zn->zn_key_norm = key;
        zn->zn_key_orig_numints = zn->zn_key_norm_numints = numints;
        zn->zn_matchtype = 0;

        zn->zn_hash = zap_hash(zn);
        return (zn);
}

static void
mzap_byteswap(mzap_phys_t *buf, size_t size)
{
        int i, max;

        buf->mz_block_type = BSWAP_64(buf->mz_block_type);
        buf->mz_salt = BSWAP_64(buf->mz_salt);
        buf->mz_normflags = BSWAP_64(buf->mz_normflags);
        max = (size / MZAP_ENT_LEN) - 1;
        for (i = 0; i < max; i++) {
                buf->mz_chunk[i].mze_value =
                    BSWAP_64(buf->mz_chunk[i].mze_value);
                buf->mz_chunk[i].mze_cd =
                    BSWAP_32(buf->mz_chunk[i].mze_cd);
        }
}

void
zap_byteswap(void *buf, size_t size)
{
        uint64_t block_type;

        block_type = *(uint64_t *)buf;

        if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) {
                /* ASSERT(magic == ZAP_LEAF_MAGIC); */
                mzap_byteswap(buf, size);
        } else {
                fzap_byteswap(buf, size);
        }
}

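/*
 * The in-core microzap entries are kept in an AVL tree ordered by
 * (mze_hash, mze_cd), as implemented by mze_compare() below.  Entries
 * that hash identically are therefore adjacent in the tree and are told
 * apart by their collision differentiator (cd).
 */
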
static int
mze_compare(const void *arg1, const void *arg2)
{
        const mzap_ent_t *mze1 = arg1;
        const mzap_ent_t *mze2 = arg2;

        if (mze1->mze_hash > mze2->mze_hash)
                return (+1);
        if (mze1->mze_hash < mze2->mze_hash)
                return (-1);
        if (mze1->mze_cd > mze2->mze_cd)
                return (+1);
        if (mze1->mze_cd < mze2->mze_cd)
                return (-1);
        return (0);
}

static void
mze_insert(zap_t *zap, int chunkid, uint64_t hash)
{
        mzap_ent_t *mze;

        ASSERT(zap->zap_ismicro);
        ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

        mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
        mze->mze_chunkid = chunkid;
        mze->mze_hash = hash;
        mze->mze_cd = MZE_PHYS(zap, mze)->mze_cd;
        ASSERT(MZE_PHYS(zap, mze)->mze_name[0] != 0);
        avl_add(&zap->zap_m.zap_avl, mze);
}

static mzap_ent_t *
mze_find(zap_name_t *zn)
{
        mzap_ent_t mze_tofind;
        mzap_ent_t *mze;
        avl_index_t idx;
        avl_tree_t *avl = &zn->zn_zap->zap_m.zap_avl;

        ASSERT(zn->zn_zap->zap_ismicro);
        ASSERT(RW_LOCK_HELD(&zn->zn_zap->zap_rwlock));

        mze_tofind.mze_hash = zn->zn_hash;
        mze_tofind.mze_cd = 0;

        mze = avl_find(avl, &mze_tofind, &idx);
        if (mze == NULL)
                mze = avl_nearest(avl, idx, AVL_AFTER);
        for (; mze && mze->mze_hash == zn->zn_hash;
            mze = AVL_NEXT(avl, mze)) {
                ASSERT3U(mze->mze_cd, ==, MZE_PHYS(zn->zn_zap, mze)->mze_cd);
                if (zap_match(zn, MZE_PHYS(zn->zn_zap, mze)->mze_name))
                        return (mze);
        }

        return (NULL);
}

static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
        mzap_ent_t mze_tofind;
        mzap_ent_t *mze;
        avl_index_t idx;
        avl_tree_t *avl = &zap->zap_m.zap_avl;
        uint32_t cd;

        ASSERT(zap->zap_ismicro);
        ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

        mze_tofind.mze_hash = hash;
        mze_tofind.mze_cd = 0;

        cd = 0;
        for (mze = avl_find(avl, &mze_tofind, &idx);
            mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
                if (mze->mze_cd != cd)
                        break;
                cd++;
        }

        return (cd);
}

static void
mze_remove(zap_t *zap, mzap_ent_t *mze)
{
        ASSERT(zap->zap_ismicro);
        ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

        avl_remove(&zap->zap_m.zap_avl, mze);
        kmem_free(mze, sizeof (mzap_ent_t));
}

static void
mze_destroy(zap_t *zap)
{
        mzap_ent_t *mze;
        void *avlcookie = NULL;

        while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie)))
                kmem_free(mze, sizeof (mzap_ent_t));
        avl_destroy(&zap->zap_m.zap_avl);
}

static zap_t *
mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
{
        zap_t *winner;
        zap_t *zap;
        int i;
        uint64_t *zap_hdr = (uint64_t *)db->db_data;
        uint64_t zap_block_type = zap_hdr[0];
        uint64_t zap_magic = zap_hdr[1];

        ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));

        zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
        rw_init(&zap->zap_rwlock, 0, 0, 0);
        rw_enter(&zap->zap_rwlock, RW_WRITER);
        zap->zap_objset = os;
        zap->zap_object = obj;
        zap->zap_dbuf = db;

        if (zap_block_type != ZBT_MICRO) {
                mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
                zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
                if (zap_block_type != ZBT_HEADER || zap_magic != ZAP_MAGIC) {
                        winner = NULL;  /* No actual winner here... */
                        goto handle_winner;
                }
        } else {
                zap->zap_ismicro = TRUE;
        }

        /*
         * Make sure that zap_ismicro is set before we let others see
         * it, because zap_lockdir() checks zap_ismicro without the lock
         * held.
         */
        dmu_buf_init_user(&zap->zap_dbu, zap_evict_sync, NULL, &zap->zap_dbuf);
        winner = dmu_buf_set_user(db, &zap->zap_dbu);

        if (winner != NULL)
                goto handle_winner;

        if (zap->zap_ismicro) {
                zap->zap_salt = zap_m_phys(zap)->mz_salt;
                zap->zap_normflags = zap_m_phys(zap)->mz_normflags;
                zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
                avl_create(&zap->zap_m.zap_avl, mze_compare,
                    sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node));

                for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
                        mzap_ent_phys_t *mze =
                            &zap_m_phys(zap)->mz_chunk[i];
                        if (mze->mze_name[0]) {
                                zap_name_t *zn;

                                zap->zap_m.zap_num_entries++;
                                zn = zap_name_alloc(zap, mze->mze_name, 0);
                                mze_insert(zap, i, zn->zn_hash);
                                zap_name_free(zn);
                        }
                }
        } else {
                zap->zap_salt = zap_f_phys(zap)->zap_salt;
                zap->zap_normflags = zap_f_phys(zap)->zap_normflags;

                ASSERT3U(sizeof (struct zap_leaf_header), ==,
                    2*ZAP_LEAF_CHUNKSIZE);

                /*
                 * The embedded pointer table should not overlap the
                 * other members.
                 */
                ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
                    &zap_f_phys(zap)->zap_salt);

                /*
                 * The embedded pointer table should end at the end of
                 * the block.
                 */
                ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
                    1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
                    (uintptr_t)zap_f_phys(zap), ==,
                    zap->zap_dbuf->db_size);
        }
        rw_exit(&zap->zap_rwlock);
        return (zap);

handle_winner:
        rw_exit(&zap->zap_rwlock);
        rw_destroy(&zap->zap_rwlock);
        if (!zap->zap_ismicro)
                mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
        kmem_free(zap, sizeof (zap_t));
        return (winner);
}

static int
zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
    krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp)
{
        dmu_object_info_t doi;
        zap_t *zap;
        krw_t lt;

        ASSERT0(db->db_offset);
        objset_t *os = dmu_buf_get_objset(db);
        uint64_t obj = db->db_object;

        *zapp = NULL;

        dmu_object_info_from_db(db, &doi);
        ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);

        zap = dmu_buf_get_user(db);
        if (zap == NULL) {
                zap = mzap_open(os, obj, db);
                if (zap == NULL) {
                        /*
                         * mzap_open() didn't like what it saw on-disk.
                         * Check for corruption!
                         */
                        return (SET_ERROR(EIO));
                }
        }

        /*
         * We're checking zap_ismicro without the lock held, in order to
         * tell what type of lock we want.  Once we have some sort of
         * lock, see if it really is the right type.  In practice this
         * can only be different if it was upgraded from micro to fat,
         * and micro wanted WRITER but fat only needs READER.
         */
        lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
        rw_enter(&zap->zap_rwlock, lt);
        if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
                /* it was upgraded, now we only need reader */
                ASSERT(lt == RW_WRITER);
                ASSERT(RW_READER ==
                    (!zap->zap_ismicro && fatreader) ? RW_READER : lti);
                rw_downgrade(&zap->zap_rwlock);
                lt = RW_READER;
        }

        zap->zap_objset = os;

        if (lt == RW_WRITER)
                dmu_buf_will_dirty(db, tx);

        ASSERT3P(zap->zap_dbuf, ==, db);

        ASSERT(!zap->zap_ismicro ||
            zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks);
        if (zap->zap_ismicro && tx && adding &&
            zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
                uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
                if (newsz > MZAP_MAX_BLKSZ) {
                        dprintf("upgrading obj %llu: num_entries=%u\n",
                            obj, zap->zap_m.zap_num_entries);
                        *zapp = zap;
                        int err = mzap_upgrade(zapp, tag, tx, 0);
                        if (err != 0)
                                rw_exit(&zap->zap_rwlock);
                        return (err);
                }
                VERIFY0(dmu_object_set_blocksize(os, obj, newsz, 0, tx));
                zap->zap_m.zap_num_chunks =
                    db->db_size / MZAP_ENT_LEN - 1;
        }

        *zapp = zap;
        return (0);
}

static int
zap_lockdir_by_dnode(dnode_t *dn, dmu_tx_t *tx,
    krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
        dmu_buf_t *db;
        int err;

        err = dmu_buf_hold_by_dnode(dn, 0, tag, &db, DMU_READ_NO_PREFETCH);
        if (err != 0)
                return (err);
        err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
        if (err != 0)
                dmu_buf_rele(db, tag);
        return (err);
}

int
zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx,
    krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
        dmu_buf_t *db;
        int err;

        err = dmu_buf_hold(os, obj, 0, tag, &db, DMU_READ_NO_PREFETCH);
        if (err != 0)
                return (err);
        err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
        if (err != 0)
                dmu_buf_rele(db, tag);
        return (err);
}

void
zap_unlockdir(zap_t *zap, void *tag)
{
        rw_exit(&zap->zap_rwlock);
        dmu_buf_rele(zap->zap_dbuf, tag);
}

static int
mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
{
        mzap_phys_t *mzp;
        int i, sz, nchunks;
        int err = 0;
        zap_t *zap = *zapp;

        ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

        sz = zap->zap_dbuf->db_size;
        mzp = zio_buf_alloc(sz);
        bcopy(zap->zap_dbuf->db_data, mzp, sz);
        nchunks = zap->zap_m.zap_num_chunks;

        if (!flags) {
                err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
                    1ULL << fzap_default_block_shift, 0, tx);
                if (err) {
                        zio_buf_free(mzp, sz);
                        return (err);
                }
        }

        dprintf("upgrading obj=%llu with %u chunks\n",
            zap->zap_object, nchunks);
        /* XXX destroy the avl later, so we can use the stored hash value */
        mze_destroy(zap);

        fzap_upgrade(zap, tx, flags);

        for (i = 0; i < nchunks; i++) {
                mzap_ent_phys_t *mze = &mzp->mz_chunk[i];
                zap_name_t *zn;

                if (mze->mze_name[0] == 0)
                        continue;
                dprintf("adding %s=%llu\n",
                    mze->mze_name, mze->mze_value);
                zn = zap_name_alloc(zap, mze->mze_name, 0);
                err = fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd,
                    tag, tx);
                zap = zn->zn_zap;       /* fzap_add_cd() may change zap */
                zap_name_free(zn);
                if (err)
                        break;
        }
        zio_buf_free(mzp, sz);
        *zapp = zap;
        return (err);
}

/*
 * The "normflags" determine the behavior of the matchtype_t which is
 * passed to zap_lookup_norm().  Names which have the same normalized
 * version will be stored with the same hash value, and therefore we can
 * perform normalization-insensitive lookups.  We can be Unicode form-
 * insensitive and/or case-insensitive.  The following flags are valid for
 * "normflags":
 *
 * U8_TEXTPREP_NFC
 * U8_TEXTPREP_NFD
 * U8_TEXTPREP_NFKC
 * U8_TEXTPREP_NFKD
 * U8_TEXTPREP_TOUPPER
 *
 * The *_NF* (Normalization Form) flags are mutually exclusive; at most one
 * of them may be supplied.
 */

static void
mzap_create_impl(objset_t *os, uint64_t obj, int normflags, zap_flags_t flags,
    dmu_tx_t *tx)
{
        dmu_buf_t *db;
        mzap_phys_t *zp;

        VERIFY(0 == dmu_buf_hold(os, obj, 0, FTAG, &db, DMU_READ_NO_PREFETCH));

#ifdef ZFS_DEBUG
        {
                dmu_object_info_t doi;
                dmu_object_info_from_db(db, &doi);
                ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
        }
#endif

        dmu_buf_will_dirty(db, tx);
        zp = db->db_data;
        zp->mz_block_type = ZBT_MICRO;
        zp->mz_salt = ((uintptr_t)db ^ (uintptr_t)tx ^ (obj << 1)) | 1ULL;
        zp->mz_normflags = normflags;
        dmu_buf_rele(db, FTAG);

        if (flags != 0) {
                zap_t *zap;
                /* Only fat zap supports flags; upgrade immediately. */
                VERIFY(0 == zap_lockdir(os, obj, tx, RW_WRITER,
                    B_FALSE, B_FALSE, FTAG, &zap));
                VERIFY3U(0, ==, mzap_upgrade(&zap, FTAG, tx, flags));
                zap_unlockdir(zap, FTAG);
        }
}

*os
, uint64_t obj
, dmu_object_type_t ot
,
713 dmu_object_type_t bonustype
, int bonuslen
, dmu_tx_t
*tx
)
715 return (zap_create_claim_norm(os
, obj
,
716 0, ot
, bonustype
, bonuslen
, tx
));
int
zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags,
    dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        int err;

        err = dmu_object_claim(os, obj, ot, 0, bonustype, bonuslen, tx);
        if (err != 0)
                return (err);
        mzap_create_impl(os, obj, normflags, 0, tx);
        return (0);
}

uint64_t
zap_create(objset_t *os, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        return (zap_create_norm(os, 0, ot, bonustype, bonuslen, tx));
}

uint64_t
zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        uint64_t obj = dmu_object_alloc(os, ot, 0, bonustype, bonuslen, tx);

        mzap_create_impl(os, obj, normflags, 0, tx);
        return (obj);
}

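/*
 * Example usage (a minimal sketch, assuming an objset "os" and an open
 * transaction "tx"): create a case-insensitive ZAP by folding names to
 * upper case during normalization.
 *
 *	uint64_t obj = zap_create_norm(os, U8_TEXTPREP_TOUPPER,
 *	    DMU_OT_DIRECTORY_CONTENTS, DMU_OT_NONE, 0, tx);
 */
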
uint64_t
zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
    dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        uint64_t obj = dmu_object_alloc(os, ot, 0, bonustype, bonuslen, tx);

        ASSERT(leaf_blockshift >= SPA_MINBLOCKSHIFT &&
            leaf_blockshift <= SPA_OLD_MAXBLOCKSHIFT &&
            indirect_blockshift >= SPA_MINBLOCKSHIFT &&
            indirect_blockshift <= SPA_OLD_MAXBLOCKSHIFT);

        VERIFY(dmu_object_set_blocksize(os, obj,
            1ULL << leaf_blockshift, indirect_blockshift, tx) == 0);

        mzap_create_impl(os, obj, normflags, flags, tx);
        return (obj);
}

int
zap_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx)
{
        /*
         * dmu_object_free will free the object number and free the
         * data.  Freeing the data will cause our pageout function to be
         * called, which will destroy our data (zap_leaf_t's and zap_t).
         */

        return (dmu_object_free(os, zapobj, tx));
}

void
zap_evict_sync(void *dbu)
{
        zap_t *zap = dbu;

        rw_destroy(&zap->zap_rwlock);

        if (zap->zap_ismicro)
                mze_destroy(zap);
        else
                mutex_destroy(&zap->zap_f.zap_num_entries_mtx);

        kmem_free(zap, sizeof (zap_t));
}

int
zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
{
        zap_t *zap;
        int err;

        err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
        if (err != 0)
                return (err);
        if (!zap->zap_ismicro) {
                err = fzap_count(zap, count);
        } else {
                *count = zap->zap_m.zap_num_entries;
        }
        zap_unlockdir(zap, FTAG);
        return (err);
}

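/*
 * Example usage (a minimal sketch, assuming "os" and "zapobj" name an
 * existing ZAP object):
 *
 *	uint64_t count;
 *
 *	if (zap_count(os, zapobj, &count) == 0)
 *		dprintf("zap %llu holds %llu entries\n", zapobj, count);
 */
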
/*
 * zn may be NULL; if not specified, it will be computed if needed.
 * See also the comment above zap_entry_normalization_conflict().
 */
static boolean_t
mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze)
{
        mzap_ent_t *other;
        int direction = AVL_BEFORE;
        boolean_t allocdzn = B_FALSE;

        if (zap->zap_normflags == 0)
                return (B_FALSE);

again:
        for (other = avl_walk(&zap->zap_m.zap_avl, mze, direction);
            other && other->mze_hash == mze->mze_hash;
            other = avl_walk(&zap->zap_m.zap_avl, other, direction)) {

                if (zn == NULL) {
                        zn = zap_name_alloc(zap, MZE_PHYS(zap, mze)->mze_name,
                            MT_NORMALIZE);
                        allocdzn = B_TRUE;
                }
                if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
                        if (allocdzn)
                                zap_name_free(zn);
                        return (B_TRUE);
                }
        }

        if (direction == AVL_BEFORE) {
                direction = AVL_AFTER;
                goto again;
        }

        if (allocdzn)
                zap_name_free(zn);
        return (B_FALSE);
}

/*
 * Routines for manipulating attributes.
 */

int
zap_lookup(objset_t *os, uint64_t zapobj, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf)
{
        return (zap_lookup_norm(os, zapobj, name, integer_size,
            num_integers, buf, 0, NULL, 0, NULL));
}

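/*
 * Example usage (a minimal sketch, assuming the entry was stored as a
 * single 64-bit integer, which is all a microzap can hold):
 *
 *	uint64_t value;
 *	int error = zap_lookup(os, zapobj, "entry-name", 8, 1, &value);
 */
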
static int
zap_lookup_impl(zap_t *zap, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf,
    matchtype_t mt, char *realname, int rn_len,
    boolean_t *ncp)
{
        int err = 0;
        mzap_ent_t *mze;
        zap_name_t *zn;

        zn = zap_name_alloc(zap, name, mt);
        if (zn == NULL)
                return (SET_ERROR(ENOTSUP));

        if (!zap->zap_ismicro) {
                err = fzap_lookup(zn, integer_size, num_integers, buf,
                    realname, rn_len, ncp);
        } else {
                mze = mze_find(zn);
                if (mze == NULL) {
                        err = SET_ERROR(ENOENT);
                } else {
                        if (num_integers < 1) {
                                err = SET_ERROR(EOVERFLOW);
                        } else if (integer_size != 8) {
                                err = SET_ERROR(EINVAL);
                        } else {
                                *(uint64_t *)buf =
                                    MZE_PHYS(zap, mze)->mze_value;
                                (void) strlcpy(realname,
                                    MZE_PHYS(zap, mze)->mze_name, rn_len);
                                if (ncp) {
                                        *ncp = mzap_normalization_conflict(zap,
                                            zn, mze);
                                }
                        }
                }
        }
        zap_name_free(zn);
        return (err);
}

*os
, uint64_t zapobj
, const char *name
,
911 uint64_t integer_size
, uint64_t num_integers
, void *buf
,
912 matchtype_t mt
, char *realname
, int rn_len
,
918 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, FTAG
, &zap
);
921 err
= zap_lookup_impl(zap
, name
, integer_size
,
922 num_integers
, buf
, mt
, realname
, rn_len
, ncp
);
923 zap_unlockdir(zap
, FTAG
);
928 zap_lookup_by_dnode(dnode_t
*dn
, const char *name
,
929 uint64_t integer_size
, uint64_t num_integers
, void *buf
)
931 return (zap_lookup_norm_by_dnode(dn
, name
, integer_size
,
932 num_integers
, buf
, 0, NULL
, 0, NULL
));
int
zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
    uint64_t integer_size, uint64_t num_integers, void *buf,
    matchtype_t mt, char *realname, int rn_len,
    boolean_t *ncp)
{
        zap_t *zap;
        int err;

        err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
            FTAG, &zap);
        if (err != 0)
                return (err);
        err = zap_lookup_impl(zap, name, integer_size,
            num_integers, buf, mt, realname, rn_len, ncp);
        zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints)
{
        zap_t *zap;
        zap_name_t *zn;
        int err;

        err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc_uint64(zap, key, key_numints);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }

        fzap_prefetch(zn);
        zap_name_free(zn);
        zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf)
{
        zap_t *zap;
        zap_name_t *zn;
        int err;

        err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc_uint64(zap, key, key_numints);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }

        err = fzap_lookup(zn, integer_size, num_integers, buf,
            NULL, 0, NULL);
        zap_name_free(zn);
        zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_contains(objset_t *os, uint64_t zapobj, const char *name)
{
        int err = zap_lookup_norm(os, zapobj, name, 0,
            0, NULL, 0, NULL, 0, NULL);
        if (err == EOVERFLOW || err == EINVAL)
                err = 0; /* found, but skipped reading the value */
        return (err);
}

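/*
 * Example usage (a minimal sketch): test for existence without reading
 * the value.
 *
 *	if (zap_contains(os, zapobj, "entry-name") == 0) {
 *		... the attribute exists ...
 *	}
 */
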
int
zap_length(objset_t *os, uint64_t zapobj, const char *name,
    uint64_t *integer_size, uint64_t *num_integers)
{
        zap_t *zap;
        mzap_ent_t *mze;
        zap_name_t *zn;
        int err;

        err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc(zap, name, 0);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }
        if (!zap->zap_ismicro) {
                err = fzap_length(zn, integer_size, num_integers);
        } else {
                mze = mze_find(zn);
                if (mze == NULL) {
                        err = SET_ERROR(ENOENT);
                } else {
                        if (integer_size)
                                *integer_size = 8;
                        if (num_integers)
                                *num_integers = 1;
                }
        }
        zap_name_free(zn);
        zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, uint64_t *integer_size, uint64_t *num_integers)
{
        zap_t *zap;
        zap_name_t *zn;
        int err;

        err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc_uint64(zap, key, key_numints);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }
        err = fzap_length(zn, integer_size, num_integers);
        zap_name_free(zn);
        zap_unlockdir(zap, FTAG);
        return (err);
}

static void
mzap_addent(zap_name_t *zn, uint64_t value)
{
        int i;
        zap_t *zap = zn->zn_zap;
        int start = zap->zap_m.zap_alloc_next;
        uint32_t cd;

        ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

#ifdef ZFS_DEBUG
        for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
                mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
                ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
        }
#endif

        cd = mze_find_unused_cd(zap, zn->zn_hash);
        /* given the limited size of the microzap, this can't happen */
        ASSERT(cd < zap_maxcd(zap));

again:
        for (i = start; i < zap->zap_m.zap_num_chunks; i++) {
                mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
                if (mze->mze_name[0] == 0) {
                        mze->mze_value = value;
                        mze->mze_cd = cd;
                        (void) strcpy(mze->mze_name, zn->zn_key_orig);
                        zap->zap_m.zap_num_entries++;
                        zap->zap_m.zap_alloc_next = i+1;
                        if (zap->zap_m.zap_alloc_next ==
                            zap->zap_m.zap_num_chunks)
                                zap->zap_m.zap_alloc_next = 0;
                        mze_insert(zap, i, zn->zn_hash);
                        return;
                }
        }
        if (start != 0) {
                start = 0;
                goto again;
        }
        ASSERT(!"out of entries!");
}

static int
zap_add_impl(zap_t *zap, const char *key,
    int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx, void *tag)
{
        int err = 0;
        mzap_ent_t *mze;
        const uint64_t *intval = val;
        zap_name_t *zn;

        zn = zap_name_alloc(zap, key, 0);
        if (zn == NULL) {
                zap_unlockdir(zap, tag);
                return (SET_ERROR(ENOTSUP));
        }
        if (!zap->zap_ismicro) {
                err = fzap_add(zn, integer_size, num_integers, val, tag, tx);
                zap = zn->zn_zap;       /* fzap_add() may change zap */
        } else if (integer_size != 8 || num_integers != 1 ||
            strlen(key) >= MZAP_NAME_LEN) {
                err = mzap_upgrade(&zn->zn_zap, tag, tx, 0);
                if (err == 0) {
                        err = fzap_add(zn, integer_size, num_integers, val,
                            tag, tx);
                }
                zap = zn->zn_zap;       /* fzap_add() may change zap */
        } else {
                mze = mze_find(zn);
                if (mze != NULL) {
                        err = SET_ERROR(EEXIST);
                } else {
                        mzap_addent(zn, *intval);
                }
        }
        ASSERT(zap == zn->zn_zap);
        zap_name_free(zn);
        if (zap != NULL)        /* may be NULL if fzap_add() failed */
                zap_unlockdir(zap, tag);
        return (err);
}

int
zap_add(objset_t *os, uint64_t zapobj, const char *key,
    int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx)
{
        zap_t *zap;
        int err;

        err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
        if (err != 0)
                return (err);
        err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
        /* zap_add_impl() calls zap_unlockdir() */
        return (err);
}

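/*
 * Example usage (a minimal sketch, assuming an open transaction "tx"
 * that already holds the ZAP object via dmu_tx_hold_zap()):
 *
 *	uint64_t value = 1;
 *	int error = zap_add(os, zapobj, "entry-name", 8, 1, &value, tx);
 *
 * EEXIST is returned if the name is already present.
 */
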
int
zap_add_by_dnode(dnode_t *dn, const char *key,
    int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx)
{
        zap_t *zap;
        int err;

        err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
        if (err != 0)
                return (err);
        err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
        /* zap_add_impl() calls zap_unlockdir() */
        return (err);
}

int
zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, int integer_size, uint64_t num_integers,
    const void *val, dmu_tx_t *tx)
{
        zap_t *zap;
        zap_name_t *zn;
        int err;

        err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc_uint64(zap, key, key_numints);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }
        err = fzap_add(zn, integer_size, num_integers, val, FTAG, tx);
        zap = zn->zn_zap;       /* fzap_add() may change zap */
        zap_name_free(zn);
        if (zap != NULL)        /* may be NULL if fzap_add() failed */
                zap_unlockdir(zap, FTAG);
        return (err);
}

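/*
 * zap_update() has upsert semantics: if the named entry already exists
 * its value is overwritten, otherwise a new entry is added (see the
 * mze_find()/mzap_addent() branch below and the corresponding
 * fzap_update() path).
 */
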
int
zap_update(objset_t *os, uint64_t zapobj, const char *name,
    int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
        zap_t *zap;
        mzap_ent_t *mze;
        uint64_t oldval;
        const uint64_t *intval = val;
        zap_name_t *zn;
        int err;

#ifdef ZFS_DEBUG
        /*
         * If there is an old value, it shouldn't change across the
         * lockdir (e.g., due to bp rewrite's translation).
         */
        if (integer_size == 8 && num_integers == 1)
                (void) zap_lookup(os, zapobj, name, 8, 1, &oldval);
#endif

        err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc(zap, name, 0);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }
        if (!zap->zap_ismicro) {
                err = fzap_update(zn, integer_size, num_integers, val,
                    FTAG, tx);
                zap = zn->zn_zap;       /* fzap_update() may change zap */
        } else if (integer_size != 8 || num_integers != 1 ||
            strlen(name) >= MZAP_NAME_LEN) {
                dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
                    zapobj, integer_size, num_integers, name);
                err = mzap_upgrade(&zn->zn_zap, FTAG, tx, 0);
                if (err == 0) {
                        err = fzap_update(zn, integer_size, num_integers,
                            val, FTAG, tx);
                }
                zap = zn->zn_zap;       /* fzap_update() may change zap */
        } else {
                mze = mze_find(zn);
                if (mze != NULL) {
                        ASSERT3U(MZE_PHYS(zap, mze)->mze_value, ==, oldval);
                        MZE_PHYS(zap, mze)->mze_value = *intval;
                } else {
                        mzap_addent(zn, *intval);
                }
        }
        ASSERT(zap == zn->zn_zap);
        zap_name_free(zn);
        if (zap != NULL)        /* may be NULL if fzap_upgrade() failed */
                zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints,
    int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
        zap_t *zap;
        zap_name_t *zn;
        int err;

        err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc_uint64(zap, key, key_numints);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }
        err = fzap_update(zn, integer_size, num_integers, val, FTAG, tx);
        zap = zn->zn_zap;       /* fzap_update() may change zap */
        zap_name_free(zn);
        if (zap != NULL)        /* may be NULL if fzap_upgrade() failed */
                zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx)
{
        return (zap_remove_norm(os, zapobj, name, 0, tx));
}

static int
zap_remove_impl(zap_t *zap, const char *name,
    matchtype_t mt, dmu_tx_t *tx)
{
        mzap_ent_t *mze;
        zap_name_t *zn;
        int err = 0;

        zn = zap_name_alloc(zap, name, mt);
        if (zn == NULL)
                return (SET_ERROR(ENOTSUP));
        if (!zap->zap_ismicro) {
                err = fzap_remove(zn, tx);
        } else {
                mze = mze_find(zn);
                if (mze == NULL) {
                        err = SET_ERROR(ENOENT);
                } else {
                        zap->zap_m.zap_num_entries--;
                        bzero(&zap_m_phys(zap)->mz_chunk[mze->mze_chunkid],
                            sizeof (mzap_ent_phys_t));
                        mze_remove(zap, mze);
                }
        }
        zap_name_free(zn);
        return (err);
}

int
zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
    matchtype_t mt, dmu_tx_t *tx)
{
        zap_t *zap;
        int err;

        err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
        if (err)
                return (err);
        err = zap_remove_impl(zap, name, mt, tx);
        zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_remove_by_dnode(dnode_t *dn, const char *name, dmu_tx_t *tx)
{
        zap_t *zap;
        int err;

        err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
        if (err)
                return (err);
        err = zap_remove_impl(zap, name, 0, tx);
        zap_unlockdir(zap, FTAG);
        return (err);
}

int
zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
    int key_numints, dmu_tx_t *tx)
{
        zap_t *zap;
        zap_name_t *zn;
        int err;

        err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
        if (err != 0)
                return (err);
        zn = zap_name_alloc_uint64(zap, key, key_numints);
        if (zn == NULL) {
                zap_unlockdir(zap, FTAG);
                return (SET_ERROR(ENOTSUP));
        }
        err = fzap_remove(zn, tx);
        zap_name_free(zn);
        zap_unlockdir(zap, FTAG);
        return (err);
}

/*
 * Routines for iterating over the attributes.
 */

void
zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
    uint64_t serialized)
{
        zc->zc_objset = os;
        zc->zc_zap = NULL;
        zc->zc_leaf = NULL;
        zc->zc_zapobj = zapobj;
        zc->zc_serialized = serialized;
        zc->zc_hash = 0;
        zc->zc_cd = 0;
}

void
zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
{
        zap_cursor_init_serialized(zc, os, zapobj, 0);
}

void
zap_cursor_fini(zap_cursor_t *zc)
{
        if (zc->zc_zap) {
                rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
                zap_unlockdir(zc->zc_zap, NULL);
                zc->zc_zap = NULL;
        }
        if (zc->zc_leaf) {
                rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
                zap_put_leaf(zc->zc_leaf);
                zc->zc_leaf = NULL;
        }
        zc->zc_objset = NULL;
}

uint64_t
zap_cursor_serialize(zap_cursor_t *zc)
{
        if (zc->zc_hash == -1ULL)
                return (-1ULL);
        if (zc->zc_zap == NULL)
                return (zc->zc_serialized);
        ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
        ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));

        /*
         * We want to keep the high 32 bits of the cursor zero if we can, so
         * that 32-bit programs can access this.  So usually use a small
         * (28-bit) hash value so we can fit 4 bits of cd into the low 32-bits
         * of the cursor.
         *
         * [ collision differentiator | zap_hashbits()-bit hash value ]
         */
        return ((zc->zc_hash >> (64 - zap_hashbits(zc->zc_zap))) |
            ((uint64_t)zc->zc_cd << zap_hashbits(zc->zc_zap)));
}

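/*
 * Worked example of the layout above: with 28 hash bits, the serialized
 * cookie is (zc_hash >> 36) | ((uint64_t)zc_cd << 28), so a cursor with
 * few collisions fits entirely in the low 32 bits.
 */
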
int
zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
{
        int err;
        avl_index_t idx;
        mzap_ent_t mze_tofind;
        mzap_ent_t *mze;

        if (zc->zc_hash == -1ULL)
                return (SET_ERROR(ENOENT));

        if (zc->zc_zap == NULL) {
                int hb;
                err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
                    RW_READER, TRUE, FALSE, NULL, &zc->zc_zap);
                if (err != 0)
                        return (err);

                /*
                 * To support zap_cursor_init_serialized, advance, retrieve,
                 * we must add to the existing zc_cd, which may already
                 * be 1 due to the zap_cursor_advance.
                 */
                ASSERT(zc->zc_hash == 0);
                hb = zap_hashbits(zc->zc_zap);
                zc->zc_hash = zc->zc_serialized << (64 - hb);
                zc->zc_cd += zc->zc_serialized >> hb;
                if (zc->zc_cd >= zap_maxcd(zc->zc_zap)) /* corrupt serialized */
                        zc->zc_cd = 0;
        } else {
                rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
        }
        if (!zc->zc_zap->zap_ismicro) {
                err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
        } else {
                mze_tofind.mze_hash = zc->zc_hash;
                mze_tofind.mze_cd = zc->zc_cd;

                mze = avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx);
                if (mze == NULL) {
                        mze = avl_nearest(&zc->zc_zap->zap_m.zap_avl,
                            idx, AVL_AFTER);
                }
                if (mze) {
                        mzap_ent_phys_t *mzep = MZE_PHYS(zc->zc_zap, mze);
                        ASSERT3U(mze->mze_cd, ==, mzep->mze_cd);
                        za->za_normalization_conflict =
                            mzap_normalization_conflict(zc->zc_zap, NULL, mze);
                        za->za_integer_length = 8;
                        za->za_num_integers = 1;
                        za->za_first_integer = mzep->mze_value;
                        (void) strcpy(za->za_name, mzep->mze_name);
                        zc->zc_hash = mze->mze_hash;
                        zc->zc_cd = mze->mze_cd;
                        err = 0;
                } else {
                        zc->zc_hash = -1ULL;
                        err = SET_ERROR(ENOENT);
                }
        }
        rw_exit(&zc->zc_zap->zap_rwlock);
        return (err);
}

void
zap_cursor_advance(zap_cursor_t *zc)
{
        if (zc->zc_hash == -1ULL)
                return;
        zc->zc_cd++;
}

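/*
 * Typical iteration pattern (a minimal sketch, assuming "os" and
 * "zapobj" name an existing ZAP object):
 *
 *	zap_cursor_t zc;
 *	zap_attribute_t za;
 *
 *	for (zap_cursor_init(&zc, os, zapobj);
 *	    zap_cursor_retrieve(&zc, &za) == 0;
 *	    zap_cursor_advance(&zc)) {
 *		dprintf("%s = %llu\n", za.za_name, za.za_first_integer);
 *	}
 *	zap_cursor_fini(&zc);
 */
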
int
zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
{
        zap_t *zap;
        int err;

        err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
        if (err != 0)
                return (err);

        bzero(zs, sizeof (zap_stats_t));

        if (zap->zap_ismicro) {
                zs->zs_blocksize = zap->zap_dbuf->db_size;
                zs->zs_num_entries = zap->zap_m.zap_num_entries;
                zs->zs_num_blocks = 1;
        } else {
                fzap_get_stats(zap, zs);
        }
        zap_unlockdir(zap, FTAG);
        return (err);
}