/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 iXsystems, Inc
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */
#include <sys/zfs_context.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/sunddi.h>
#include <sys/sa_impl.h>
#include <sys/dnode.h>
#include <sys/errno.h>
#include <sys/zfs_context.h>
/*
 * ZFS System attributes:
 *
 * A generic mechanism to allow for arbitrary attributes
 * to be stored in a dnode.  The data will be stored in the bonus buffer of
 * the dnode and if necessary a special "spill" block will be used to handle
 * overflow situations.  The spill block will be sized to fit the data
 * from 512 - 128K.  When a spill block is used the BP (blkptr_t) for the
 * spill block is stored at the end of the current bonus buffer.  Any
 * attributes that would be in the way of the blkptr_t will be relocated
 * into the spill block.
 *
 * Attribute registration:
 *
 * Stored persistently on a per dataset basis
 * a mapping between attribute "string" names and their actual attribute
 * numeric values, length, and byteswap function.  The names are only used
 * during registration.  All attributes are known by their unique attribute
 * id value.  If an attribute can have a variable size then the value
 * 0 will be used to indicate this.
 *
 * Attribute Layouts:
 *
 * Attribute layouts are a way to compactly store multiple attributes, but
 * without taking the overhead associated with managing each attribute
 * individually.  Since you will typically have the same set of attributes
 * stored in the same order a single table will be used to represent that
 * layout.  The ZPL for example will usually have only about 10 different
 * layouts (regular files, device files, symlinks,
 * regular files + scanstamp, files/dir with extended attributes, and then
 * you have the possibility of all of those minus ACL, because it would
 * be kicked out into the spill block)
 *
 * Layouts are simply an array of the attributes and their
 * ordering i.e. [ 0, 1, 4, 5, 2 ]
 *
 * Each distinct layout is given a unique layout number and that is what's
 * stored in the header at the beginning of the SA data buffer.
 *
 * A layout only covers a single dbuf (bonus or spill).  If a set of
 * attributes is split up between the bonus buffer and a spill buffer then
 * two different layouts will be used.  This allows us to byteswap the
 * spill without looking at the bonus buffer and keeps the on disk format of
 * the bonus and spill buffer the same.
 *
 * Adding a single attribute will cause the entire set of attributes to
 * be rewritten and could result in a new layout number being constructed
 * as part of the rewrite if no such layout exists for the new set of
 * attributes.  The new attribute will be appended to the end of the already
 * existing attributes.
 *
 * Both the attribute registration and attribute layout information are
 * stored in normal ZAP attributes.  There should be a small number of
 * known layouts and the set of attributes is assumed to typically be quite
 * small.
 *
 * The registered attributes and layout "table" information is maintained
 * in core and a special "sa_os_t" is attached to the objset_t.
 *
 * A special interface is provided to allow for quickly applying
 * a large set of attributes at once.  sa_replace_all_by_template() is
 * used to set an array of attributes.  This is used by the ZPL when
 * creating a brand new file.  The template that is passed into the function
 * specifies the attribute, size for variable length attributes, location of
 * data and special "data locator" function if the data isn't in a contiguous
 * location.
 *
 * Byteswap implications:
 *
 * Since the SA attributes are not entirely self describing we can't do
 * the normal byteswap processing.  The special ZAP layout attribute and
 * attribute registration attributes define the byteswap function and the
 * size of the attributes, unless it is variable sized.
 * The normal ZFS byteswapping infrastructure assumes you don't need
 * to read any objects in order to do the necessary byteswapping.  Whereas
 * SA attributes can only be properly byteswapped if the dataset is opened
 * and the layout/attribute ZAP attributes are available.  Because of this
 * the SA attributes will be byteswapped when they are first accessed by
 * the SA code that will read the SA data.
 */
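/*
 * Illustrative usage sketch (hedged; not compiled as part of this file):
 * a consumer such as the ZPL might lay down a whole attribute set with
 * sa_replace_all_by_template() and later read one attribute back with
 * sa_lookup().  The attribute ids (my_mode_attr, my_size_attr), the handle
 * "hdl" and the transaction "tx" are hypothetical placeholders; only the SA
 * calls and the SA_ADD_BULK_ATTR() macro are interfaces provided here.
 *
 *	sa_bulk_attr_t bulk[2];
 *	int count = 0, error;
 *	uint64_t mode = 0644, size = 0;
 *
 *	SA_ADD_BULK_ATTR(bulk, count, my_mode_attr, NULL, &mode, 8);
 *	SA_ADD_BULK_ATTR(bulk, count, my_size_attr, NULL, &size, 8);
 *	error = sa_replace_all_by_template(hdl, bulk, count, tx);
 *
 *	error = sa_lookup(hdl, my_size_attr, &size, sizeof (size));
 */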
typedef void (sa_iterfunc_t)(void *hdr, void *addr, sa_attr_type_t,
    uint16_t length, int length_idx, boolean_t, void *userp);

static int sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype);
static void sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab);
static sa_idx_tab_t *sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype,
    sa_hdr_phys_t *hdr);
static void sa_idx_tab_rele(objset_t *os, void *arg);
static void sa_copy_data(sa_data_locator_t *func, void *start, void *target,
    int buflen);
static int sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
    sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
    uint16_t buflen, dmu_tx_t *tx);

arc_byteswap_func_t *sa_bswap_table[] = {
	byteswap_uint64_array,
	byteswap_uint32_array,
	byteswap_uint16_array,
	byteswap_uint8_array,
	zfs_acl_byteswap,
};
#define	SA_COPY_DATA(f, s, t, l) \
	{ \
		if (f == NULL) { \
			if (l == 8) { \
				*(uint64_t *)t = *(uint64_t *)s; \
			} else if (l == 16) { \
				*(uint64_t *)t = *(uint64_t *)s; \
				*(uint64_t *)((uintptr_t)t + 8) = \
				    *(uint64_t *)((uintptr_t)s + 8); \
			} else { \
				bcopy(s, t, l); \
			} \
		} else { \
			sa_copy_data(f, s, t, l); \
		} \
	}
/*
 * This table is fixed and cannot be changed.  Its purpose is to
 * allow the SA code to work with both old/new ZPL file systems.
 * It contains the list of legacy attributes.  These attributes aren't
 * stored in the "attribute" registry zap objects, since older ZPL file systems
 * won't have the registry.  Only objsets of type ZFS_TYPE_FILESYSTEM will
 * use this static table.
 */
sa_attr_reg_t sa_legacy_attrs[] = {
	{"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 0},
	{"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 1},
	{"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 2},
	{"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY, 3},
	{"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY, 4},
	{"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY, 5},
	{"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY, 6},
	{"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY, 7},
	{"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY, 8},
	{"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY, 9},
	{"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY, 10},
	{"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY, 11},
	{"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY, 12},
	{"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY, 13},
	{"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY, 14},
	{"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15},
};

/*
 * This is only used for objects of type DMU_OT_ZNODE
 */
sa_attr_type_t sa_legacy_zpl_layout[] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

/*
 * Special dummy layout used for buffers with no attributes.
 */
sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };

static int sa_legacy_attr_count = 16;
static kmem_cache_t *sa_cache = NULL;
static int
sa_cache_constructor(void *buf, void *unused, int kmflag)
{
	sa_handle_t *hdl = buf;

	mutex_init(&hdl->sa_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

static void
sa_cache_destructor(void *buf, void *unused)
{
	sa_handle_t *hdl = buf;

	mutex_destroy(&hdl->sa_lock);
}

void
sa_cache_init(void)
{
	sa_cache = kmem_cache_create("sa_cache",
	    sizeof (sa_handle_t), 0, sa_cache_constructor,
	    sa_cache_destructor, NULL, NULL, NULL, 0);
}

void
sa_cache_fini(void)
{
	kmem_cache_destroy(sa_cache);
}
static int
layout_num_compare(const void *arg1, const void *arg2)
{
	const sa_lot_t *node1 = arg1;
	const sa_lot_t *node2 = arg2;

	if (node1->lot_num > node2->lot_num)
		return (1);
	else if (node1->lot_num < node2->lot_num)
		return (-1);
	return (0);
}

static int
layout_hash_compare(const void *arg1, const void *arg2)
{
	const sa_lot_t *node1 = arg1;
	const sa_lot_t *node2 = arg2;

	if (node1->lot_hash > node2->lot_hash)
		return (1);
	if (node1->lot_hash < node2->lot_hash)
		return (-1);
	if (node1->lot_instance > node2->lot_instance)
		return (1);
	if (node1->lot_instance < node2->lot_instance)
		return (-1);
	return (0);
}
boolean_t
sa_layout_equal(sa_lot_t *tbf, sa_attr_type_t *attrs, int count)
{
	int i;

	if (count != tbf->lot_attr_count)
		return (1);

	for (i = 0; i != count; i++) {
		if (attrs[i] != tbf->lot_attrs[i])
			return (1);
	}
	return (0);
}

#define	SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])

static uint64_t
sa_layout_info_hash(sa_attr_type_t *attrs, int attr_count)
{
	int i;
	uint64_t crc = -1ULL;

	for (i = 0; i != attr_count; i++)
		crc ^= SA_ATTR_HASH(attrs[i]);

	return (crc);
}
static int
sa_get_spill(sa_handle_t *hdl)
{
	int rc;

	if (hdl->sa_spill == NULL) {
		if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
		    &hdl->sa_spill)) == 0)
			VERIFY(0 == sa_build_index(hdl, SA_SPILL));
	} else {
		rc = 0;
	}

	return (rc);
}
/*
 * Main attribute lookup/update function
 * returns 0 for success or non zero for failures
 *
 * Operates on bulk array, first failure will abort further processing
 */
static int
sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
    sa_data_op_t data_op, dmu_tx_t *tx)
{
	sa_os_t *sa = hdl->sa_os->os_sa;
	int i;
	int error = 0;
	sa_buf_type_t buftypes;

	buftypes = 0;

	ASSERT(count > 0);
	for (i = 0; i != count; i++) {
		ASSERT(bulk[i].sa_attr <= hdl->sa_os->os_sa->sa_num_attrs);

		bulk[i].sa_addr = NULL;
		/* First check the bonus buffer */

		if (hdl->sa_bonus_tab && TOC_ATTR_PRESENT(
		    hdl->sa_bonus_tab->sa_idx_tab[bulk[i].sa_attr])) {
			SA_ATTR_INFO(sa, hdl->sa_bonus_tab,
			    SA_GET_HDR(hdl, SA_BONUS),
			    bulk[i].sa_attr, bulk[i], SA_BONUS, hdl);
			if (tx && !(buftypes & SA_BONUS)) {
				dmu_buf_will_dirty(hdl->sa_bonus, tx);
				buftypes |= SA_BONUS;
			}
		}
		if (bulk[i].sa_addr == NULL &&
		    ((error = sa_get_spill(hdl)) == 0)) {
			if (TOC_ATTR_PRESENT(
			    hdl->sa_spill_tab->sa_idx_tab[bulk[i].sa_attr])) {
				SA_ATTR_INFO(sa, hdl->sa_spill_tab,
				    SA_GET_HDR(hdl, SA_SPILL),
				    bulk[i].sa_attr, bulk[i], SA_SPILL, hdl);
				if (tx && !(buftypes & SA_SPILL) &&
				    bulk[i].sa_size == bulk[i].sa_length) {
					dmu_buf_will_dirty(hdl->sa_spill, tx);
					buftypes |= SA_SPILL;
				}
			}
		}
		if (error && error != ENOENT) {
			return ((error == ECKSUM) ? EIO : error);
		}

		switch (data_op) {
		case SA_LOOKUP:
			if (bulk[i].sa_addr == NULL)
				return (SET_ERROR(ENOENT));
			if (bulk[i].sa_data) {
				SA_COPY_DATA(bulk[i].sa_data_func,
				    bulk[i].sa_addr, bulk[i].sa_data,
				    bulk[i].sa_size);
			}
			continue;

		case SA_UPDATE:
			/* existing rewrite of attr */
			if (bulk[i].sa_addr &&
			    bulk[i].sa_size == bulk[i].sa_length) {
				SA_COPY_DATA(bulk[i].sa_data_func,
				    bulk[i].sa_data, bulk[i].sa_addr,
				    bulk[i].sa_length);
				continue;
			} else if (bulk[i].sa_addr) { /* attr size change */
				error = sa_modify_attrs(hdl, bulk[i].sa_attr,
				    SA_REPLACE, bulk[i].sa_data_func,
				    bulk[i].sa_data, bulk[i].sa_length, tx);
			} else { /* adding new attribute */
				error = sa_modify_attrs(hdl, bulk[i].sa_attr,
				    SA_ADD, bulk[i].sa_data_func,
				    bulk[i].sa_data, bulk[i].sa_length, tx);
			}
			if (error)
				return (error);
			break;
		}
	}
	return (error);
}
static sa_lot_t *
sa_add_layout_entry(objset_t *os, sa_attr_type_t *attrs, int attr_count,
    uint64_t lot_num, uint64_t hash, boolean_t zapadd, dmu_tx_t *tx)
{
	sa_os_t *sa = os->os_sa;
	sa_lot_t *tb, *findtb;
	int i;
	avl_index_t loc;

	ASSERT(MUTEX_HELD(&sa->sa_lock));
	tb = kmem_zalloc(sizeof (sa_lot_t), KM_SLEEP);
	tb->lot_attr_count = attr_count;
	tb->lot_attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
	    KM_SLEEP);
	bcopy(attrs, tb->lot_attrs, sizeof (sa_attr_type_t) * attr_count);
	tb->lot_num = lot_num;
	tb->lot_hash = hash;
	tb->lot_instance = 0;

	if (zapadd) {
		char attr_name[8];

		if (sa->sa_layout_attr_obj == 0) {
			sa->sa_layout_attr_obj = zap_create_link(os,
			    DMU_OT_SA_ATTR_LAYOUTS,
			    sa->sa_master_obj, SA_LAYOUTS, tx);
		}

		(void) snprintf(attr_name, sizeof (attr_name),
		    "%d", (int)lot_num);
		VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
		    attr_name, 2, attr_count, attrs, tx));
	}

	list_create(&tb->lot_idx_tab, sizeof (sa_idx_tab_t),
	    offsetof(sa_idx_tab_t, sa_next));

	for (i = 0; i != attr_count; i++) {
		if (sa->sa_attr_table[tb->lot_attrs[i]].sa_length == 0)
			tb->lot_var_sizes++;
	}

	avl_add(&sa->sa_layout_num_tree, tb);

	/* verify we don't have a hash collision */
	if ((findtb = avl_find(&sa->sa_layout_hash_tree, tb, &loc)) != NULL) {
		for (; findtb && findtb->lot_hash == hash;
		    findtb = AVL_NEXT(&sa->sa_layout_hash_tree, findtb)) {
			if (findtb->lot_instance != tb->lot_instance)
				break;
			tb->lot_instance++;
		}
	}
	avl_add(&sa->sa_layout_hash_tree, tb);
	return (tb);
}
static void
sa_find_layout(objset_t *os, uint64_t hash, sa_attr_type_t *attrs,
    int count, dmu_tx_t *tx, sa_lot_t **lot)
{
	sa_lot_t *tb, tbsearch;
	avl_index_t loc;
	sa_os_t *sa = os->os_sa;
	boolean_t found = B_FALSE;

	mutex_enter(&sa->sa_lock);
	tbsearch.lot_hash = hash;
	tbsearch.lot_instance = 0;
	tb = avl_find(&sa->sa_layout_hash_tree, &tbsearch, &loc);
	if (tb) {
		for (; tb && tb->lot_hash == hash;
		    tb = AVL_NEXT(&sa->sa_layout_hash_tree, tb)) {
			if (sa_layout_equal(tb, attrs, count) == 0) {
				found = B_TRUE;
				break;
			}
		}
	}
	if (!found) {
		tb = sa_add_layout_entry(os, attrs, count,
		    avl_numnodes(&sa->sa_layout_num_tree), hash, B_TRUE, tx);
	}
	mutex_exit(&sa->sa_lock);
	*lot = tb;
}
static int
sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
{
	int error;
	uint32_t blocksize;

	if (size == 0) {
		blocksize = SPA_MINBLOCKSIZE;
	} else if (size > SPA_OLD_MAXBLOCKSIZE) {
		ASSERT(0);
		return (SET_ERROR(EFBIG));
	} else {
		blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
	}

	error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
	ASSERT(error == 0);
	return (error);
}
static void
sa_copy_data(sa_data_locator_t *func, void *datastart, void *target, int buflen)
{
	if (func == NULL) {
		bcopy(datastart, target, buflen);
	} else {
		boolean_t start;
		int bytes;
		void *dataptr;
		void *saptr = target;
		uint32_t length;

		start = B_TRUE;
		bytes = 0;
		while (bytes < buflen) {
			func(&dataptr, &length, buflen, start, datastart);
			bcopy(dataptr, saptr, length);
			saptr = (void *)((caddr_t)saptr + length);
			bytes += length;
			start = B_FALSE;
		}
	}
}
/*
 * Determine several different sizes:
 * first the sa header size,
 * the number of bytes to be stored,
 * and, if spill would occur, the index in the attribute array is returned.
 *
 * The boolean will_spill will be set when spilling is necessary.  It
 * is only set when the buftype is SA_BONUS.
 */
static int
sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count,
    dmu_buf_t *db, sa_buf_type_t buftype, int *index, int *total,
    boolean_t *will_spill)
{
	int var_size = 0;
	int i;
	int hdrsize;
	int extra_hdrsize;
	int full_space;

	if (buftype == SA_BONUS && sa->sa_force_spill) {
		*total = 0;
		*index = 0;
		*will_spill = B_TRUE;
		return (0);
	}

	*index = -1;
	*total = 0;
	*will_spill = B_FALSE;

	extra_hdrsize = 0;
	hdrsize = (SA_BONUSTYPE_FROM_DB(db) == DMU_OT_ZNODE) ? 0 :
	    sizeof (sa_hdr_phys_t);

	full_space = (buftype == SA_BONUS) ? DN_MAX_BONUSLEN : db->db_size;
	ASSERT(IS_P2ALIGNED(full_space, 8));

	for (i = 0; i != attr_count; i++) {
		boolean_t is_var_sz;

		*total = P2ROUNDUP(*total, 8);
		*total += attr_desc[i].sa_length;
		if (*will_spill)
			continue;

		is_var_sz = (SA_REGISTERED_LEN(sa, attr_desc[i].sa_attr) == 0);
		if (is_var_sz)
			var_size++;

		if (is_var_sz && var_size > 1) {
			/*
			 * Don't worry that the spill block might overflow.
			 * It will be resized if needed in sa_build_layouts().
			 */
			if (buftype == SA_SPILL ||
			    P2ROUNDUP(hdrsize + sizeof (uint16_t), 8) +
			    *total < full_space) {
				/*
				 * Account for header space used by array of
				 * optional sizes of variable-length attributes.
				 * Record the extra header size in case this
				 * increase needs to be reversed due to
				 * spill-over.
				 */
				hdrsize += sizeof (uint16_t);
				if (*index != -1)
					extra_hdrsize += sizeof (uint16_t);
			} else {
				ASSERT(buftype == SA_BONUS);
				if (*index == -1)
					*index = i;
				*will_spill = B_TRUE;
				continue;
			}
		}

		/*
		 * find index of where spill *could* occur.
		 * Then continue to count of remainder attribute
		 * space.  The sum is used later for sizing bonus
		 * and spill buffer.
		 */
		if (buftype == SA_BONUS && *index == -1 &&
		    *total + P2ROUNDUP(hdrsize, 8) >
		    (full_space - sizeof (blkptr_t)))
			*index = i;

		if (*total + P2ROUNDUP(hdrsize, 8) > full_space &&
		    buftype == SA_BONUS)
			*will_spill = B_TRUE;
	}

	if (*will_spill)
		hdrsize -= extra_hdrsize;

	hdrsize = P2ROUNDUP(hdrsize, 8);
	return (hdrsize);
}
#define	BUF_SPACE_NEEDED(total, header)	(total + header)

/*
 * Find layout that corresponds to ordering of attributes
 * If not found a new layout number is created and added to
 * persistent layout tables.
 */
static int
sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
    dmu_tx_t *tx)
{
	sa_os_t *sa = hdl->sa_os->os_sa;
	uint64_t hash;
	sa_buf_type_t buftype;
	sa_hdr_phys_t *sahdr;
	void *data_start;
	int buf_space;
	sa_attr_type_t *attrs, *attrs_start;
	int i, lot_count;
	int hdrsize;
	int spillhdrsize = 0;
	int used;
	dmu_object_type_t bonustype;
	sa_lot_t *lot;
	int len_idx;
	int spill_used;
	boolean_t spilling;

	dmu_buf_will_dirty(hdl->sa_bonus, tx);
	bonustype = SA_BONUSTYPE_FROM_DB(hdl->sa_bonus);

	/* first determine bonus header size and sum of all attributes */
	hdrsize = sa_find_sizes(sa, attr_desc, attr_count, hdl->sa_bonus,
	    SA_BONUS, &i, &used, &spilling);

	if (used > SPA_OLD_MAXBLOCKSIZE)
		return (SET_ERROR(EFBIG));

	VERIFY(0 == dmu_set_bonus(hdl->sa_bonus, spilling ?
	    MIN(DN_MAX_BONUSLEN - sizeof (blkptr_t), used + hdrsize) :
	    used + hdrsize, tx));

	ASSERT((bonustype == DMU_OT_ZNODE && spilling == 0) ||
	    bonustype == DMU_OT_SA);

	/* setup and size spill buffer when needed */
	if (spilling) {
		boolean_t dummy;

		if (hdl->sa_spill == NULL) {
			VERIFY(dmu_spill_hold_by_bonus(hdl->sa_bonus, NULL,
			    &hdl->sa_spill) == 0);
		}
		dmu_buf_will_dirty(hdl->sa_spill, tx);

		spillhdrsize = sa_find_sizes(sa, &attr_desc[i],
		    attr_count - i, hdl->sa_spill, SA_SPILL, &i,
		    &spill_used, &dummy);

		if (spill_used > SPA_OLD_MAXBLOCKSIZE)
			return (SET_ERROR(EFBIG));

		buf_space = hdl->sa_spill->db_size - spillhdrsize;
		if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
		    hdl->sa_spill->db_size)
			VERIFY(0 == sa_resize_spill(hdl,
			    BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
	}

	/* setup starting pointers to lay down data */
	data_start = (void *)((uintptr_t)hdl->sa_bonus->db_data + hdrsize);
	sahdr = (sa_hdr_phys_t *)hdl->sa_bonus->db_data;
	buftype = SA_BONUS;

	if (spilling)
		buf_space = (sa->sa_force_spill) ?
		    0 : SA_BLKPTR_SPACE - hdrsize;
	else
		buf_space = hdl->sa_bonus->db_size - hdrsize;

	attrs_start = attrs = kmem_alloc(sizeof (sa_attr_type_t) * attr_count,
	    KM_SLEEP);
	lot_count = 0;

	for (i = 0, len_idx = 0, hash = -1ULL; i != attr_count; i++) {
		uint16_t length;

		ASSERT(IS_P2ALIGNED(data_start, 8));
		ASSERT(IS_P2ALIGNED(buf_space, 8));
		attrs[i] = attr_desc[i].sa_attr;
		length = SA_REGISTERED_LEN(sa, attrs[i]);
		if (length == 0)
			length = attr_desc[i].sa_length;

		if (buf_space < length) { /* switch to spill buffer */
			VERIFY(spilling);
			VERIFY(bonustype == DMU_OT_SA);
			if (buftype == SA_BONUS && !sa->sa_force_spill) {
				sa_find_layout(hdl->sa_os, hash, attrs_start,
				    lot_count, tx, &lot);
				SA_SET_HDR(sahdr, lot->lot_num, hdrsize);
			}

			buftype = SA_SPILL;
			hash = -1ULL;
			len_idx = 0;

			sahdr = (sa_hdr_phys_t *)hdl->sa_spill->db_data;
			sahdr->sa_magic = SA_MAGIC;
			data_start = (void *)((uintptr_t)sahdr +
			    spillhdrsize);
			attrs_start = &attrs[i];
			buf_space = hdl->sa_spill->db_size - spillhdrsize;
			lot_count = 0;
		}
		hash ^= SA_ATTR_HASH(attrs[i]);
		attr_desc[i].sa_addr = data_start;
		attr_desc[i].sa_size = length;
		SA_COPY_DATA(attr_desc[i].sa_data_func, attr_desc[i].sa_data,
		    data_start, length);
		if (sa->sa_attr_table[attrs[i]].sa_length == 0) {
			sahdr->sa_lengths[len_idx++] = length;
		}
		data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
		    length), 8);
		buf_space -= P2ROUNDUP(length, 8);
		lot_count++;
	}

	sa_find_layout(hdl->sa_os, hash, attrs_start, lot_count, tx, &lot);

	/*
	 * Verify that old znodes always have layout number 0.
	 * Must be DMU_OT_SA for arbitrary layouts
	 */
	VERIFY((bonustype == DMU_OT_ZNODE && lot->lot_num == 0) ||
	    (bonustype == DMU_OT_SA && lot->lot_num > 1));

	if (bonustype == DMU_OT_SA) {
		SA_SET_HDR(sahdr, lot->lot_num,
		    buftype == SA_BONUS ? hdrsize : spillhdrsize);
	}

	kmem_free(attrs, sizeof (sa_attr_type_t) * attr_count);
	if (hdl->sa_bonus_tab) {
		sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);
		hdl->sa_bonus_tab = NULL;
	}
	if (!sa->sa_force_spill)
		VERIFY(0 == sa_build_index(hdl, SA_BONUS));
	if (hdl->sa_spill) {
		sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
		if (!spilling) {
			/*
			 * remove spill block that is no longer needed.
			 */
			dmu_buf_rele(hdl->sa_spill, NULL);
			hdl->sa_spill = NULL;
			hdl->sa_spill_tab = NULL;
			VERIFY(0 == dmu_rm_spill(hdl->sa_os,
			    sa_handle_object(hdl), tx));
		} else {
			VERIFY(0 == sa_build_index(hdl, SA_SPILL));
		}
	}

	return (0);
}
static void
sa_free_attr_table(sa_os_t *sa)
{
	int i;

	if (sa->sa_attr_table == NULL)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (sa->sa_attr_table[i].sa_name)
			kmem_free(sa->sa_attr_table[i].sa_name,
			    strlen(sa->sa_attr_table[i].sa_name) + 1);
	}

	kmem_free(sa->sa_attr_table,
	    sizeof (sa_attr_table_t) * sa->sa_num_attrs);

	sa->sa_attr_table = NULL;
}
static int
sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
{
	sa_os_t *sa = os->os_sa;
	uint64_t sa_attr_count = 0;
	uint64_t sa_reg_count = 0;
	int error = 0;
	uint64_t attr_value;
	sa_attr_table_t *tb;
	zap_cursor_t zc;
	zap_attribute_t za;
	int registered_count = 0;
	int i;
	dmu_objset_type_t ostype = dmu_objset_type(os);

	sa->sa_user_table =
	    kmem_zalloc(count * sizeof (sa_attr_type_t), KM_SLEEP);
	sa->sa_user_table_sz = count * sizeof (sa_attr_type_t);

	if (sa->sa_reg_attr_obj != 0) {
		error = zap_count(os, sa->sa_reg_attr_obj,
		    &sa_attr_count);

		/*
		 * Make sure we retrieved a count and that it isn't zero
		 */
		if (error || (error == 0 && sa_attr_count == 0)) {
			if (error == 0)
				error = SET_ERROR(EINVAL);
			goto bail;
		}
		sa_reg_count = sa_attr_count;
	}

	if (ostype == DMU_OST_ZFS && sa_attr_count == 0)
		sa_attr_count += sa_legacy_attr_count;

	/* Allocate attribute numbers for attributes that aren't registered */
	for (i = 0; i != count; i++) {
		boolean_t found = B_FALSE;
		int j;

		if (ostype == DMU_OST_ZFS) {
			for (j = 0; j != sa_legacy_attr_count; j++) {
				if (strcmp(reg_attrs[i].sa_name,
				    sa_legacy_attrs[j].sa_name) == 0) {
					sa->sa_user_table[i] =
					    sa_legacy_attrs[j].sa_attr;
					found = B_TRUE;
				}
			}
		}
		if (found)
			continue;

		if (sa->sa_reg_attr_obj)
			error = zap_lookup(os, sa->sa_reg_attr_obj,
			    reg_attrs[i].sa_name, 8, 1, &attr_value);
		else
			error = SET_ERROR(ENOENT);
		switch (error) {
		case ENOENT:
			sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
			sa_attr_count++;
			break;
		case 0:
			sa->sa_user_table[i] = ATTR_NUM(attr_value);
			break;
		default:
			goto bail;
		}
	}

	sa->sa_num_attrs = sa_attr_count;
	tb = sa->sa_attr_table =
	    kmem_zalloc(sizeof (sa_attr_table_t) * sa_attr_count, KM_SLEEP);

	/*
	 * Attribute table is constructed from requested attribute list,
	 * previously foreign registered attributes, and also the legacy
	 * ZPL set of attributes.
	 */

	if (sa->sa_reg_attr_obj) {
		for (zap_cursor_init(&zc, os, sa->sa_reg_attr_obj);
		    (error = zap_cursor_retrieve(&zc, &za)) == 0;
		    zap_cursor_advance(&zc)) {
			uint64_t value;

			value = za.za_first_integer;

			registered_count++;
			tb[ATTR_NUM(value)].sa_attr = ATTR_NUM(value);
			tb[ATTR_NUM(value)].sa_length = ATTR_LENGTH(value);
			tb[ATTR_NUM(value)].sa_byteswap = ATTR_BSWAP(value);
			tb[ATTR_NUM(value)].sa_registered = B_TRUE;

			if (tb[ATTR_NUM(value)].sa_name) {
				continue;
			}
			tb[ATTR_NUM(value)].sa_name =
			    kmem_zalloc(strlen(za.za_name) + 1, KM_SLEEP);
			(void) strlcpy(tb[ATTR_NUM(value)].sa_name, za.za_name,
			    strlen(za.za_name) + 1);
		}
		zap_cursor_fini(&zc);
		/*
		 * Make sure we processed the correct number of registered
		 * attributes
		 */
		if (registered_count != sa_reg_count) {
			ASSERT(error != 0);
			goto bail;
		}
	}

	if (ostype == DMU_OST_ZFS) {
		for (i = 0; i != sa_legacy_attr_count; i++) {
			if (tb[i].sa_name)
				continue;
			tb[i].sa_attr = sa_legacy_attrs[i].sa_attr;
			tb[i].sa_length = sa_legacy_attrs[i].sa_length;
			tb[i].sa_byteswap = sa_legacy_attrs[i].sa_byteswap;
			tb[i].sa_registered = B_FALSE;
			tb[i].sa_name =
			    kmem_zalloc(strlen(sa_legacy_attrs[i].sa_name) + 1,
			    KM_SLEEP);
			(void) strlcpy(tb[i].sa_name,
			    sa_legacy_attrs[i].sa_name,
			    strlen(sa_legacy_attrs[i].sa_name) + 1);
		}
	}

	for (i = 0; i != count; i++) {
		sa_attr_type_t attr_id;

		attr_id = sa->sa_user_table[i];
		if (tb[attr_id].sa_name)
			continue;

		tb[attr_id].sa_length = reg_attrs[i].sa_length;
		tb[attr_id].sa_byteswap = reg_attrs[i].sa_byteswap;
		tb[attr_id].sa_attr = attr_id;
		tb[attr_id].sa_name =
		    kmem_zalloc(strlen(reg_attrs[i].sa_name) + 1, KM_SLEEP);
		(void) strlcpy(tb[attr_id].sa_name, reg_attrs[i].sa_name,
		    strlen(reg_attrs[i].sa_name) + 1);
	}

	sa->sa_need_attr_registration =
	    (sa_attr_count != registered_count);

	return (0);
bail:
	kmem_free(sa->sa_user_table, count * sizeof (sa_attr_type_t));
	sa->sa_user_table = NULL;
	sa_free_attr_table(sa);
	return ((error != 0) ? error : EINVAL);
}
int
sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
    sa_attr_type_t **user_table)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	sa_os_t *sa;
	dmu_objset_type_t ostype = dmu_objset_type(os);
	sa_attr_type_t *tb;
	int error;

	mutex_enter(&os->os_user_ptr_lock);
	if (os->os_sa) {
		mutex_enter(&os->os_sa->sa_lock);
		mutex_exit(&os->os_user_ptr_lock);
		tb = os->os_sa->sa_user_table;
		mutex_exit(&os->os_sa->sa_lock);
		*user_table = tb;
		return (0);
	}

	sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
	mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL);
	sa->sa_master_obj = sa_obj;

	os->os_sa = sa;
	mutex_enter(&sa->sa_lock);
	mutex_exit(&os->os_user_ptr_lock);
	avl_create(&sa->sa_layout_num_tree, layout_num_compare,
	    sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
	avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
	    sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));

	if (sa_obj) {
		error = zap_lookup(os, sa_obj, SA_LAYOUTS,
		    8, 1, &sa->sa_layout_attr_obj);
		if (error != 0 && error != ENOENT)
			goto fail;
		error = zap_lookup(os, sa_obj, SA_REGISTRY,
		    8, 1, &sa->sa_reg_attr_obj);
		if (error != 0 && error != ENOENT)
			goto fail;
	}

	if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
		goto fail;

	if (sa->sa_layout_attr_obj != 0) {
		uint64_t layout_count;

		error = zap_count(os, sa->sa_layout_attr_obj,
		    &layout_count);

		/*
		 * Layout number count should be > 0
		 */
		if (error || (error == 0 && layout_count == 0)) {
			if (error == 0)
				error = SET_ERROR(EINVAL);
			goto fail;
		}

		for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
		    (error = zap_cursor_retrieve(&zc, &za)) == 0;
		    zap_cursor_advance(&zc)) {
			sa_attr_type_t *lot_attrs;
			uint64_t lot_num;

			lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
			    za.za_num_integers, KM_SLEEP);

			if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
			    za.za_name, 2, za.za_num_integers,
			    lot_attrs))) != 0) {
				kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
				    za.za_num_integers);
				break;
			}
			VERIFY(ddi_strtoull(za.za_name, NULL, 10,
			    (unsigned long long *)&lot_num) == 0);

			(void) sa_add_layout_entry(os, lot_attrs,
			    za.za_num_integers, lot_num,
			    sa_layout_info_hash(lot_attrs,
			    za.za_num_integers), B_FALSE, NULL);
			kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
			    za.za_num_integers);
		}
		zap_cursor_fini(&zc);

		/*
		 * Make sure layout count matches number of entries added
		 * to AVL tree
		 */
		if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
			ASSERT(error != 0);
			goto fail;
		}
	}

	/* Add special layout number for old ZNODES */
	if (ostype == DMU_OST_ZFS) {
		(void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
		    sa_legacy_attr_count, 0,
		    sa_layout_info_hash(sa_legacy_zpl_layout,
		    sa_legacy_attr_count), B_FALSE, NULL);

		(void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
		    0, B_FALSE, NULL);
	}
	*user_table = os->os_sa->sa_user_table;
	mutex_exit(&sa->sa_lock);
	return (0);
fail:
	os->os_sa = NULL;
	sa_free_attr_table(sa);
	if (sa->sa_user_table)
		kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
	mutex_exit(&sa->sa_lock);
	avl_destroy(&sa->sa_layout_hash_tree);
	avl_destroy(&sa->sa_layout_num_tree);
	mutex_destroy(&sa->sa_lock);
	kmem_free(sa, sizeof (sa_os_t));
	return ((error == ECKSUM) ? EIO : error);
}
void
sa_tear_down(objset_t *os)
{
	sa_os_t *sa = os->os_sa;
	sa_lot_t *layout;
	void *cookie;

	kmem_free(sa->sa_user_table, sa->sa_user_table_sz);

	/* Free up attr table */

	sa_free_attr_table(sa);

	cookie = NULL;
	while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
		sa_idx_tab_t *tab;
		while (tab = list_head(&layout->lot_idx_tab)) {
			ASSERT(refcount_count(&tab->sa_refcount));
			sa_idx_tab_rele(os, tab);
		}
	}

	cookie = NULL;
	while (layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie)) {
		kmem_free(layout->lot_attrs,
		    sizeof (sa_attr_type_t) * layout->lot_attr_count);
		kmem_free(layout, sizeof (sa_lot_t));
	}

	avl_destroy(&sa->sa_layout_hash_tree);
	avl_destroy(&sa->sa_layout_num_tree);
	mutex_destroy(&sa->sa_lock);

	kmem_free(sa, sizeof (sa_os_t));
	os->os_sa = NULL;
}
static void
sa_build_idx_tab(void *hdr, void *attr_addr, sa_attr_type_t attr,
    uint16_t length, int length_idx, boolean_t var_length, void *userp)
{
	sa_idx_tab_t *idx_tab = userp;

	if (var_length) {
		ASSERT(idx_tab->sa_variable_lengths);
		idx_tab->sa_variable_lengths[length_idx] = length;
	}
	TOC_ATTR_ENCODE(idx_tab->sa_idx_tab[attr], length_idx,
	    (uint32_t)((uintptr_t)attr_addr - (uintptr_t)hdr));
}
static void
sa_attr_iter(objset_t *os, sa_hdr_phys_t *hdr, dmu_object_type_t type,
    sa_iterfunc_t func, sa_lot_t *tab, void *userp)
{
	void *data_start;
	sa_lot_t *tb = tab;
	sa_lot_t search;
	avl_index_t loc;
	sa_os_t *sa = os->os_sa;
	int i;
	uint16_t *length_start = NULL;
	uint8_t length_idx = 0;

	if (tab == NULL) {
		search.lot_num = SA_LAYOUT_NUM(hdr, type);
		tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);
		ASSERT(tb);
	}

	if (IS_SA_BONUSTYPE(type)) {
		data_start = (void *)P2ROUNDUP(((uintptr_t)hdr +
		    offsetof(sa_hdr_phys_t, sa_lengths) +
		    (sizeof (uint16_t) * tb->lot_var_sizes)), 8);
		length_start = hdr->sa_lengths;
	} else {
		data_start = hdr;
	}

	for (i = 0; i != tb->lot_attr_count; i++) {
		int attr_length, reg_length;
		uint8_t idx_len;

		reg_length = sa->sa_attr_table[tb->lot_attrs[i]].sa_length;
		if (reg_length) {
			attr_length = reg_length;
			idx_len = 0;
		} else {
			attr_length = length_start[length_idx];
			idx_len = length_idx++;
		}

		func(hdr, data_start, tb->lot_attrs[i], attr_length,
		    idx_len, reg_length == 0 ? B_TRUE : B_FALSE, userp);

		data_start = (void *)P2ROUNDUP(((uintptr_t)data_start +
		    attr_length), 8);
	}
}
static void
sa_byteswap_cb(void *hdr, void *attr_addr, sa_attr_type_t attr,
    uint16_t length, int length_idx, boolean_t variable_length, void *userp)
{
	sa_handle_t *hdl = userp;
	sa_os_t *sa = hdl->sa_os->os_sa;

	sa_bswap_table[sa->sa_attr_table[attr].sa_byteswap](attr_addr, length);
}
static void
sa_byteswap(sa_handle_t *hdl, sa_buf_type_t buftype)
{
	sa_hdr_phys_t *sa_hdr_phys = SA_GET_HDR(hdl, buftype);
	dmu_buf_impl_t *db;
	sa_os_t *sa = hdl->sa_os->os_sa;
	int num_lengths = 1;
	int i;

	ASSERT(MUTEX_HELD(&sa->sa_lock));
	if (sa_hdr_phys->sa_magic == SA_MAGIC)
		return;

	db = SA_GET_DB(hdl, buftype);

	if (buftype == SA_SPILL) {
		arc_release(db->db_buf, NULL);
		arc_buf_thaw(db->db_buf);
	}

	sa_hdr_phys->sa_magic = BSWAP_32(sa_hdr_phys->sa_magic);
	sa_hdr_phys->sa_layout_info = BSWAP_16(sa_hdr_phys->sa_layout_info);

	/*
	 * Determine number of variable lengths in header
	 * The standard 8 byte header has one for free and a
	 * 16 byte header would have 4 + 1;
	 */
	if (SA_HDR_SIZE(sa_hdr_phys) > 8)
		num_lengths += (SA_HDR_SIZE(sa_hdr_phys) - 8) >> 1;
	for (i = 0; i != num_lengths; i++)
		sa_hdr_phys->sa_lengths[i] =
		    BSWAP_16(sa_hdr_phys->sa_lengths[i]);

	sa_attr_iter(hdl->sa_os, sa_hdr_phys, DMU_OT_SA,
	    sa_byteswap_cb, NULL, hdl);

	if (buftype == SA_SPILL)
		arc_buf_freeze(((dmu_buf_impl_t *)hdl->sa_spill)->db_buf);
}
static int
sa_build_index(sa_handle_t *hdl, sa_buf_type_t buftype)
{
	sa_hdr_phys_t *sa_hdr_phys;
	dmu_buf_impl_t *db = SA_GET_DB(hdl, buftype);
	dmu_object_type_t bonustype = SA_BONUSTYPE_FROM_DB(db);
	sa_os_t *sa = hdl->sa_os->os_sa;
	sa_idx_tab_t *idx_tab;

	sa_hdr_phys = SA_GET_HDR(hdl, buftype);

	mutex_enter(&sa->sa_lock);

	/* Do we need to byteswap? */

	/* only check if not old znode */
	if (IS_SA_BONUSTYPE(bonustype) && sa_hdr_phys->sa_magic != SA_MAGIC &&
	    sa_hdr_phys->sa_magic != 0) {
		VERIFY(BSWAP_32(sa_hdr_phys->sa_magic) == SA_MAGIC);
		sa_byteswap(hdl, buftype);
	}

	idx_tab = sa_find_idx_tab(hdl->sa_os, bonustype, sa_hdr_phys);

	if (buftype == SA_BONUS)
		hdl->sa_bonus_tab = idx_tab;
	else
		hdl->sa_spill_tab = idx_tab;

	mutex_exit(&sa->sa_lock);
	return (0);
}
static void
sa_evict_sync(void *dbu)
{
	panic("evicting sa dbuf\n");
}
static void
sa_idx_tab_rele(objset_t *os, void *arg)
{
	sa_os_t *sa = os->os_sa;
	sa_idx_tab_t *idx_tab = arg;

	if (idx_tab == NULL)
		return;

	mutex_enter(&sa->sa_lock);
	if (refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
		list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
		if (idx_tab->sa_variable_lengths)
			kmem_free(idx_tab->sa_variable_lengths,
			    sizeof (uint16_t) *
			    idx_tab->sa_layout->lot_var_sizes);
		refcount_destroy(&idx_tab->sa_refcount);
		kmem_free(idx_tab->sa_idx_tab,
		    sizeof (uint32_t) * sa->sa_num_attrs);
		kmem_free(idx_tab, sizeof (sa_idx_tab_t));
	}
	mutex_exit(&sa->sa_lock);
}
static void
sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
{
	sa_os_t *sa = os->os_sa;

	ASSERT(MUTEX_HELD(&sa->sa_lock));
	(void) refcount_add(&idx_tab->sa_refcount, NULL);
}
void
sa_handle_destroy(sa_handle_t *hdl)
{
	dmu_buf_t *db = hdl->sa_bonus;

	mutex_enter(&hdl->sa_lock);
	(void) dmu_buf_remove_user(db, &hdl->sa_dbu);

	if (hdl->sa_bonus_tab)
		sa_idx_tab_rele(hdl->sa_os, hdl->sa_bonus_tab);

	if (hdl->sa_spill_tab)
		sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);

	dmu_buf_rele(hdl->sa_bonus, NULL);

	if (hdl->sa_spill)
		dmu_buf_rele((dmu_buf_t *)hdl->sa_spill, NULL);
	mutex_exit(&hdl->sa_lock);

	kmem_cache_free(sa_cache, hdl);
}
int
sa_handle_get_from_db(objset_t *os, dmu_buf_t *db, void *userp,
    sa_handle_type_t hdl_type, sa_handle_t **handlepp)
{
	int error = 0;
	dmu_object_info_t doi;
	sa_handle_t *handle = NULL;

	dmu_object_info_from_db(db, &doi);
	ASSERT(doi.doi_bonus_type == DMU_OT_SA ||
	    doi.doi_bonus_type == DMU_OT_ZNODE);

	/* find handle, if it exists */
	/* if one doesn't exist then create a new one, and initialize it */

	if (hdl_type == SA_HDL_SHARED)
		handle = dmu_buf_get_user(db);

	if (handle == NULL) {
		sa_handle_t *winner = NULL;

		handle = kmem_cache_alloc(sa_cache, KM_SLEEP);
		handle->sa_dbu.dbu_evict_func_sync = NULL;
		handle->sa_dbu.dbu_evict_func_async = NULL;
		handle->sa_userp = userp;
		handle->sa_bonus = db;
		handle->sa_os = os;
		handle->sa_spill = NULL;
		handle->sa_bonus_tab = NULL;
		handle->sa_spill_tab = NULL;

		error = sa_build_index(handle, SA_BONUS);

		if (hdl_type == SA_HDL_SHARED) {
			dmu_buf_init_user(&handle->sa_dbu, sa_evict_sync, NULL,
			    NULL);
			winner = dmu_buf_set_user_ie(db, &handle->sa_dbu);
		}

		if (winner != NULL) {
			kmem_cache_free(sa_cache, handle);
			handle = winner;
		}
	}
	*handlepp = handle;

	return (error);
}
*objset
, uint64_t objid
, void *userp
,
1412 sa_handle_type_t hdl_type
, sa_handle_t
**handlepp
)
1417 if (error
= dmu_bonus_hold(objset
, objid
, NULL
, &db
))
1420 return (sa_handle_get_from_db(objset
, db
, userp
, hdl_type
,
int
sa_buf_hold(objset_t *objset, uint64_t obj_num, void *tag, dmu_buf_t **db)
{
	return (dmu_bonus_hold(objset, obj_num, tag, db));
}

void
sa_buf_rele(dmu_buf_t *db, void *tag)
{
	dmu_buf_rele(db, tag);
}
static int
sa_lookup_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count)
{
	ASSERT(hdl);
	ASSERT(MUTEX_HELD(&hdl->sa_lock));
	return (sa_attr_op(hdl, bulk, count, SA_LOOKUP, NULL));
}
int
sa_lookup(sa_handle_t *hdl, sa_attr_type_t attr, void *buf, uint32_t buflen)
{
	int error;
	sa_bulk_attr_t bulk;

	bulk.sa_attr = attr;
	bulk.sa_data = buf;
	bulk.sa_length = buflen;
	bulk.sa_data_func = NULL;

	ASSERT(hdl);
	mutex_enter(&hdl->sa_lock);
	error = sa_lookup_impl(hdl, &bulk, 1);
	mutex_exit(&hdl->sa_lock);

	return (error);
}
int
sa_lookup_uio(sa_handle_t *hdl, sa_attr_type_t attr, uio_t *uio)
{
	int error;
	sa_bulk_attr_t bulk;

	bulk.sa_data = NULL;
	bulk.sa_attr = attr;
	bulk.sa_data_func = NULL;

	ASSERT(hdl);

	mutex_enter(&hdl->sa_lock);
	if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) == 0) {
		error = uiomove((void *)bulk.sa_addr, MIN(bulk.sa_size,
		    uio->uio_resid), UIO_READ, uio);
	}
	mutex_exit(&hdl->sa_lock);

	return (error);
}
static sa_idx_tab_t *
sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
{
	sa_idx_tab_t *idx_tab;
	sa_os_t *sa = os->os_sa;
	sa_lot_t *tb, search;
	avl_index_t loc;

	/*
	 * Determine layout number.  If SA node and header == 0 then
	 * force the index table to the dummy "1" empty layout.
	 *
	 * The layout number would only be zero for a newly created file
	 * that has not added any attributes yet, or with crypto enabled which
	 * doesn't write any attributes to the bonus buffer.
	 */

	search.lot_num = SA_LAYOUT_NUM(hdr, bonustype);

	tb = avl_find(&sa->sa_layout_num_tree, &search, &loc);

	/* Verify header size is consistent with layout information */
	ASSERT(tb);
	ASSERT(IS_SA_BONUSTYPE(bonustype) &&
	    SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) ||
	    (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0));

	/*
	 * See if any of the already existing TOC entries can be reused?
	 */

	for (idx_tab = list_head(&tb->lot_idx_tab); idx_tab;
	    idx_tab = list_next(&tb->lot_idx_tab, idx_tab)) {
		boolean_t valid_idx = B_TRUE;
		int i;

		if (tb->lot_var_sizes != 0 &&
		    idx_tab->sa_variable_lengths != NULL) {
			for (i = 0; i != tb->lot_var_sizes; i++) {
				if (hdr->sa_lengths[i] !=
				    idx_tab->sa_variable_lengths[i]) {
					valid_idx = B_FALSE;
					break;
				}
			}
		}
		if (valid_idx) {
			sa_idx_tab_hold(os, idx_tab);
			return (idx_tab);
		}
	}

	/* No such luck, create a new entry */
	idx_tab = kmem_zalloc(sizeof (sa_idx_tab_t), KM_SLEEP);
	idx_tab->sa_idx_tab =
	    kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
	idx_tab->sa_layout = tb;
	refcount_create(&idx_tab->sa_refcount);
	if (tb->lot_var_sizes)
		idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
		    tb->lot_var_sizes, KM_SLEEP);

	sa_attr_iter(os, hdr, bonustype, sa_build_idx_tab,
	    tb, idx_tab);
	sa_idx_tab_hold(os, idx_tab);	/* one hold for consumer */
	sa_idx_tab_hold(os, idx_tab);	/* one for layout */
	list_insert_tail(&tb->lot_idx_tab, idx_tab);
	return (idx_tab);
}
void
sa_default_locator(void **dataptr, uint32_t *len, uint32_t total_len,
    boolean_t start, void *userdata)
{
	ASSERT(start);

	*dataptr = userdata;
	*len = total_len;
}
static void
sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
{
	uint64_t attr_value = 0;
	sa_os_t *sa = hdl->sa_os->os_sa;
	sa_attr_table_t *tb = sa->sa_attr_table;
	int i;

	mutex_enter(&sa->sa_lock);

	if (!sa->sa_need_attr_registration || sa->sa_master_obj == NULL) {
		mutex_exit(&sa->sa_lock);
		return;
	}

	if (sa->sa_reg_attr_obj == NULL) {
		sa->sa_reg_attr_obj = zap_create_link(hdl->sa_os,
		    DMU_OT_SA_ATTR_REGISTRATION,
		    sa->sa_master_obj, SA_REGISTRY, tx);
	}
	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (sa->sa_attr_table[i].sa_registered)
			continue;
		ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
		    tb[i].sa_byteswap);
		VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
		    tb[i].sa_name, 8, 1, &attr_value, tx));
		tb[i].sa_registered = B_TRUE;
	}
	sa->sa_need_attr_registration = B_FALSE;
	mutex_exit(&sa->sa_lock);
}
/*
 * Replace all attributes with attributes specified in template.
 * If dnode had a spill buffer then those attributes will also be
 * replaced, possibly with just an empty spill block
 *
 * This interface is intended to only be used for bulk adding of
 * attributes for a new file.  It will also be used by the ZPL
 * when converting an old formatted znode to native SA support.
 */
int
sa_replace_all_by_template_locked(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
    int attr_count, dmu_tx_t *tx)
{
	sa_os_t *sa = hdl->sa_os->os_sa;

	if (sa->sa_need_attr_registration)
		sa_attr_register_sync(hdl, tx);
	return (sa_build_layouts(hdl, attr_desc, attr_count, tx));
}

int
sa_replace_all_by_template(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc,
    int attr_count, dmu_tx_t *tx)
{
	int error;

	mutex_enter(&hdl->sa_lock);
	error = sa_replace_all_by_template_locked(hdl, attr_desc,
	    attr_count, tx);
	mutex_exit(&hdl->sa_lock);

	return (error);
}
/*
 * Add/remove a single attribute or replace a variable-sized attribute value
 * with a value of a different size, and then rewrite the entire set
 * of attributes.
 * Same-length attribute value replacement (including fixed-length attributes)
 * is handled more efficiently by the upper layers.
 */
static int
sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
    sa_data_op_t action, sa_data_locator_t *locator, void *datastart,
    uint16_t buflen, dmu_tx_t *tx)
{
	sa_os_t *sa = hdl->sa_os->os_sa;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
	dnode_t *dn;
	sa_bulk_attr_t *attr_desc;
	void *old_data[2];
	int bonus_attr_count = 0;
	int bonus_data_size = 0;
	int spill_data_size = 0;
	int spill_attr_count = 0;
	int error;
	uint16_t length, reg_length;
	int i, j, k, length_idx;
	sa_hdr_phys_t *hdr;
	sa_idx_tab_t *idx_tab;
	int attr_count;
	int count;

	ASSERT(MUTEX_HELD(&hdl->sa_lock));

	/* First make of copy of the old data */

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn->dn_bonuslen != 0) {
		bonus_data_size = hdl->sa_bonus->db_size;
		old_data[0] = kmem_alloc(bonus_data_size, KM_SLEEP);
		bcopy(hdl->sa_bonus->db_data, old_data[0],
		    hdl->sa_bonus->db_size);
		bonus_attr_count = hdl->sa_bonus_tab->sa_layout->lot_attr_count;
	} else {
		old_data[0] = NULL;
	}
	DB_DNODE_EXIT(db);

	/* Bring spill buffer online if it isn't currently */

	if ((error = sa_get_spill(hdl)) == 0) {
		spill_data_size = hdl->sa_spill->db_size;
		old_data[1] = kmem_alloc(spill_data_size, KM_SLEEP);
		bcopy(hdl->sa_spill->db_data, old_data[1],
		    hdl->sa_spill->db_size);
		spill_attr_count =
		    hdl->sa_spill_tab->sa_layout->lot_attr_count;
	} else if (error && error != ENOENT) {
		if (old_data[0])
			kmem_free(old_data[0], bonus_data_size);
		return (error);
	} else {
		old_data[1] = NULL;
	}

	/* build descriptor of all attributes */

	attr_count = bonus_attr_count + spill_attr_count;
	if (action == SA_ADD)
		attr_count++;
	else if (action == SA_REMOVE)
		attr_count--;

	attr_desc = kmem_zalloc(sizeof (sa_bulk_attr_t) * attr_count, KM_SLEEP);

	/*
	 * loop through bonus and spill buffer if it exists, and
	 * build up new attr_descriptor to reset the attributes
	 */
	k = j = 0;
	count = bonus_attr_count;
	hdr = SA_GET_HDR(hdl, SA_BONUS);
	idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
	for (; k != 2; k++) {
		/*
		 * Iterate over each attribute in layout.  Fetch the
		 * size of variable-length attributes needing rewrite
		 * from sa_lengths[].
		 */
		for (i = 0, length_idx = 0; i != count; i++) {
			sa_attr_type_t attr;

			attr = idx_tab->sa_layout->lot_attrs[i];
			reg_length = SA_REGISTERED_LEN(sa, attr);
			if (reg_length == 0) {
				length = hdr->sa_lengths[length_idx];
				length_idx++;
			} else {
				length = reg_length;
			}
			if (attr == newattr) {
				/*
				 * There is nothing to do for SA_REMOVE,
				 * so it is just skipped.
				 */
				if (action == SA_REMOVE)
					continue;

				/*
				 * Duplicate attributes are not allowed, so the
				 * action can not be SA_ADD here.
				 */
				ASSERT3S(action, ==, SA_REPLACE);

				/*
				 * Only a variable-sized attribute can be
				 * replaced here, and its size must be changing.
				 */
				ASSERT3U(reg_length, ==, 0);
				ASSERT3U(length, !=, buflen);
				SA_ADD_BULK_ATTR(attr_desc, j, attr,
				    locator, datastart, buflen);
			} else {
				SA_ADD_BULK_ATTR(attr_desc, j, attr,
				    NULL, (void *)
				    (TOC_OFF(idx_tab->sa_idx_tab[attr]) +
				    (uintptr_t)old_data[k]), length);
			}
		}
		if (k == 0 && hdl->sa_spill) {
			hdr = SA_GET_HDR(hdl, SA_SPILL);
			idx_tab = SA_IDX_TAB_GET(hdl, SA_SPILL);
			count = spill_attr_count;
		} else {
			break;
		}
	}
	if (action == SA_ADD) {
		reg_length = SA_REGISTERED_LEN(sa, newattr);
		IMPLY(reg_length != 0, reg_length == buflen);
		SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
		    datastart, buflen);
	}
	ASSERT3U(j, ==, attr_count);

	error = sa_build_layouts(hdl, attr_desc, attr_count, tx);

	if (old_data[0])
		kmem_free(old_data[0], bonus_data_size);
	if (old_data[1])
		kmem_free(old_data[1], spill_data_size);
	kmem_free(attr_desc, sizeof (sa_bulk_attr_t) * attr_count);

	return (error);
}
static int
sa_bulk_update_impl(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
    dmu_tx_t *tx)
{
	int error;
	sa_os_t *sa = hdl->sa_os->os_sa;
	dmu_object_type_t bonustype;

	bonustype = SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl, SA_BONUS));

	ASSERT(hdl);
	ASSERT(MUTEX_HELD(&hdl->sa_lock));

	/* sync out registration table if necessary */
	if (sa->sa_need_attr_registration)
		sa_attr_register_sync(hdl, tx);

	error = sa_attr_op(hdl, bulk, count, SA_UPDATE, tx);
	if (error == 0 && !IS_SA_BONUSTYPE(bonustype) && sa->sa_update_cb)
		sa->sa_update_cb(hdl, tx);

	return (error);
}
/*
 * update or add new attribute
 */
int
sa_update(sa_handle_t *hdl, sa_attr_type_t type,
    void *buf, uint32_t buflen, dmu_tx_t *tx)
{
	int error;
	sa_bulk_attr_t bulk;

	bulk.sa_attr = type;
	bulk.sa_data_func = NULL;
	bulk.sa_length = buflen;
	bulk.sa_data = buf;

	mutex_enter(&hdl->sa_lock);
	error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
	mutex_exit(&hdl->sa_lock);
	return (error);
}
int
sa_update_from_cb(sa_handle_t *hdl, sa_attr_type_t attr,
    uint32_t buflen, sa_data_locator_t *locator, void *userdata, dmu_tx_t *tx)
{
	int error;
	sa_bulk_attr_t bulk;

	bulk.sa_attr = attr;
	bulk.sa_data = userdata;
	bulk.sa_data_func = locator;
	bulk.sa_length = buflen;

	mutex_enter(&hdl->sa_lock);
	error = sa_bulk_update_impl(hdl, &bulk, 1, tx);
	mutex_exit(&hdl->sa_lock);
	return (error);
}
/*
 * Return size of an attribute
 */

int
sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size)
{
	sa_bulk_attr_t bulk;
	int error;

	bulk.sa_data = NULL;
	bulk.sa_attr = attr;
	bulk.sa_data_func = NULL;

	ASSERT(hdl);
	mutex_enter(&hdl->sa_lock);
	if ((error = sa_attr_op(hdl, &bulk, 1, SA_LOOKUP, NULL)) != 0) {
		mutex_exit(&hdl->sa_lock);
		return (error);
	}
	*size = bulk.sa_size;

	mutex_exit(&hdl->sa_lock);
	return (0);
}
int
sa_bulk_lookup_locked(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
{
	ASSERT(hdl);
	ASSERT(MUTEX_HELD(&hdl->sa_lock));
	return (sa_lookup_impl(hdl, attrs, count));
}

int
sa_bulk_lookup(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count)
{
	int error;

	ASSERT(hdl);
	mutex_enter(&hdl->sa_lock);
	error = sa_bulk_lookup_locked(hdl, attrs, count);
	mutex_exit(&hdl->sa_lock);
	return (error);
}

int
sa_bulk_update(sa_handle_t *hdl, sa_bulk_attr_t *attrs, int count, dmu_tx_t *tx)
{
	int error;

	ASSERT(hdl);
	mutex_enter(&hdl->sa_lock);
	error = sa_bulk_update_impl(hdl, attrs, count, tx);
	mutex_exit(&hdl->sa_lock);
	return (error);
}

int
sa_remove(sa_handle_t *hdl, sa_attr_type_t attr, dmu_tx_t *tx)
{
	int error;

	mutex_enter(&hdl->sa_lock);
	error = sa_modify_attrs(hdl, attr, SA_REMOVE, NULL,
	    NULL, 0, tx);
	mutex_exit(&hdl->sa_lock);
	return (error);
}

void
sa_object_info(sa_handle_t *hdl, dmu_object_info_t *doi)
{
	dmu_object_info_from_db((dmu_buf_t *)hdl->sa_bonus, doi);
}

void
sa_object_size(sa_handle_t *hdl, uint32_t *blksize, u_longlong_t *nblocks)
{
	dmu_object_size_from_db((dmu_buf_t *)hdl->sa_bonus,
	    blksize, nblocks);
}

void
sa_set_userp(sa_handle_t *hdl, void *ptr)
{
	hdl->sa_userp = ptr;
}

dmu_buf_t *
sa_get_db(sa_handle_t *hdl)
{
	return ((dmu_buf_t *)hdl->sa_bonus);
}

void *
sa_get_userdata(sa_handle_t *hdl)
{
	return (hdl->sa_userp);
}

void
sa_register_update_callback_locked(objset_t *os, sa_update_cb_t *func)
{
	ASSERT(MUTEX_HELD(&os->os_sa->sa_lock));
	os->os_sa->sa_update_cb = func;
}

void
sa_register_update_callback(objset_t *os, sa_update_cb_t *func)
{
	mutex_enter(&os->os_sa->sa_lock);
	sa_register_update_callback_locked(os, func);
	mutex_exit(&os->os_sa->sa_lock);
}

uint64_t
sa_handle_object(sa_handle_t *hdl)
{
	return (hdl->sa_bonus->db_object);
}

boolean_t
sa_enabled(objset_t *os)
{
	return (os->os_sa == NULL);
}

int
sa_set_sa_object(objset_t *os, uint64_t sa_object)
{
	sa_os_t *sa = os->os_sa;

	if (sa->sa_master_obj)
		return (1);

	sa->sa_master_obj = sa_object;

	return (0);
}

int
sa_hdrsize(void *arg)
{
	sa_hdr_phys_t *hdr = arg;

	return (SA_HDR_SIZE(hdr));
}

void
sa_handle_lock(sa_handle_t *hdl)
{
	ASSERT(hdl);
	mutex_enter(&hdl->sa_lock);
}

void
sa_handle_unlock(sa_handle_t *hdl)
{
	ASSERT(hdl);
	mutex_exit(&hdl->sa_lock);
}