4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
26 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
30 * DVA-based Adjustable Replacement Cache
32 * While much of the theory of operation used here is
33 * based on the self-tuning, low overhead replacement cache
34 * presented by Megiddo and Modha at FAST 2003, there are some
35 * significant differences:
37 * 1. The Megiddo and Modha model assumes any page is evictable.
38 * Pages in its cache cannot be "locked" into memory. This makes
39 * the eviction algorithm simple: evict the last page in the list.
40 * This also makes the performance characteristics easy to reason
41 * about. Our cache is not so simple. At any given moment, some
42 * subset of the blocks in the cache are un-evictable because we
43 * have handed out a reference to them. Blocks are only evictable
44 * when there are no external references active. This makes
45 * eviction far more problematic: we choose to evict the evictable
46 * blocks that are the "lowest" in the list.
48 * There are times when it is not possible to evict the requested
49 * space. In these circumstances we are unable to adjust the cache
50 * size. To prevent the cache growing unbounded at these times we
51 * implement a "cache throttle" that slows the flow of new data
52 * into the cache until we can make space available.
54 * 2. The Megiddo and Modha model assumes a fixed cache size.
55 * Pages are evicted when the cache is full and there is a cache
56 * miss. Our model has a variable sized cache. It grows with
57 * high use, but also tries to react to memory pressure from the
58 * operating system: decreasing its size when system memory is tight.
61 * 3. The Megiddo and Modha model assumes a fixed page size. All
62 * elements of the cache are therefore exactly the same size. So
63 * when adjusting the cache size following a cache miss, it's simply
64 * a matter of choosing a single page to evict. In our model, we
65 * have variable sized cache blocks (ranging from 512 bytes to
66 * 128K bytes). We therefore choose a set of blocks to evict to make
67 * space for a cache miss that approximates as closely as possible
68 * the space used by the new block.
70 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
71 * by N. Megiddo & D. Modha, FAST 2003
77 * A new reference to a cache buffer can be obtained in two
78 * ways: 1) via a hash table lookup using the DVA as a key,
79 * or 2) via one of the ARC lists. The arc_read() interface
80 * uses method 1, while the internal ARC algorithms for
81 * adjusting the cache use method 2. We therefore provide two
82 * types of locks: 1) the hash table lock array, and 2) the
85 * Buffers do not have their own mutexes, rather they rely on the
86 * hash table mutexes for the bulk of their protection (i.e. most
87 * fields in the arc_buf_hdr_t are protected by these mutexes).
89 * buf_hash_find() returns the appropriate mutex (held) when it
90 * locates the requested buffer in the hash table. It returns
91 * NULL for the mutex if the buffer was not in the table.
93 * buf_hash_remove() expects the appropriate hash mutex to be
94 * already held before it is invoked.
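 *
 * A hedged sketch of the buf_hash_find() contract described above
 * (caller-side pseudocode only; the names match functions defined later
 * in this file):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(spa, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... hdr is stable while hash_lock is held ...
 *		mutex_exit(hash_lock);
 *	}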
96 * Each ARC state also has a mutex which is used to protect the
97 * buffer list associated with the state. When attempting to
98 * obtain a hash table lock while holding an ARC list lock you
99 * must use: mutex_tryenter() to avoid deadlock. Also note that
100 * the active state mutex must be held before the ghost state mutex.
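 *
 * Illustratively (a sketch of the rule above, not the actual eviction
 * code), while holding an ARC state (multilist sublist) lock a thread
 * must not block on a hash lock:
 *
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;	/* skip this header rather than deadlock */
 *	}
 *	... operate on the header, then mutex_exit(hash_lock) ...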
102 * Note that the majority of the performance stats are manipulated
103 * with atomic operations.
105 * The L2ARC uses the l2ad_mtx on each vdev for the following:
107 * - L2ARC buflist creation
108 * - L2ARC buflist eviction
109 * - L2ARC write completion, which walks L2ARC buflists
110 * - ARC header destruction, as it removes from L2ARC buflists
111 * - ARC header release, as it removes from L2ARC buflists
117 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
118 * This structure can point either to a block that is still in the cache or to
119 * one that is only accessible in an L2 ARC device, or it can provide
120 * information about a block that was recently evicted. If a block is
121 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
122 * information to retrieve it from the L2ARC device. This information is
123 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
124 * that is in this state cannot access the data directly.
126 * Blocks that are actively being referenced or have not been evicted
127 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
128 * the arc_buf_hdr_t that will point to the data block in memory. A block can
129 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
130 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
131 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
133 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
134 * ability to store the physical data (b_pabd) associated with the DVA of the
135 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
136 * it will match its on-disk compression characteristics. This behavior can be
137 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
138 * compressed ARC functionality is disabled, the b_pabd will point to an
139 * uncompressed version of the on-disk data.
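 *
 * For example, on illumos the compressed-ARC behavior can typically be
 * disabled with an /etc/system tunable (shown here only as an
 * illustration of the knob mentioned above):
 *
 *	set zfs:zfs_compressed_arc_enabled = 0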
141 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
142 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
143 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
144 * consumer. The ARC will provide references to this data and will keep it
145 * cached until it is no longer in use. The ARC caches only the L1ARC's physical
146 * data block and will evict any arc_buf_t that is no longer referenced. The
147 * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
148 * "overhead_size" kstat.
150 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
151 * compressed form. The typical case is that consumers will want uncompressed
152 * data, and when that happens a new data buffer is allocated where the data is
153 * decompressed for them to use. Currently the only consumer who wants
154 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
155 * exists on disk. When this happens, the arc_buf_t's data buffer is shared
156 * with the arc_buf_hdr_t.
158 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
159 * first one is owned by a compressed send consumer (and therefore references
160 * the same compressed data buffer as the arc_buf_hdr_t) and the second could be
161 * used by any other consumer (and has its own uncompressed copy of the data
176 * | b_buf +------------>+-----------+ arc_buf_t
177 * | b_pabd +-+ |b_next +---->+-----------+
178 * +-----------+ | |-----------| |b_next +-->NULL
179 * | |b_comp = T | +-----------+
180 * | |b_data +-+ |b_comp = F |
181 * | +-----------+ | |b_data +-+
182 * +->+------+ | +-----------+ |
184 * data | |<--------------+ | uncompressed
185 * +------+ compressed, | data
186 * shared +-->+------+
191 * When a consumer reads a block, the ARC must first look to see if the
192 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
193 * arc_buf_t and either copies uncompressed data into a new data buffer from an
194 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
195 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
196 * hdr is compressed and the desired compression characteristics of the
197 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
198 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
199 * the last buffer in the hdr's b_buf list, however a shared compressed buf can
200 * be anywhere in the hdr's list.
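 *
 * As a rough sketch (illustrative pseudocode, not the actual allocation
 * routine), the choice made for a newly requested arc_buf_t is:
 *
 *	if (consumer wants compressed data && hdr's b_pabd is compressed)
 *		share b_pabd with the new arc_buf_t;
 *	else if (another uncompressed arc_buf_t already exists)
 *		copy its b_data into a newly allocated buffer;
 *	else if (hdr is uncompressed && sharing is allowed)
 *		share b_pabd as the last buffer on the hdr's b_buf list;
 *	else
 *		decompress b_pabd into a newly allocated buffer;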
202 * The diagram below shows an example of an uncompressed ARC hdr that is
203 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
204 * the last element in the buf list):
216 * | | arc_buf_t (shared)
217 * | b_buf +------------>+---------+ arc_buf_t
218 * | | |b_next +---->+---------+
219 * | b_pabd +-+ |---------| |b_next +-->NULL
220 * +-----------+ | | | +---------+
222 * | +---------+ | |b_data +-+
223 * +->+------+ | +---------+ |
225 * uncompressed | | | |
228 * | uncompressed | | |
231 * +---------------------------------+
233 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
234 * since the physical block is about to be rewritten. The new data contents
235 * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
236 * it may compress the data before writing it to disk. The ARC will be called
237 * with the transformed data and will bcopy the transformed on-disk block into
238 * a newly allocated b_pabd. Writes are always done into buffers which have
239 * either been loaned (and hence are new and don't have other readers) or
240 * buffers which have been released (and hence have their own hdr, if there
241 * were originally other readers of the buf's original hdr). This ensures that
242 * the ARC only needs to update a single buf and its hdr after a write occurs.
244 * When the L2ARC is in use, it will also take advantage of the b_pabd. The
245 * L2ARC will always write the contents of b_pabd to the L2ARC. This means
246 * that when compressed ARC is enabled that the L2ARC blocks are identical
247 * to the on-disk block in the main data pool. This provides a significant
248 * advantage since the ARC can leverage the bp's checksum when reading from the
249 * L2ARC to determine if the contents are valid. However, if the compressed
250 * ARC is disabled, then the L2ARC's block must be transformed to look
251 * like the physical block in the main data pool before comparing the
252 * checksum and determining its validity.
257 #include <sys/spa_impl.h>
258 #include <sys/zio_compress.h>
259 #include <sys/zio_checksum.h>
260 #include <sys/zfs_context.h>
262 #include <sys/refcount.h>
263 #include <sys/vdev.h>
264 #include <sys/vdev_impl.h>
265 #include <sys/dsl_pool.h>
266 #include <sys/zio_checksum.h>
267 #include <sys/multilist.h>
270 #include <sys/vmsystm.h>
272 #include <sys/fs/swapnode.h>
273 #include <sys/dnlc.h>
275 #include <sys/callb.h>
276 #include <sys/kstat.h>
277 #include <zfs_fletcher.h>
280 /* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
281 boolean_t arc_watch = B_FALSE;
285 static kmutex_t arc_reclaim_lock;
286 static kcondvar_t arc_reclaim_thread_cv;
287 static boolean_t arc_reclaim_thread_exit;
288 static kcondvar_t arc_reclaim_waiters_cv;
290 uint_t arc_reduce_dnlc_percent = 3;
293 * The number of headers to evict in arc_evict_state_impl() before
294 * dropping the sublist lock and evicting from another sublist. A lower
295 * value means we're more likely to evict the "correct" header (i.e. the
296 * oldest header in the arc state), but comes with higher overhead
297 * (i.e. more invocations of arc_evict_state_impl()).
299 int zfs_arc_evict_batch_limit = 10;
301 /* number of seconds before growing cache again */
302 static int arc_grow_retry = 60;
304 /* shift of arc_c for calculating overflow limit in arc_get_data_impl */
305 int zfs_arc_overflow_shift = 8;
307 /* shift of arc_c for calculating both min and max arc_p */
308 static int arc_p_min_shift = 4;
310 /* log2(fraction of arc to reclaim) */
311 static int arc_shrink_shift = 7;
314 * log2(fraction of ARC which must be free to allow growing).
315 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
316 * when reading a new block into the ARC, we will evict an equal-sized block
317 * from the ARC.
319 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
320 * we will still not allow it to grow.
322 int arc_no_grow_shift = 5;
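/*
 * Worked example (illustrative, not from the source): with the default
 * arc_no_grow_shift of 5 and a target cache size arc_c of 4 GB, the ARC
 * is allowed to grow only while at least 4 GB >> 5 = 128 MB of memory
 * remains free; below that threshold a new block displaces an equal
 * amount of cached data instead of growing the cache.
 */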
326 * minimum lifespan of a prefetch block in clock ticks
327 * (initialized in arc_init())
329 static int arc_min_prefetch_lifespan;
332 * If this percent of memory is free, don't throttle.
334 int arc_lotsfree_percent = 10;
339 * The arc has filled available memory and has now warmed up.
341 static boolean_t arc_warm;
344 * log2 fraction of the zio arena to keep free.
346 int arc_zio_arena_free_shift = 2;
349 * These tunables are for performance analysis.
351 uint64_t zfs_arc_max;
352 uint64_t zfs_arc_min;
353 uint64_t zfs_arc_meta_limit = 0;
354 uint64_t zfs_arc_meta_min = 0;
355 int zfs_arc_grow_retry = 0;
356 int zfs_arc_shrink_shift = 0;
357 int zfs_arc_p_min_shift = 0;
358 int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
360 boolean_t zfs_compressed_arc_enabled = B_TRUE;
363 * Note that buffers can be in one of 6 states:
364 * ARC_anon - anonymous (discussed below)
365 * ARC_mru - recently used, currently cached
366 * ARC_mru_ghost - recently used, no longer in cache
367 * ARC_mfu - frequently used, currently cached
368 * ARC_mfu_ghost - frequently used, no longer in cache
369 * ARC_l2c_only - exists in L2ARC but not other states
370 * When there are no active references to the buffer, they are
371 * are linked onto a list in one of these arc states. These are
372 * the only buffers that can be evicted or deleted. Within each
373 * state there are multiple lists, one for meta-data and one for
374 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
375 * etc.) is tracked separately so that it can be managed more
376 * explicitly: favored over data, limited explicitly.
378 * Anonymous buffers are buffers that are not associated with
379 * a DVA. These are buffers that hold dirty block copies
380 * before they are written to stable storage. By definition,
381 * they are "ref'd" and are considered part of arc_mru
382 * that cannot be freed. Generally, they will acquire a DVA
383 * as they are written and migrate onto the arc_mru list.
385 * The ARC_l2c_only state is for buffers that are in the second
386 * level ARC but no longer in any of the ARC_m* lists. The second
387 * level ARC itself may also contain buffers that are in any of
388 * the ARC_m* states - meaning that a buffer can exist in two
389 * places. The reason for the ARC_l2c_only state is to keep the
390 * buffer header in the hash table, so that reads that hit the
391 * second level ARC benefit from these fast lookups.
394 typedef struct arc_state {
396 * list of evictable buffers
398 	multilist_t *arcs_list[ARC_BUFC_NUMTYPES];
400 * total amount of evictable data in this state
402 	refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
404 * total amount of data in this state; this includes: evictable,
405 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
407 	refcount_t arcs_size;
} arc_state_t;
411 static arc_state_t ARC_anon;
412 static arc_state_t ARC_mru;
413 static arc_state_t ARC_mru_ghost;
414 static arc_state_t ARC_mfu;
415 static arc_state_t ARC_mfu_ghost;
416 static arc_state_t ARC_l2c_only;
418 typedef struct arc_stats
{
419 kstat_named_t arcstat_hits
;
420 kstat_named_t arcstat_misses
;
421 kstat_named_t arcstat_demand_data_hits
;
422 kstat_named_t arcstat_demand_data_misses
;
423 kstat_named_t arcstat_demand_metadata_hits
;
424 kstat_named_t arcstat_demand_metadata_misses
;
425 kstat_named_t arcstat_prefetch_data_hits
;
426 kstat_named_t arcstat_prefetch_data_misses
;
427 kstat_named_t arcstat_prefetch_metadata_hits
;
428 kstat_named_t arcstat_prefetch_metadata_misses
;
429 kstat_named_t arcstat_mru_hits
;
430 kstat_named_t arcstat_mru_ghost_hits
;
431 kstat_named_t arcstat_mfu_hits
;
432 kstat_named_t arcstat_mfu_ghost_hits
;
433 kstat_named_t arcstat_deleted
;
435 * Number of buffers that could not be evicted because the hash lock
436 * was held by another thread. The lock may not necessarily be held
437 * by something using the same buffer, since hash locks are shared
438 * by multiple buffers.
440 kstat_named_t arcstat_mutex_miss
;
442 * Number of buffers skipped because they have I/O in progress, are
443 * indirect prefetch buffers that have not lived long enough, or are
444 * not from the spa we're trying to evict from.
446 kstat_named_t arcstat_evict_skip
;
448 * Number of times arc_evict_state() was unable to evict enough
449 * buffers to reach its target amount.
451 kstat_named_t arcstat_evict_not_enough
;
452 kstat_named_t arcstat_evict_l2_cached
;
453 kstat_named_t arcstat_evict_l2_eligible
;
454 kstat_named_t arcstat_evict_l2_ineligible
;
455 kstat_named_t arcstat_evict_l2_skip
;
456 kstat_named_t arcstat_hash_elements
;
457 kstat_named_t arcstat_hash_elements_max
;
458 kstat_named_t arcstat_hash_collisions
;
459 kstat_named_t arcstat_hash_chains
;
460 kstat_named_t arcstat_hash_chain_max
;
461 kstat_named_t arcstat_p
;
462 kstat_named_t arcstat_c
;
463 kstat_named_t arcstat_c_min
;
464 kstat_named_t arcstat_c_max
;
465 kstat_named_t arcstat_size
;
467 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
468 * Note that the compressed bytes may match the uncompressed bytes
469 * if the block is either not compressed or compressed arc is disabled.
471 kstat_named_t arcstat_compressed_size
;
473 * Uncompressed size of the data stored in b_pabd. If compressed
474 * arc is disabled then this value will be identical to the stat
477 kstat_named_t arcstat_uncompressed_size
;
479 * Number of bytes stored in all the arc_buf_t's. This is classified
480 * as "overhead" since this data is typically short-lived and will
481 * be evicted from the arc when it becomes unreferenced unless the
482 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
483 * values have been set (see comment in dbuf.c for more information).
485 kstat_named_t arcstat_overhead_size
;
487 * Number of bytes consumed by internal ARC structures necessary
488 * for tracking purposes; these structures are not actually
489 * backed by ARC buffers. This includes arc_buf_hdr_t structures
490 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
491 * caches), and arc_buf_t structures (allocated via arc_buf_t
494 kstat_named_t arcstat_hdr_size
;
496 * Number of bytes consumed by ARC buffers of type equal to
497 * ARC_BUFC_DATA. This is generally consumed by buffers backing
498 * on disk user data (e.g. plain file contents).
500 kstat_named_t arcstat_data_size
;
502 * Number of bytes consumed by ARC buffers of type equal to
503 * ARC_BUFC_METADATA. This is generally consumed by buffers
504 * backing on disk data that is used for internal ZFS
505 * structures (e.g. ZAP, dnode, indirect blocks, etc).
507 kstat_named_t arcstat_metadata_size
;
509 * Number of bytes consumed by various buffers and structures
510 * not actually backed with ARC buffers. This includes bonus
511 * buffers (allocated directly via zio_buf_* functions),
512 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
513 * cache), and dnode_t structures (allocated via dnode_t cache).
515 kstat_named_t arcstat_other_size
;
517 * Total number of bytes consumed by ARC buffers residing in the
518 * arc_anon state. This includes *all* buffers in the arc_anon
519 * state; e.g. data, metadata, evictable, and unevictable buffers
520 * are all included in this value.
522 kstat_named_t arcstat_anon_size
;
524 * Number of bytes consumed by ARC buffers that meet the
525 * following criteria: backing buffers of type ARC_BUFC_DATA,
526 * residing in the arc_anon state, and are eligible for eviction
527 * (e.g. have no outstanding holds on the buffer).
529 kstat_named_t arcstat_anon_evictable_data
;
531 * Number of bytes consumed by ARC buffers that meet the
532 * following criteria: backing buffers of type ARC_BUFC_METADATA,
533 * residing in the arc_anon state, and are eligible for eviction
534 * (e.g. have no outstanding holds on the buffer).
536 kstat_named_t arcstat_anon_evictable_metadata
;
538 * Total number of bytes consumed by ARC buffers residing in the
539 * arc_mru state. This includes *all* buffers in the arc_mru
540 * state; e.g. data, metadata, evictable, and unevictable buffers
541 * are all included in this value.
543 kstat_named_t arcstat_mru_size
;
545 * Number of bytes consumed by ARC buffers that meet the
546 * following criteria: backing buffers of type ARC_BUFC_DATA,
547 * residing in the arc_mru state, and are eligible for eviction
548 * (e.g. have no outstanding holds on the buffer).
550 kstat_named_t arcstat_mru_evictable_data
;
552 * Number of bytes consumed by ARC buffers that meet the
553 * following criteria: backing buffers of type ARC_BUFC_METADATA,
554 * residing in the arc_mru state, and are eligible for eviction
555 * (e.g. have no outstanding holds on the buffer).
557 kstat_named_t arcstat_mru_evictable_metadata
;
559 * Total number of bytes that *would have been* consumed by ARC
560 * buffers in the arc_mru_ghost state. The key thing to note
561 * here, is the fact that this size doesn't actually indicate
562 * RAM consumption. The ghost lists only consist of headers and
563 * don't actually have ARC buffers linked off of these headers.
564 * Thus, *if* the headers had associated ARC buffers, these
565 * buffers *would have* consumed this number of bytes.
567 kstat_named_t arcstat_mru_ghost_size
;
569 * Number of bytes that *would have been* consumed by ARC
570 * buffers that are eligible for eviction, of type
571 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
573 kstat_named_t arcstat_mru_ghost_evictable_data
;
575 * Number of bytes that *would have been* consumed by ARC
576 * buffers that are eligible for eviction, of type
577 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
579 kstat_named_t arcstat_mru_ghost_evictable_metadata
;
581 * Total number of bytes consumed by ARC buffers residing in the
582 * arc_mfu state. This includes *all* buffers in the arc_mfu
583 * state; e.g. data, metadata, evictable, and unevictable buffers
584 * are all included in this value.
586 kstat_named_t arcstat_mfu_size
;
588 * Number of bytes consumed by ARC buffers that are eligible for
589 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
592 kstat_named_t arcstat_mfu_evictable_data
;
594 * Number of bytes consumed by ARC buffers that are eligible for
595 * eviction, of type ARC_BUFC_METADATA, and reside in the
598 kstat_named_t arcstat_mfu_evictable_metadata
;
600 * Total number of bytes that *would have been* consumed by ARC
601 * buffers in the arc_mfu_ghost state. See the comment above
602 * arcstat_mru_ghost_size for more details.
604 kstat_named_t arcstat_mfu_ghost_size
;
606 * Number of bytes that *would have been* consumed by ARC
607 * buffers that are eligible for eviction, of type
608 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
610 kstat_named_t arcstat_mfu_ghost_evictable_data
;
612 * Number of bytes that *would have been* consumed by ARC
613 * buffers that are eligible for eviction, of type
614 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
616 kstat_named_t arcstat_mfu_ghost_evictable_metadata
;
617 kstat_named_t arcstat_l2_hits
;
618 kstat_named_t arcstat_l2_misses
;
619 kstat_named_t arcstat_l2_feeds
;
620 kstat_named_t arcstat_l2_rw_clash
;
621 kstat_named_t arcstat_l2_read_bytes
;
622 kstat_named_t arcstat_l2_write_bytes
;
623 kstat_named_t arcstat_l2_writes_sent
;
624 kstat_named_t arcstat_l2_writes_done
;
625 kstat_named_t arcstat_l2_writes_error
;
626 kstat_named_t arcstat_l2_writes_lock_retry
;
627 kstat_named_t arcstat_l2_evict_lock_retry
;
628 kstat_named_t arcstat_l2_evict_reading
;
629 kstat_named_t arcstat_l2_evict_l1cached
;
630 kstat_named_t arcstat_l2_free_on_write
;
631 kstat_named_t arcstat_l2_abort_lowmem
;
632 kstat_named_t arcstat_l2_cksum_bad
;
633 kstat_named_t arcstat_l2_io_error
;
634 kstat_named_t arcstat_l2_lsize
;
635 kstat_named_t arcstat_l2_psize
;
636 kstat_named_t arcstat_l2_hdr_size
;
637 kstat_named_t arcstat_memory_throttle_count
;
638 kstat_named_t arcstat_meta_used
;
639 kstat_named_t arcstat_meta_limit
;
640 kstat_named_t arcstat_meta_max
;
641 kstat_named_t arcstat_meta_min
;
642 kstat_named_t arcstat_sync_wait_for_async
;
643 kstat_named_t arcstat_demand_hit_predictive_prefetch
;
646 static arc_stats_t arc_stats
= {
647 { "hits", KSTAT_DATA_UINT64
},
648 { "misses", KSTAT_DATA_UINT64
},
649 { "demand_data_hits", KSTAT_DATA_UINT64
},
650 { "demand_data_misses", KSTAT_DATA_UINT64
},
651 { "demand_metadata_hits", KSTAT_DATA_UINT64
},
652 { "demand_metadata_misses", KSTAT_DATA_UINT64
},
653 { "prefetch_data_hits", KSTAT_DATA_UINT64
},
654 { "prefetch_data_misses", KSTAT_DATA_UINT64
},
655 { "prefetch_metadata_hits", KSTAT_DATA_UINT64
},
656 { "prefetch_metadata_misses", KSTAT_DATA_UINT64
},
657 { "mru_hits", KSTAT_DATA_UINT64
},
658 { "mru_ghost_hits", KSTAT_DATA_UINT64
},
659 { "mfu_hits", KSTAT_DATA_UINT64
},
660 { "mfu_ghost_hits", KSTAT_DATA_UINT64
},
661 { "deleted", KSTAT_DATA_UINT64
},
662 { "mutex_miss", KSTAT_DATA_UINT64
},
663 { "evict_skip", KSTAT_DATA_UINT64
},
664 { "evict_not_enough", KSTAT_DATA_UINT64
},
665 { "evict_l2_cached", KSTAT_DATA_UINT64
},
666 { "evict_l2_eligible", KSTAT_DATA_UINT64
},
667 { "evict_l2_ineligible", KSTAT_DATA_UINT64
},
668 { "evict_l2_skip", KSTAT_DATA_UINT64
},
669 { "hash_elements", KSTAT_DATA_UINT64
},
670 { "hash_elements_max", KSTAT_DATA_UINT64
},
671 { "hash_collisions", KSTAT_DATA_UINT64
},
672 { "hash_chains", KSTAT_DATA_UINT64
},
673 { "hash_chain_max", KSTAT_DATA_UINT64
},
674 { "p", KSTAT_DATA_UINT64
},
675 { "c", KSTAT_DATA_UINT64
},
676 { "c_min", KSTAT_DATA_UINT64
},
677 { "c_max", KSTAT_DATA_UINT64
},
678 { "size", KSTAT_DATA_UINT64
},
679 { "compressed_size", KSTAT_DATA_UINT64
},
680 { "uncompressed_size", KSTAT_DATA_UINT64
},
681 { "overhead_size", KSTAT_DATA_UINT64
},
682 { "hdr_size", KSTAT_DATA_UINT64
},
683 { "data_size", KSTAT_DATA_UINT64
},
684 { "metadata_size", KSTAT_DATA_UINT64
},
685 { "other_size", KSTAT_DATA_UINT64
},
686 { "anon_size", KSTAT_DATA_UINT64
},
687 { "anon_evictable_data", KSTAT_DATA_UINT64
},
688 { "anon_evictable_metadata", KSTAT_DATA_UINT64
},
689 { "mru_size", KSTAT_DATA_UINT64
},
690 { "mru_evictable_data", KSTAT_DATA_UINT64
},
691 { "mru_evictable_metadata", KSTAT_DATA_UINT64
},
692 { "mru_ghost_size", KSTAT_DATA_UINT64
},
693 { "mru_ghost_evictable_data", KSTAT_DATA_UINT64
},
694 { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64
},
695 { "mfu_size", KSTAT_DATA_UINT64
},
696 { "mfu_evictable_data", KSTAT_DATA_UINT64
},
697 { "mfu_evictable_metadata", KSTAT_DATA_UINT64
},
698 { "mfu_ghost_size", KSTAT_DATA_UINT64
},
699 { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64
},
700 { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64
},
701 { "l2_hits", KSTAT_DATA_UINT64
},
702 { "l2_misses", KSTAT_DATA_UINT64
},
703 { "l2_feeds", KSTAT_DATA_UINT64
},
704 { "l2_rw_clash", KSTAT_DATA_UINT64
},
705 { "l2_read_bytes", KSTAT_DATA_UINT64
},
706 { "l2_write_bytes", KSTAT_DATA_UINT64
},
707 { "l2_writes_sent", KSTAT_DATA_UINT64
},
708 { "l2_writes_done", KSTAT_DATA_UINT64
},
709 { "l2_writes_error", KSTAT_DATA_UINT64
},
710 { "l2_writes_lock_retry", KSTAT_DATA_UINT64
},
711 { "l2_evict_lock_retry", KSTAT_DATA_UINT64
},
712 { "l2_evict_reading", KSTAT_DATA_UINT64
},
713 { "l2_evict_l1cached", KSTAT_DATA_UINT64
},
714 { "l2_free_on_write", KSTAT_DATA_UINT64
},
715 { "l2_abort_lowmem", KSTAT_DATA_UINT64
},
716 { "l2_cksum_bad", KSTAT_DATA_UINT64
},
717 { "l2_io_error", KSTAT_DATA_UINT64
},
718 { "l2_size", KSTAT_DATA_UINT64
},
719 { "l2_asize", KSTAT_DATA_UINT64
},
720 { "l2_hdr_size", KSTAT_DATA_UINT64
},
721 { "memory_throttle_count", KSTAT_DATA_UINT64
},
722 { "arc_meta_used", KSTAT_DATA_UINT64
},
723 { "arc_meta_limit", KSTAT_DATA_UINT64
},
724 { "arc_meta_max", KSTAT_DATA_UINT64
},
725 { "arc_meta_min", KSTAT_DATA_UINT64
},
726 { "sync_wait_for_async", KSTAT_DATA_UINT64
},
727 { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64
},
730 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
732 #define ARCSTAT_INCR(stat, val) \
733 atomic_add_64(&arc_stats.stat.value.ui64, (val))
735 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
736 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
738 #define ARCSTAT_MAX(stat, val) { \
739 	uint64_t m; \
740 	while ((val) > (m = arc_stats.stat.value.ui64) && \
741 	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
742 		continue; \
743 }
745 #define ARCSTAT_MAXSTAT(stat) \
746 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
749 * We define a macro to allow ARC hits/misses to be easily broken down by
750 * two separate conditions, giving a total of four different subtypes for
751 * each of hits and misses (so eight statistics total).
753 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
754 	if (cond1) { \
755 		if (cond2) { \
756 			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
757 		} else { \
758 			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
759 		} \
760 	} else { \
761 		if (cond2) { \
762 			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
763 		} else { \
764 			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
765 		} \
766 	}
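/*
 * For example, the read path can record a cache hit broken down by
 * demand-vs-prefetch and data-vs-metadata roughly as follows (sketch of
 * the usage pattern, not an exact quote of the call site):
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */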
769 static arc_state_t *arc_anon;
770 static arc_state_t *arc_mru;
771 static arc_state_t *arc_mru_ghost;
772 static arc_state_t *arc_mfu;
773 static arc_state_t *arc_mfu_ghost;
774 static arc_state_t *arc_l2c_only;
777 * There are several ARC variables that are critical to export as kstats --
778 * but we don't want to have to grovel around in the kstat whenever we wish to
779 * manipulate them. For these variables, we therefore define them to be in
780 * terms of the statistic variable. This assures that we are not introducing
781 * the possibility of inconsistency by having shadow copies of the variables,
782 * while still allowing the code to be readable.
784 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
785 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
786 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
787 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
788 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
789 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
790 #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
791 #define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */
792 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
794 /* compressed size of entire arc */
795 #define arc_compressed_size ARCSTAT(arcstat_compressed_size)
796 /* uncompressed size of entire arc */
797 #define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size)
798 /* number of bytes in the arc from arc_buf_t's */
799 #define arc_overhead_size ARCSTAT(arcstat_overhead_size)
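/*
 * Because these aliases are plain lvalue macros, ordinary assignments and
 * arithmetic update the exported kstat directly; e.g. (illustrative only)
 *
 *	arc_c = MAX(arc_c - to_free, arc_c_min);
 *
 * both shrinks the target cache size and is immediately visible via the
 * "c" kstat, with no shadow copy to keep in sync.
 */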
801 static int arc_no_grow; /* Don't try to grow cache size */
802 static uint64_t arc_tempreserve;
803 static uint64_t arc_loaned_bytes;
805 typedef struct arc_callback arc_callback_t
;
807 struct arc_callback
{
809 arc_done_func_t
*acb_done
;
811 boolean_t acb_compressed
;
812 zio_t
*acb_zio_dummy
;
813 arc_callback_t
*acb_next
;
816 typedef struct arc_write_callback arc_write_callback_t
;
818 struct arc_write_callback
{
820 arc_done_func_t
*awcb_ready
;
821 arc_done_func_t
*awcb_children_ready
;
822 arc_done_func_t
*awcb_physdone
;
823 arc_done_func_t
*awcb_done
;
828 * ARC buffers are separated into multiple structs as a memory saving measure:
829 * - Common fields struct, always defined, and embedded within it:
830 * - L2-only fields, always allocated but undefined when not in L2ARC
831 * - L1-only fields, only allocated when in L1ARC
833 * Buffer in L1 Buffer only in L2
834 * +------------------------+ +------------------------+
835 * | arc_buf_hdr_t | | arc_buf_hdr_t |
839 * +------------------------+ +------------------------+
840 * | l2arc_buf_hdr_t | | l2arc_buf_hdr_t |
841 * | (undefined if L1-only) | | |
842 * +------------------------+ +------------------------+
843 * | l1arc_buf_hdr_t |
848 * +------------------------+
850 * Because it's possible for the L2ARC to become extremely large, we can wind
851 * up eating a lot of memory in L2ARC buffer headers, so the size of a header
852 * is minimized by only allocating the fields necessary for an L1-cached buffer
853 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
854 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
855 * words in pointers. arc_hdr_realloc() is used to switch a header between
856 * these two allocation states.
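/*
 * For instance (hedged sketch of the call pattern), when a header's data
 * has been evicted from the L1 cache but is still present on an L2ARC
 * device, it is demoted with something like:
 *
 *	nhdr = arc_hdr_realloc(hdr, hdr_full_cache, hdr_l2only_cache);
 *
 * and promoted back with the caches swapped when the block is read again.
 */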
858 typedef struct l1arc_buf_hdr
{
859 kmutex_t b_freeze_lock
;
860 zio_cksum_t
*b_freeze_cksum
;
863 * Used for debugging with kmem_flags - by allocating and freeing
864 * b_thawed when the buffer is thawed, we get a record of the stack
865 * trace that thawed it.
872 /* for waiting on writes to complete */
876 /* protected by arc state mutex */
877 arc_state_t
*b_state
;
878 multilist_node_t b_arc_node
;
880 /* updated atomically */
881 clock_t b_arc_access
;
883 /* self protecting */
886 arc_callback_t
*b_acb
;
890 typedef struct l2arc_dev l2arc_dev_t
;
892 typedef struct l2arc_buf_hdr
{
893 /* protected by arc_buf_hdr mutex */
894 l2arc_dev_t
*b_dev
; /* L2ARC device */
895 uint64_t b_daddr
; /* disk address, offset byte */
897 list_node_t b_l2node
;
901 /* protected by hash lock */
905 arc_buf_contents_t b_type
;
906 arc_buf_hdr_t
*b_hash_next
;
910 * This field stores the size of the data buffer after
911 * compression, and is set in the arc's zio completion handlers.
912 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
914 * While the block pointers can store up to 32MB in their psize
915 * field, we can only store up to 32MB minus 512B. This is due
916 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
917 * a field of zeros represents 512B in the bp). We can't use a
918 * bias of 1 since we need to reserve a psize of zero, here, to
919 * represent holes and embedded blocks.
921 * This isn't a problem in practice, since the maximum size of a
922 * buffer is limited to 16MB, so we never need to store 32MB in
923 * this field. Even in the upstream illumos code base, the
924 * maximum size of a buffer is limited to 16MB.
929 * This field stores the size of the data buffer before
930 * compression, and cannot change once set. It is in units
931 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
933 uint16_t b_lsize
; /* immutable */
934 uint64_t b_spa
; /* immutable */
936 /* L2ARC fields. Undefined when not in L2ARC. */
937 l2arc_buf_hdr_t b_l2hdr
;
938 /* L1ARC fields. Undefined when in l2arc_only state */
939 l1arc_buf_hdr_t b_l1hdr
;
942 #define GHOST_STATE(state) \
943 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
944 (state) == arc_l2c_only)
946 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
947 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
948 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
949 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
950 #define HDR_COMPRESSION_ENABLED(hdr) \
951 ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
953 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
954 #define HDR_L2_READING(hdr) \
955 (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
956 ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
957 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
958 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
959 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
960 #define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
962 #define HDR_ISTYPE_METADATA(hdr) \
963 ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
964 #define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
966 #define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
967 #define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
969 /* For storing compression mode in b_flags */
970 #define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
972 #define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
973 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
974 #define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
975 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
977 #define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
978 #define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
979 #define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
985 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
986 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
989 * Hash table routines
992 #define HT_LOCK_PAD 64
997 unsigned char pad
[(HT_LOCK_PAD
- sizeof (kmutex_t
))];
1001 #define BUF_LOCKS 256
1002 typedef struct buf_hash_table
{
1004 arc_buf_hdr_t
**ht_table
;
1005 struct ht_lock ht_locks
[BUF_LOCKS
];
1008 static buf_hash_table_t buf_hash_table
;
1010 #define BUF_HASH_INDEX(spa, dva, birth) \
1011 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
1012 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
1013 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
1014 #define HDR_LOCK(hdr) \
1015 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
1017 uint64_t zfs_crc64_table
[256];
1023 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
1024 #define L2ARC_HEADROOM 2 /* num of writes */
1026 * If we discover during ARC scan any buffers to be compressed, we boost
1027 * our headroom for the next scanning cycle by this percentage multiple.
1029 #define L2ARC_HEADROOM_BOOST 200
1030 #define L2ARC_FEED_SECS 1 /* caching interval secs */
1031 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
1033 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
1034 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
1036 /* L2ARC Performance Tunables */
1037 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
1038 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
1039 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
1040 uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
1041 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
1042 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
1043 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
1044 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
1045 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
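/*
 * Illustrative sketch (assumed relationship, not quoted from the feed
 * thread): each feed pass scans roughly "headroom" bytes ahead of the
 * write hand, on the order of
 *
 *	headroom = l2arc_write_max * l2arc_headroom;
 *	if (compressed buffers were seen on the previous pass)
 *		headroom = (headroom * l2arc_headroom_boost) / 100;
 *
 * so with the defaults above that is about 8 MB * 2 = 16 MB, doubled to
 * roughly 32 MB when the 200% headroom boost applies.
 */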
1051 vdev_t
*l2ad_vdev
; /* vdev */
1052 spa_t
*l2ad_spa
; /* spa */
1053 uint64_t l2ad_hand
; /* next write location */
1054 uint64_t l2ad_start
; /* first addr on device */
1055 uint64_t l2ad_end
; /* last addr on device */
1056 boolean_t l2ad_first
; /* first sweep through */
1057 boolean_t l2ad_writing
; /* currently writing */
1058 kmutex_t l2ad_mtx
; /* lock for buffer list */
1059 list_t l2ad_buflist
; /* buffer list */
1060 list_node_t l2ad_node
; /* device list node */
1061 refcount_t l2ad_alloc
; /* allocated bytes */
1064 static list_t L2ARC_dev_list; /* device list */
1065 static list_t *l2arc_dev_list; /* device list pointer */
1066 static kmutex_t l2arc_dev_mtx; /* device list mutex */
1067 static l2arc_dev_t *l2arc_dev_last; /* last device used */
1068 static list_t L2ARC_free_on_write; /* free after write buf list */
1069 static list_t *l2arc_free_on_write; /* free after write list ptr */
1070 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
1071 static uint64_t l2arc_ndev; /* number of devices */
1073 typedef struct l2arc_read_callback
{
1074 arc_buf_hdr_t
*l2rcb_hdr
; /* read header */
1075 blkptr_t l2rcb_bp
; /* original blkptr */
1076 zbookmark_phys_t l2rcb_zb
; /* original bookmark */
1077 int l2rcb_flags
; /* original flags */
1078 abd_t
*l2rcb_abd
; /* temporary buffer */
1079 } l2arc_read_callback_t
;
1081 typedef struct l2arc_write_callback
{
1082 l2arc_dev_t
*l2wcb_dev
; /* device info */
1083 arc_buf_hdr_t
*l2wcb_head
; /* head of write buflist */
1084 } l2arc_write_callback_t
;
1086 typedef struct l2arc_data_free
{
1087 /* protected by l2arc_free_on_write_mtx */
1090 arc_buf_contents_t l2df_type
;
1091 list_node_t l2df_list_node
;
1092 } l2arc_data_free_t
;
1094 static kmutex_t l2arc_feed_thr_lock
;
1095 static kcondvar_t l2arc_feed_thr_cv
;
1096 static uint8_t l2arc_thread_exit
;
1098 static abd_t
*arc_get_data_abd(arc_buf_hdr_t
*, uint64_t, void *);
1099 static void *arc_get_data_buf(arc_buf_hdr_t
*, uint64_t, void *);
1100 static void arc_get_data_impl(arc_buf_hdr_t
*, uint64_t, void *);
1101 static void arc_free_data_abd(arc_buf_hdr_t
*, abd_t
*, uint64_t, void *);
1102 static void arc_free_data_buf(arc_buf_hdr_t
*, void *, uint64_t, void *);
1103 static void arc_free_data_impl(arc_buf_hdr_t
*hdr
, uint64_t size
, void *tag
);
1104 static void arc_hdr_free_pabd(arc_buf_hdr_t
*);
1105 static void arc_hdr_alloc_pabd(arc_buf_hdr_t
*);
1106 static void arc_access(arc_buf_hdr_t
*, kmutex_t
*);
1107 static boolean_t
arc_is_overflowing();
1108 static void arc_buf_watch(arc_buf_t
*);
1110 static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t
*);
1111 static uint32_t arc_bufc_to_flags(arc_buf_contents_t
);
1112 static inline void arc_hdr_set_flags(arc_buf_hdr_t
*hdr
, arc_flags_t flags
);
1113 static inline void arc_hdr_clear_flags(arc_buf_hdr_t
*hdr
, arc_flags_t flags
);
1115 static boolean_t
l2arc_write_eligible(uint64_t, arc_buf_hdr_t
*);
1116 static void l2arc_read_done(zio_t
*);
1119 buf_hash(uint64_t spa
, const dva_t
*dva
, uint64_t birth
)
1121 uint8_t *vdva
= (uint8_t *)dva
;
1122 uint64_t crc
= -1ULL;
1125 ASSERT(zfs_crc64_table
[128] == ZFS_CRC64_POLY
);
1127 for (i
= 0; i
< sizeof (dva_t
); i
++)
1128 crc
= (crc
>> 8) ^ zfs_crc64_table
[(crc
^ vdva
[i
]) & 0xFF];
1130 crc
^= (spa
>>8) ^ birth
;
1135 #define HDR_EMPTY(hdr) \
1136 ((hdr)->b_dva.dva_word[0] == 0 && \
1137 (hdr)->b_dva.dva_word[1] == 0)
1139 #define HDR_EQUAL(spa, dva, birth, hdr) \
1140 ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
1141 ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
1142 ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
1145 buf_discard_identity(arc_buf_hdr_t
*hdr
)
1147 hdr
->b_dva
.dva_word
[0] = 0;
1148 hdr
->b_dva
.dva_word
[1] = 0;
1152 static arc_buf_hdr_t
*
1153 buf_hash_find(uint64_t spa
, const blkptr_t
*bp
, kmutex_t
**lockp
)
1155 const dva_t
*dva
= BP_IDENTITY(bp
);
1156 uint64_t birth
= BP_PHYSICAL_BIRTH(bp
);
1157 uint64_t idx
= BUF_HASH_INDEX(spa
, dva
, birth
);
1158 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
1161 mutex_enter(hash_lock
);
1162 for (hdr
= buf_hash_table
.ht_table
[idx
]; hdr
!= NULL
;
1163 hdr
= hdr
->b_hash_next
) {
1164 if (HDR_EQUAL(spa
, dva
, birth
, hdr
)) {
1169 mutex_exit(hash_lock
);
1175 * Insert an entry into the hash table. If there is already an element
1176 * equal to elem in the hash table, then the already existing element
1177 * will be returned and the new element will not be inserted.
1178 * Otherwise returns NULL.
1179 * If lockp == NULL, the caller is assumed to already hold the hash lock.
1181 static arc_buf_hdr_t
*
1182 buf_hash_insert(arc_buf_hdr_t
*hdr
, kmutex_t
**lockp
)
1184 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
1185 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
1186 arc_buf_hdr_t
*fhdr
;
1189 ASSERT(!DVA_IS_EMPTY(&hdr
->b_dva
));
1190 ASSERT(hdr
->b_birth
!= 0);
1191 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
1193 if (lockp
!= NULL
) {
1195 mutex_enter(hash_lock
);
1197 ASSERT(MUTEX_HELD(hash_lock
));
1200 for (fhdr
= buf_hash_table
.ht_table
[idx
], i
= 0; fhdr
!= NULL
;
1201 fhdr
= fhdr
->b_hash_next
, i
++) {
1202 if (HDR_EQUAL(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
, fhdr
))
1206 hdr
->b_hash_next
= buf_hash_table
.ht_table
[idx
];
1207 buf_hash_table
.ht_table
[idx
] = hdr
;
1208 arc_hdr_set_flags(hdr
, ARC_FLAG_IN_HASH_TABLE
);
1210 /* collect some hash table performance data */
1212 ARCSTAT_BUMP(arcstat_hash_collisions
);
1214 ARCSTAT_BUMP(arcstat_hash_chains
);
1216 ARCSTAT_MAX(arcstat_hash_chain_max
, i
);
1219 ARCSTAT_BUMP(arcstat_hash_elements
);
1220 ARCSTAT_MAXSTAT(arcstat_hash_elements
);
1226 buf_hash_remove(arc_buf_hdr_t
*hdr
)
1228 arc_buf_hdr_t
*fhdr
, **hdrp
;
1229 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
1231 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx
)));
1232 ASSERT(HDR_IN_HASH_TABLE(hdr
));
1234 hdrp
= &buf_hash_table
.ht_table
[idx
];
1235 while ((fhdr
= *hdrp
) != hdr
) {
1236 ASSERT3P(fhdr
, !=, NULL
);
1237 hdrp
= &fhdr
->b_hash_next
;
1239 *hdrp
= hdr
->b_hash_next
;
1240 hdr
->b_hash_next
= NULL
;
1241 arc_hdr_clear_flags(hdr
, ARC_FLAG_IN_HASH_TABLE
);
1243 /* collect some hash table performance data */
1244 ARCSTAT_BUMPDOWN(arcstat_hash_elements
);
1246 if (buf_hash_table
.ht_table
[idx
] &&
1247 buf_hash_table
.ht_table
[idx
]->b_hash_next
== NULL
)
1248 ARCSTAT_BUMPDOWN(arcstat_hash_chains
);
1252 * Global data structures and functions for the buf kmem cache.
1254 static kmem_cache_t
*hdr_full_cache
;
1255 static kmem_cache_t
*hdr_l2only_cache
;
1256 static kmem_cache_t
*buf_cache
;
1263 kmem_free(buf_hash_table
.ht_table
,
1264 (buf_hash_table
.ht_mask
+ 1) * sizeof (void *));
1265 for (i
= 0; i
< BUF_LOCKS
; i
++)
1266 mutex_destroy(&buf_hash_table
.ht_locks
[i
].ht_lock
);
1267 kmem_cache_destroy(hdr_full_cache
);
1268 kmem_cache_destroy(hdr_l2only_cache
);
1269 kmem_cache_destroy(buf_cache
);
1273 * Constructor callback - called when the cache is empty
1274 * and a new buf is requested.
1278 hdr_full_cons(void *vbuf
, void *unused
, int kmflag
)
1280 arc_buf_hdr_t
*hdr
= vbuf
;
1282 bzero(hdr
, HDR_FULL_SIZE
);
1283 cv_init(&hdr
->b_l1hdr
.b_cv
, NULL
, CV_DEFAULT
, NULL
);
1284 refcount_create(&hdr
->b_l1hdr
.b_refcnt
);
1285 mutex_init(&hdr
->b_l1hdr
.b_freeze_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1286 multilist_link_init(&hdr
->b_l1hdr
.b_arc_node
);
1287 arc_space_consume(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1294 hdr_l2only_cons(void *vbuf
, void *unused
, int kmflag
)
1296 arc_buf_hdr_t
*hdr
= vbuf
;
1298 bzero(hdr
, HDR_L2ONLY_SIZE
);
1299 arc_space_consume(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1306 buf_cons(void *vbuf
, void *unused
, int kmflag
)
1308 arc_buf_t
*buf
= vbuf
;
1310 bzero(buf
, sizeof (arc_buf_t
));
1311 mutex_init(&buf
->b_evict_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1312 arc_space_consume(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1318 * Destructor callback - called when a cached buf is
1319 * no longer required.
1323 hdr_full_dest(void *vbuf
, void *unused
)
1325 arc_buf_hdr_t
*hdr
= vbuf
;
1327 ASSERT(HDR_EMPTY(hdr
));
1328 cv_destroy(&hdr
->b_l1hdr
.b_cv
);
1329 refcount_destroy(&hdr
->b_l1hdr
.b_refcnt
);
1330 mutex_destroy(&hdr
->b_l1hdr
.b_freeze_lock
);
1331 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
1332 arc_space_return(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1337 hdr_l2only_dest(void *vbuf
, void *unused
)
1339 arc_buf_hdr_t
*hdr
= vbuf
;
1341 ASSERT(HDR_EMPTY(hdr
));
1342 arc_space_return(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1347 buf_dest(void *vbuf
, void *unused
)
1349 arc_buf_t
*buf
= vbuf
;
1351 mutex_destroy(&buf
->b_evict_lock
);
1352 arc_space_return(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1356 * Reclaim callback -- invoked when memory is low.
1360 hdr_recl(void *unused
)
1362 dprintf("hdr_recl called\n");
1364 * umem calls the reclaim func when we destroy the buf cache,
1365 * which is after we do arc_fini().
1368 cv_signal(&arc_reclaim_thread_cv
);
1375 uint64_t hsize
= 1ULL << 12;
1379 * The hash table is big enough to fill all of physical memory
1380 * with an average block size of zfs_arc_average_blocksize (default 8K).
1381 * By default, the table will take up
1382 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
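 *
 * Worked example (illustrative): with 64 GB of physical memory and the
 * default 8 KB average block size, the loop below settles on
 * 64 GB / 8 KB = 8M buckets (already a power of two), i.e. 8M buckets *
 * 8 bytes per pointer = 64 MB of table, matching the 1 MB-per-GB rule.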
1384 while (hsize
* zfs_arc_average_blocksize
< physmem
* PAGESIZE
)
1387 buf_hash_table
.ht_mask
= hsize
- 1;
1388 buf_hash_table
.ht_table
=
1389 kmem_zalloc(hsize
* sizeof (void*), KM_NOSLEEP
);
1390 if (buf_hash_table
.ht_table
== NULL
) {
1391 ASSERT(hsize
> (1ULL << 8));
1396 hdr_full_cache
= kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE
,
1397 0, hdr_full_cons
, hdr_full_dest
, hdr_recl
, NULL
, NULL
, 0);
1398 hdr_l2only_cache
= kmem_cache_create("arc_buf_hdr_t_l2only",
1399 HDR_L2ONLY_SIZE
, 0, hdr_l2only_cons
, hdr_l2only_dest
, hdr_recl
,
1401 buf_cache
= kmem_cache_create("arc_buf_t", sizeof (arc_buf_t
),
1402 0, buf_cons
, buf_dest
, NULL
, NULL
, NULL
, 0);
1404 for (i
= 0; i
< 256; i
++)
1405 for (ct
= zfs_crc64_table
+ i
, *ct
= i
, j
= 8; j
> 0; j
--)
1406 *ct
= (*ct
>> 1) ^ (-(*ct
& 1) & ZFS_CRC64_POLY
);
1408 for (i
= 0; i
< BUF_LOCKS
; i
++) {
1409 mutex_init(&buf_hash_table
.ht_locks
[i
].ht_lock
,
1410 NULL
, MUTEX_DEFAULT
, NULL
);
1415 * This is the size that the buf occupies in memory. If the buf is compressed,
1416 * it will correspond to the compressed size. You should use this method of
1417 * getting the buf size unless you explicitly need the logical size.
1420 arc_buf_size(arc_buf_t
*buf
)
1422 return (ARC_BUF_COMPRESSED(buf
) ?
1423 HDR_GET_PSIZE(buf
->b_hdr
) : HDR_GET_LSIZE(buf
->b_hdr
));
1427 arc_buf_lsize(arc_buf_t
*buf
)
1429 return (HDR_GET_LSIZE(buf
->b_hdr
));
1433 arc_get_compression(arc_buf_t
*buf
)
1435 return (ARC_BUF_COMPRESSED(buf
) ?
1436 HDR_GET_COMPRESS(buf
->b_hdr
) : ZIO_COMPRESS_OFF
);
1439 #define ARC_MINTIME (hz>>4) /* 62 ms */
1441 static inline boolean_t
1442 arc_buf_is_shared(arc_buf_t
*buf
)
1444 boolean_t shared
= (buf
->b_data
!= NULL
&&
1445 buf
->b_hdr
->b_l1hdr
.b_pabd
!= NULL
&&
1446 abd_is_linear(buf
->b_hdr
->b_l1hdr
.b_pabd
) &&
1447 buf
->b_data
== abd_to_buf(buf
->b_hdr
->b_l1hdr
.b_pabd
));
1448 IMPLY(shared
, HDR_SHARED_DATA(buf
->b_hdr
));
1449 IMPLY(shared
, ARC_BUF_SHARED(buf
));
1450 IMPLY(shared
, ARC_BUF_COMPRESSED(buf
) || ARC_BUF_LAST(buf
));
1453 * It would be nice to assert arc_can_share() too, but the "hdr isn't
1454 * already being shared" requirement prevents us from doing that.
1461 * Free the checksum associated with this header. If there is no checksum, this is a no-op.
1465 arc_cksum_free(arc_buf_hdr_t
*hdr
)
1467 ASSERT(HDR_HAS_L1HDR(hdr
));
1468 mutex_enter(&hdr
->b_l1hdr
.b_freeze_lock
);
1469 if (hdr
->b_l1hdr
.b_freeze_cksum
!= NULL
) {
1470 kmem_free(hdr
->b_l1hdr
.b_freeze_cksum
, sizeof (zio_cksum_t
));
1471 hdr
->b_l1hdr
.b_freeze_cksum
= NULL
;
1473 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1477 * Return true iff at least one of the bufs on hdr is not compressed.
1480 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t
*hdr
)
1482 for (arc_buf_t
*b
= hdr
->b_l1hdr
.b_buf
; b
!= NULL
; b
= b
->b_next
) {
1483 if (!ARC_BUF_COMPRESSED(b
)) {
1491 * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
1492 * matches the checksum that is stored in the hdr. If there is no checksum,
1493 * or if the buf is compressed, this is a no-op.
1496 arc_cksum_verify(arc_buf_t
*buf
)
1498 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1501 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1504 if (ARC_BUF_COMPRESSED(buf
)) {
1505 ASSERT(hdr
->b_l1hdr
.b_freeze_cksum
== NULL
||
1506 arc_hdr_has_uncompressed_buf(hdr
));
1510 ASSERT(HDR_HAS_L1HDR(hdr
));
1512 mutex_enter(&hdr
->b_l1hdr
.b_freeze_lock
);
1513 if (hdr
->b_l1hdr
.b_freeze_cksum
== NULL
|| HDR_IO_ERROR(hdr
)) {
1514 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1518 fletcher_2_native(buf
->b_data
, arc_buf_size(buf
), NULL
, &zc
);
1519 if (!ZIO_CHECKSUM_EQUAL(*hdr
->b_l1hdr
.b_freeze_cksum
, zc
))
1520 panic("buffer modified while frozen!");
1521 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1525 arc_cksum_is_equal(arc_buf_hdr_t
*hdr
, zio_t
*zio
)
1527 enum zio_compress compress
= BP_GET_COMPRESS(zio
->io_bp
);
1528 boolean_t valid_cksum
;
1530 ASSERT(!BP_IS_EMBEDDED(zio
->io_bp
));
1531 VERIFY3U(BP_GET_PSIZE(zio
->io_bp
), ==, HDR_GET_PSIZE(hdr
));
1534 * We rely on the blkptr's checksum to determine if the block
1535 * is valid or not. When compressed arc is enabled, the l2arc
1536 * writes the block to the l2arc just as it appears in the pool.
1537 * This allows us to use the blkptr's checksum to validate the
1538 * data that we just read off of the l2arc without having to store
1539 * a separate checksum in the arc_buf_hdr_t. However, if compressed
1540 * arc is disabled, then the data written to the l2arc is always
1541 * uncompressed and won't match the block as it exists in the main
1542 * pool. When this is the case, we must first compress it if it is
1543 * compressed on the main pool before we can validate the checksum.
1545 if (!HDR_COMPRESSION_ENABLED(hdr
) && compress
!= ZIO_COMPRESS_OFF
) {
1546 ASSERT3U(HDR_GET_COMPRESS(hdr
), ==, ZIO_COMPRESS_OFF
);
1547 uint64_t lsize
= HDR_GET_LSIZE(hdr
);
1550 abd_t
*cdata
= abd_alloc_linear(HDR_GET_PSIZE(hdr
), B_TRUE
);
1551 csize
= zio_compress_data(compress
, zio
->io_abd
,
1552 abd_to_buf(cdata
), lsize
);
1554 ASSERT3U(csize
, <=, HDR_GET_PSIZE(hdr
));
1555 if (csize
< HDR_GET_PSIZE(hdr
)) {
1557 * Compressed blocks are always a multiple of the
1558 * smallest ashift in the pool. Ideally, we would
1559 * like to round up the csize to the next
1560 * spa_min_ashift but that value may have changed
1561 * since the block was last written. Instead,
1562 * we rely on the fact that the hdr's psize
1563 * was set to the psize of the block when it was
1564 * last written. We set the csize to that value
1565 * and zero out any part that should not contain
1568 abd_zero_off(cdata
, csize
, HDR_GET_PSIZE(hdr
) - csize
);
1569 csize
= HDR_GET_PSIZE(hdr
);
1571 zio_push_transform(zio
, cdata
, csize
, HDR_GET_PSIZE(hdr
), NULL
);
1575 * Block pointers always store the checksum for the logical data.
1576 * If the block pointer has the gang bit set, then the checksum
1577 * it represents is for the reconstituted data and not for an
1578 * individual gang member. The zio pipeline, however, must be able to
1579 * determine the checksum of each of the gang constituents so it
1580 * treats the checksum comparison differently than what we need
1581 * for l2arc blocks. This prevents us from using the
1582 * zio_checksum_error() interface directly. Instead we must call the
1583 * zio_checksum_error_impl() so that we can ensure the checksum is
1584 * generated using the correct checksum algorithm and accounts for the
1585 * logical I/O size and not just a gang fragment.
1587 valid_cksum
= (zio_checksum_error_impl(zio
->io_spa
, zio
->io_bp
,
1588 BP_GET_CHECKSUM(zio
->io_bp
), zio
->io_abd
, zio
->io_size
,
1589 zio
->io_offset
, NULL
) == 0);
1590 zio_pop_transforms(zio
);
1591 return (valid_cksum
);
1595 * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
1596 * checksum and attaches it to the buf's hdr so that we can ensure that the buf
1597 * isn't modified later on. If buf is compressed or there is already a checksum
1598 * on the hdr, this is a no-op (we only checksum uncompressed bufs).
1601 arc_cksum_compute(arc_buf_t
*buf
)
1603 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1605 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1608 ASSERT(HDR_HAS_L1HDR(hdr
));
1610 mutex_enter(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1611 if (hdr
->b_l1hdr
.b_freeze_cksum
!= NULL
) {
1612 ASSERT(arc_hdr_has_uncompressed_buf(hdr
));
1613 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1615 } else if (ARC_BUF_COMPRESSED(buf
)) {
1616 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1620 ASSERT(!ARC_BUF_COMPRESSED(buf
));
1621 hdr
->b_l1hdr
.b_freeze_cksum
= kmem_alloc(sizeof (zio_cksum_t
),
1623 fletcher_2_native(buf
->b_data
, arc_buf_size(buf
), NULL
,
1624 hdr
->b_l1hdr
.b_freeze_cksum
);
1625 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
#ifndef _KERNEL
typedef struct procctl {
	long cmd;
	prwatch_t prwatch;
} procctl_t;
#endif

/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = 0;
		ctl.prwatch.pr_wflags = 0;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}

/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = arc_buf_size(buf);
		ctl.prwatch.pr_wflags = WA_WRITE;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
	arc_buf_contents_t type;

	if (HDR_ISTYPE_METADATA(hdr)) {
		type = ARC_BUFC_METADATA;
	} else {
		type = ARC_BUFC_DATA;
	}
	VERIFY3U(hdr->b_type, ==, type);
	return (type);
}

boolean_t
arc_is_metadata(arc_buf_t *buf)
{
	return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}

static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
	switch (type) {
	case ARC_BUFC_DATA:
		/* metadata field is 0 if buffer contains normal data */
		return (0);
	case ARC_BUFC_METADATA:
		return (ARC_FLAG_BUFC_METADATA);
	default:
		break;
	}
	panic("undefined ARC buffer type!");
	return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	arc_cksum_verify(buf);

	/*
	 * Compressed buffers do not manipulate the b_freeze_cksum or
	 * allocate b_thawed.
	 */
	if (ARC_BUF_COMPRESSED(buf)) {
		ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
		    arc_hdr_has_uncompressed_buf(hdr));
		return;
	}

	ASSERT(HDR_HAS_L1HDR(hdr));
	arc_cksum_free(hdr);

	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
#ifdef ZFS_DEBUG
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (hdr->b_l1hdr.b_thawed != NULL)
			kmem_free(hdr->b_l1hdr.b_thawed, 1);
		hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP);
	}
#endif

	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);

	arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	if (ARC_BUF_COMPRESSED(buf)) {
		ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
		    arc_hdr_has_uncompressed_buf(hdr));
		return;
	}

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL ||
	    hdr->b_l1hdr.b_state == arc_anon);
	arc_cksum_compute(buf);
	mutex_exit(hash_lock);
}
/*
 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
 * the following functions should be used to ensure that the flags are
 * updated in a thread-safe way. When manipulating the flags either
 * the hash_lock must be held or the hdr must be undiscoverable. This
 * ensures that we're not racing with any other threads when updating
 * the flags.
 */
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
	hdr->b_flags |= flags;
}

static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
	hdr->b_flags &= ~flags;
}
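/*
 * Illustrative call pattern (a sketch, not lifted from a specific caller): a
 * thread that found the hdr through the hash table already holds the hash
 * lock, so it may update flags through these helpers, e.g.
 *
 *	mutex_enter(hash_lock);
 *	arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
 *	mutex_exit(hash_lock);
 *
 * A freshly allocated hdr that is still undiscoverable (HDR_EMPTY()) may
 * skip the lock, which is exactly what the ASSERTs above allow.
 */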
/*
 * Setting the compression bits in the arc_buf_hdr_t's b_flags is
 * done in a special way since we have to clear and set bits
 * at the same time. Consumers that wish to set the compression bits
 * must use this function to ensure that the flags are updated in a
 * thread-safe manner.
 */
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * Holes and embedded blocks will always have a psize = 0 so
	 * we ignore the compression of the blkptr and set the
	 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF.
	 * Holes and embedded blocks remain anonymous so we don't
	 * want to uncompress them. Mark them as uncompressed.
	 */
	if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
		arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
		ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
	} else {
		arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
		HDR_SET_COMPRESS(hdr, cmp);
		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
		ASSERT(HDR_COMPRESSION_ENABLED(hdr));
	}
}
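/*
 * For example (hypothetical values): with zfs_compressed_arc_enabled set and
 * a hdr whose psize is non-zero, arc_hdr_set_compress(hdr, ZIO_COMPRESS_LZ4)
 * leaves HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_LZ4 and
 * HDR_COMPRESSION_ENABLED(hdr) true; with the tunable clear, or for a hole
 * or embedded block (psize == 0), the same call records ZIO_COMPRESS_OFF
 * instead.
 */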
/*
 * Looks for another buf on the same hdr which has the data decompressed,
 * copies from it, and returns true. If no such buf exists, returns false.
 */
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	boolean_t copied = B_FALSE;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(!ARC_BUF_COMPRESSED(buf));

	for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
	    from = from->b_next) {
		/* can't use our own data buffer */
		if (from == buf) {
			continue;
		}

		if (!ARC_BUF_COMPRESSED(from)) {
			bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
			copied = B_TRUE;
			break;
		}
	}

	/*
	 * There were no decompressed bufs, so there should not be a
	 * checksum on the hdr either.
	 */
	EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);

	return (copied);
}
1861 * Given a buf that has a data buffer attached to it, this function will
1862 * efficiently fill the buf with data of the specified compression setting from
1863 * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
1864 * are already sharing a data buf, no copy is performed.
1866 * If the buf is marked as compressed but uncompressed data was requested, this
1867 * will allocate a new data buffer for the buf, remove that flag, and fill the
1868 * buf with uncompressed data. You can't request a compressed buf on a hdr with
1869 * uncompressed data, and (since we haven't added support for it yet) if you
1870 * want compressed data your buf must already be marked as compressed and have
1871 * the correct-sized data buffer.
1874 arc_buf_fill(arc_buf_t
*buf
, boolean_t compressed
)
1876 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1877 boolean_t hdr_compressed
= (HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
);
1878 dmu_object_byteswap_t bswap
= hdr
->b_l1hdr
.b_byteswap
;
1880 ASSERT3P(buf
->b_data
, !=, NULL
);
1881 IMPLY(compressed
, hdr_compressed
);
1882 IMPLY(compressed
, ARC_BUF_COMPRESSED(buf
));
1884 if (hdr_compressed
== compressed
) {
1885 if (!arc_buf_is_shared(buf
)) {
1886 abd_copy_to_buf(buf
->b_data
, hdr
->b_l1hdr
.b_pabd
,
1890 ASSERT(hdr_compressed
);
1891 ASSERT(!compressed
);
1892 ASSERT3U(HDR_GET_LSIZE(hdr
), !=, HDR_GET_PSIZE(hdr
));
1895 * If the buf is sharing its data with the hdr, unlink it and
1896 * allocate a new data buffer for the buf.
1898 if (arc_buf_is_shared(buf
)) {
1899 ASSERT(ARC_BUF_COMPRESSED(buf
));
1901 /* We need to give the buf it's own b_data */
1902 buf
->b_flags
&= ~ARC_BUF_FLAG_SHARED
;
1904 arc_get_data_buf(hdr
, HDR_GET_LSIZE(hdr
), buf
);
1905 arc_hdr_clear_flags(hdr
, ARC_FLAG_SHARED_DATA
);
1907 /* Previously overhead was 0; just add new overhead */
1908 ARCSTAT_INCR(arcstat_overhead_size
, HDR_GET_LSIZE(hdr
));
1909 } else if (ARC_BUF_COMPRESSED(buf
)) {
1910 /* We need to reallocate the buf's b_data */
1911 arc_free_data_buf(hdr
, buf
->b_data
, HDR_GET_PSIZE(hdr
),
1914 arc_get_data_buf(hdr
, HDR_GET_LSIZE(hdr
), buf
);
1916 /* We increased the size of b_data; update overhead */
1917 ARCSTAT_INCR(arcstat_overhead_size
,
1918 HDR_GET_LSIZE(hdr
) - HDR_GET_PSIZE(hdr
));
1922 * Regardless of the buf's previous compression settings, it
1923 * should not be compressed at the end of this function.
1925 buf
->b_flags
&= ~ARC_BUF_FLAG_COMPRESSED
;
1928 * Try copying the data from another buf which already has a
1929 * decompressed version. If that's not possible, it's time to
1930 * bite the bullet and decompress the data from the hdr.
1932 if (arc_buf_try_copy_decompressed_data(buf
)) {
1933 /* Skip byteswapping and checksumming (already done) */
1934 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, !=, NULL
);
1937 int error
= zio_decompress_data(HDR_GET_COMPRESS(hdr
),
1938 hdr
->b_l1hdr
.b_pabd
, buf
->b_data
,
1939 HDR_GET_PSIZE(hdr
), HDR_GET_LSIZE(hdr
));
1942 * Absent hardware errors or software bugs, this should
1943 * be impossible, but log it anyway so we can debug it.
1947 "hdr %p, compress %d, psize %d, lsize %d",
1948 hdr
, HDR_GET_COMPRESS(hdr
),
1949 HDR_GET_PSIZE(hdr
), HDR_GET_LSIZE(hdr
));
1950 return (SET_ERROR(EIO
));
1955 /* Byteswap the buf's data if necessary */
1956 if (bswap
!= DMU_BSWAP_NUMFUNCS
) {
1957 ASSERT(!HDR_SHARED_DATA(hdr
));
1958 ASSERT3U(bswap
, <, DMU_BSWAP_NUMFUNCS
);
1959 dmu_ot_byteswap
[bswap
].ob_func(buf
->b_data
, HDR_GET_LSIZE(hdr
));
1962 /* Compute the hdr's checksum if necessary */
1963 arc_cksum_compute(buf
);
int
arc_decompress(arc_buf_t *buf)
{
	return (arc_buf_fill(buf, B_FALSE));
}

/*
 * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
 */
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
	uint64_t size;

	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
	    HDR_GET_PSIZE(hdr) > 0) {
		size = HDR_GET_PSIZE(hdr);
	} else {
		ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
		size = HDR_GET_LSIZE(hdr);
	}
	return (size);
}
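/*
 * Worked example (hypothetical sizes): a 128K logical block that compressed
 * down to 4K has HDR_GET_LSIZE(hdr) == 131072 and HDR_GET_PSIZE(hdr) == 4096,
 * so arc_hdr_size() returns 4096 while the hdr holds compressed data, and
 * 131072 when the hdr's compression is ZIO_COMPRESS_OFF.
 */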
1993 * Increment the amount of evictable space in the arc_state_t's refcount.
1994 * We account for the space used by the hdr and the arc buf individually
1995 * so that we can add and remove them from the refcount individually.
1998 arc_evictable_space_increment(arc_buf_hdr_t
*hdr
, arc_state_t
*state
)
2000 arc_buf_contents_t type
= arc_buf_type(hdr
);
2002 ASSERT(HDR_HAS_L1HDR(hdr
));
2004 if (GHOST_STATE(state
)) {
2005 ASSERT0(hdr
->b_l1hdr
.b_bufcnt
);
2006 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2007 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2008 (void) refcount_add_many(&state
->arcs_esize
[type
],
2009 HDR_GET_LSIZE(hdr
), hdr
);
2013 ASSERT(!GHOST_STATE(state
));
2014 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
2015 (void) refcount_add_many(&state
->arcs_esize
[type
],
2016 arc_hdr_size(hdr
), hdr
);
2018 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2019 buf
= buf
->b_next
) {
2020 if (arc_buf_is_shared(buf
))
2022 (void) refcount_add_many(&state
->arcs_esize
[type
],
2023 arc_buf_size(buf
), buf
);
2028 * Decrement the amount of evictable space in the arc_state_t's refcount.
2029 * We account for the space used by the hdr and the arc buf individually
2030 * so that we can add and remove them from the refcount individually.
2033 arc_evictable_space_decrement(arc_buf_hdr_t
*hdr
, arc_state_t
*state
)
2035 arc_buf_contents_t type
= arc_buf_type(hdr
);
2037 ASSERT(HDR_HAS_L1HDR(hdr
));
2039 if (GHOST_STATE(state
)) {
2040 ASSERT0(hdr
->b_l1hdr
.b_bufcnt
);
2041 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2042 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2043 (void) refcount_remove_many(&state
->arcs_esize
[type
],
2044 HDR_GET_LSIZE(hdr
), hdr
);
2048 ASSERT(!GHOST_STATE(state
));
2049 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
2050 (void) refcount_remove_many(&state
->arcs_esize
[type
],
2051 arc_hdr_size(hdr
), hdr
);
2053 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2054 buf
= buf
->b_next
) {
2055 if (arc_buf_is_shared(buf
))
2057 (void) refcount_remove_many(&state
->arcs_esize
[type
],
2058 arc_buf_size(buf
), buf
);
2063 * Add a reference to this hdr indicating that someone is actively
2064 * referencing that memory. When the refcount transitions from 0 to 1,
2065 * we remove it from the respective arc_state_t list to indicate that
2066 * it is not evictable.
2069 add_reference(arc_buf_hdr_t
*hdr
, void *tag
)
2071 ASSERT(HDR_HAS_L1HDR(hdr
));
2072 if (!MUTEX_HELD(HDR_LOCK(hdr
))) {
2073 ASSERT(hdr
->b_l1hdr
.b_state
== arc_anon
);
2074 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
2075 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2078 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
2080 if ((refcount_add(&hdr
->b_l1hdr
.b_refcnt
, tag
) == 1) &&
2081 (state
!= arc_anon
)) {
2082 /* We don't use the L2-only state list. */
2083 if (state
!= arc_l2c_only
) {
2084 multilist_remove(state
->arcs_list
[arc_buf_type(hdr
)],
2086 arc_evictable_space_decrement(hdr
, state
);
2088 /* remove the prefetch flag if we get a reference */
2089 arc_hdr_clear_flags(hdr
, ARC_FLAG_PREFETCH
);
2094 * Remove a reference from this hdr. When the reference transitions from
2095 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
2096 * list making it eligible for eviction.
2099 remove_reference(arc_buf_hdr_t
*hdr
, kmutex_t
*hash_lock
, void *tag
)
2102 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
2104 ASSERT(HDR_HAS_L1HDR(hdr
));
2105 ASSERT(state
== arc_anon
|| MUTEX_HELD(hash_lock
));
2106 ASSERT(!GHOST_STATE(state
));
2109 * arc_l2c_only counts as a ghost state so we don't need to explicitly
2110 * check to prevent usage of the arc_l2c_only list.
2112 if (((cnt
= refcount_remove(&hdr
->b_l1hdr
.b_refcnt
, tag
)) == 0) &&
2113 (state
!= arc_anon
)) {
2114 multilist_insert(state
->arcs_list
[arc_buf_type(hdr
)], hdr
);
2115 ASSERT3U(hdr
->b_l1hdr
.b_bufcnt
, >, 0);
2116 arc_evictable_space_increment(hdr
, state
);
2122 * Move the supplied buffer to the indicated state. The hash lock
2123 * for the buffer must be held by the caller.
2126 arc_change_state(arc_state_t
*new_state
, arc_buf_hdr_t
*hdr
,
2127 kmutex_t
*hash_lock
)
2129 arc_state_t
*old_state
;
2132 boolean_t update_old
, update_new
;
2133 arc_buf_contents_t buftype
= arc_buf_type(hdr
);
2136 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
2137 * in arc_read() when bringing a buffer out of the L2ARC. However, the
2138 * L1 hdr doesn't always exist when we change state to arc_anon before
2139 * destroying a header, in which case reallocating to add the L1 hdr is
2142 if (HDR_HAS_L1HDR(hdr
)) {
2143 old_state
= hdr
->b_l1hdr
.b_state
;
2144 refcnt
= refcount_count(&hdr
->b_l1hdr
.b_refcnt
);
2145 bufcnt
= hdr
->b_l1hdr
.b_bufcnt
;
2146 update_old
= (bufcnt
> 0 || hdr
->b_l1hdr
.b_pabd
!= NULL
);
2148 old_state
= arc_l2c_only
;
2151 update_old
= B_FALSE
;
2153 update_new
= update_old
;
2155 ASSERT(MUTEX_HELD(hash_lock
));
2156 ASSERT3P(new_state
, !=, old_state
);
2157 ASSERT(!GHOST_STATE(new_state
) || bufcnt
== 0);
2158 ASSERT(old_state
!= arc_anon
|| bufcnt
<= 1);
2161 * If this buffer is evictable, transfer it from the
2162 * old state list to the new state list.
2165 if (old_state
!= arc_anon
&& old_state
!= arc_l2c_only
) {
2166 ASSERT(HDR_HAS_L1HDR(hdr
));
2167 multilist_remove(old_state
->arcs_list
[buftype
], hdr
);
2169 if (GHOST_STATE(old_state
)) {
2171 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2172 update_old
= B_TRUE
;
2174 arc_evictable_space_decrement(hdr
, old_state
);
2176 if (new_state
!= arc_anon
&& new_state
!= arc_l2c_only
) {
2179 * An L1 header always exists here, since if we're
2180 * moving to some L1-cached state (i.e. not l2c_only or
2181 * anonymous), we realloc the header to add an L1hdr
2184 ASSERT(HDR_HAS_L1HDR(hdr
));
2185 multilist_insert(new_state
->arcs_list
[buftype
], hdr
);
2187 if (GHOST_STATE(new_state
)) {
2189 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2190 update_new
= B_TRUE
;
2192 arc_evictable_space_increment(hdr
, new_state
);
2196 ASSERT(!HDR_EMPTY(hdr
));
2197 if (new_state
== arc_anon
&& HDR_IN_HASH_TABLE(hdr
))
2198 buf_hash_remove(hdr
);
2200 /* adjust state sizes (ignore arc_l2c_only) */
2202 if (update_new
&& new_state
!= arc_l2c_only
) {
2203 ASSERT(HDR_HAS_L1HDR(hdr
));
2204 if (GHOST_STATE(new_state
)) {
2208 * When moving a header to a ghost state, we first
2209 * remove all arc buffers. Thus, we'll have a
2210 * bufcnt of zero, and no arc buffer to use for
2211 * the reference. As a result, we use the arc
2212 * header pointer for the reference.
2214 (void) refcount_add_many(&new_state
->arcs_size
,
2215 HDR_GET_LSIZE(hdr
), hdr
);
2216 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2218 uint32_t buffers
= 0;
2221 * Each individual buffer holds a unique reference,
2222 * thus we must remove each of these references one
2225 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2226 buf
= buf
->b_next
) {
2227 ASSERT3U(bufcnt
, !=, 0);
2231 * When the arc_buf_t is sharing the data
2232 * block with the hdr, the owner of the
2233 * reference belongs to the hdr. Only
2234 * add to the refcount if the arc_buf_t is
2237 if (arc_buf_is_shared(buf
))
2240 (void) refcount_add_many(&new_state
->arcs_size
,
2241 arc_buf_size(buf
), buf
);
2243 ASSERT3U(bufcnt
, ==, buffers
);
2245 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
2246 (void) refcount_add_many(&new_state
->arcs_size
,
2247 arc_hdr_size(hdr
), hdr
);
2249 ASSERT(GHOST_STATE(old_state
));
2254 if (update_old
&& old_state
!= arc_l2c_only
) {
2255 ASSERT(HDR_HAS_L1HDR(hdr
));
2256 if (GHOST_STATE(old_state
)) {
2258 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2261 * When moving a header off of a ghost state,
2262 * the header will not contain any arc buffers.
2263 * We use the arc header pointer for the reference
2264 * which is exactly what we did when we put the
2265 * header on the ghost state.
2268 (void) refcount_remove_many(&old_state
->arcs_size
,
2269 HDR_GET_LSIZE(hdr
), hdr
);
2271 uint32_t buffers
= 0;
2274 * Each individual buffer holds a unique reference,
2275 * thus we must remove each of these references one
2278 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2279 buf
= buf
->b_next
) {
2280 ASSERT3U(bufcnt
, !=, 0);
2284 * When the arc_buf_t is sharing the data
2285 * block with the hdr, the owner of the
2286 * reference belongs to the hdr. Only
2287 * add to the refcount if the arc_buf_t is
2290 if (arc_buf_is_shared(buf
))
2293 (void) refcount_remove_many(
2294 &old_state
->arcs_size
, arc_buf_size(buf
),
2297 ASSERT3U(bufcnt
, ==, buffers
);
2298 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
2299 (void) refcount_remove_many(
2300 &old_state
->arcs_size
, arc_hdr_size(hdr
), hdr
);
2304 if (HDR_HAS_L1HDR(hdr
))
2305 hdr
->b_l1hdr
.b_state
= new_state
;
2308 * L2 headers should never be on the L2 state list since they don't
2309 * have L1 headers allocated.
2311 ASSERT(multilist_is_empty(arc_l2c_only
->arcs_list
[ARC_BUFC_DATA
]) &&
2312 multilist_is_empty(arc_l2c_only
->arcs_list
[ARC_BUFC_METADATA
]));
2316 arc_space_consume(uint64_t space
, arc_space_type_t type
)
2318 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
2321 case ARC_SPACE_DATA
:
2322 ARCSTAT_INCR(arcstat_data_size
, space
);
2324 case ARC_SPACE_META
:
2325 ARCSTAT_INCR(arcstat_metadata_size
, space
);
2327 case ARC_SPACE_OTHER
:
2328 ARCSTAT_INCR(arcstat_other_size
, space
);
2330 case ARC_SPACE_HDRS
:
2331 ARCSTAT_INCR(arcstat_hdr_size
, space
);
2333 case ARC_SPACE_L2HDRS
:
2334 ARCSTAT_INCR(arcstat_l2_hdr_size
, space
);
2338 if (type
!= ARC_SPACE_DATA
)
2339 ARCSTAT_INCR(arcstat_meta_used
, space
);
2341 atomic_add_64(&arc_size
, space
);
2345 arc_space_return(uint64_t space
, arc_space_type_t type
)
2347 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
2350 case ARC_SPACE_DATA
:
2351 ARCSTAT_INCR(arcstat_data_size
, -space
);
2353 case ARC_SPACE_META
:
2354 ARCSTAT_INCR(arcstat_metadata_size
, -space
);
2356 case ARC_SPACE_OTHER
:
2357 ARCSTAT_INCR(arcstat_other_size
, -space
);
2359 case ARC_SPACE_HDRS
:
2360 ARCSTAT_INCR(arcstat_hdr_size
, -space
);
2362 case ARC_SPACE_L2HDRS
:
2363 ARCSTAT_INCR(arcstat_l2_hdr_size
, -space
);
2367 if (type
!= ARC_SPACE_DATA
) {
2368 ASSERT(arc_meta_used
>= space
);
2369 if (arc_meta_max
< arc_meta_used
)
2370 arc_meta_max
= arc_meta_used
;
2371 ARCSTAT_INCR(arcstat_meta_used
, -space
);
2374 ASSERT(arc_size
>= space
);
2375 atomic_add_64(&arc_size
, -space
);
/*
 * Given a hdr and a buf, returns whether that buf can share its b_data buffer
 * with the hdr's b_pabd.
 */
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	/*
	 * The criteria for sharing a hdr's data are:
	 * 1. the hdr's compression matches the buf's compression
	 * 2. the hdr doesn't need to be byteswapped
	 * 3. the hdr isn't already being shared
	 * 4. the buf is either compressed or it is the last buf in the hdr list
	 *
	 * Criterion #4 maintains the invariant that shared uncompressed
	 * bufs must be the final buf in the hdr's b_buf list. Reading this, you
	 * might ask, "if a compressed buf is allocated first, won't that be the
	 * last thing in the list?", but in that case it's impossible to create
	 * a shared uncompressed buf anyway (because the hdr must be compressed
	 * to have the compressed buf). You might also think that #3 is
	 * sufficient to make this guarantee, however it's possible
	 * (specifically in the rare L2ARC write race mentioned in
	 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
	 * is sharable, but wasn't at the time of its allocation. Rather than
	 * allow a new shared uncompressed buf to be created and then shuffle
	 * the list around to make it the last element, this simply disallows
	 * sharing if the new buf isn't the first to be added.
	 */
	ASSERT3P(buf->b_hdr, ==, hdr);
	boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF;
	boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
	return (buf_compressed == hdr_compressed &&
	    hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
	    !HDR_SHARED_DATA(hdr) &&
	    (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
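/*
 * Callers typically combine this check with the L2ARC and linearity
 * restrictions described in arc_buf_alloc_impl(), along the lines of
 * (sketch only):
 *
 *	if (arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
 *	    abd_is_linear(hdr->b_l1hdr.b_pabd))
 *		arc_share_buf(hdr, buf);
 */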
2416 * Allocate a buf for this hdr. If you care about the data that's in the hdr,
2417 * or if you want a compressed buffer, pass those flags in. Returns 0 if the
2418 * copy was made successfully, or an error code otherwise.
2421 arc_buf_alloc_impl(arc_buf_hdr_t
*hdr
, void *tag
, boolean_t compressed
,
2422 boolean_t fill
, arc_buf_t
**ret
)
2426 ASSERT(HDR_HAS_L1HDR(hdr
));
2427 ASSERT3U(HDR_GET_LSIZE(hdr
), >, 0);
2428 VERIFY(hdr
->b_type
== ARC_BUFC_DATA
||
2429 hdr
->b_type
== ARC_BUFC_METADATA
);
2430 ASSERT3P(ret
, !=, NULL
);
2431 ASSERT3P(*ret
, ==, NULL
);
2433 buf
= *ret
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
2436 buf
->b_next
= hdr
->b_l1hdr
.b_buf
;
2439 add_reference(hdr
, tag
);
2442 * We're about to change the hdr's b_flags. We must either
2443 * hold the hash_lock or be undiscoverable.
2445 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)) || HDR_EMPTY(hdr
));
2448 * Only honor requests for compressed bufs if the hdr is actually
2451 if (compressed
&& HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
)
2452 buf
->b_flags
|= ARC_BUF_FLAG_COMPRESSED
;
2455 * If the hdr's data can be shared then we share the data buffer and
2456 * set the appropriate bit in the hdr's b_flags to indicate the hdr is
2457 * sharing it's b_pabd with the arc_buf_t. Otherwise, we allocate a new
2458 * buffer to store the buf's data.
2460 * There are two additional restrictions here because we're sharing
2461 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
2462 * actively involved in an L2ARC write, because if this buf is used by
2463 * an arc_write() then the hdr's data buffer will be released when the
2464 * write completes, even though the L2ARC write might still be using it.
2465 * Second, the hdr's ABD must be linear so that the buf's user doesn't
2466 * need to be ABD-aware.
2468 boolean_t can_share
= arc_can_share(hdr
, buf
) && !HDR_L2_WRITING(hdr
) &&
2469 abd_is_linear(hdr
->b_l1hdr
.b_pabd
);
2471 /* Set up b_data and sharing */
2473 buf
->b_data
= abd_to_buf(hdr
->b_l1hdr
.b_pabd
);
2474 buf
->b_flags
|= ARC_BUF_FLAG_SHARED
;
2475 arc_hdr_set_flags(hdr
, ARC_FLAG_SHARED_DATA
);
2478 arc_get_data_buf(hdr
, arc_buf_size(buf
), buf
);
2479 ARCSTAT_INCR(arcstat_overhead_size
, arc_buf_size(buf
));
2481 VERIFY3P(buf
->b_data
, !=, NULL
);
2483 hdr
->b_l1hdr
.b_buf
= buf
;
2484 hdr
->b_l1hdr
.b_bufcnt
+= 1;
2487 * If the user wants the data from the hdr, we need to either copy or
2488 * decompress the data.
2491 return (arc_buf_fill(buf
, ARC_BUF_COMPRESSED(buf
) != 0));
static char *arc_onloan_tag = "onloan";

static inline void
arc_loaned_bytes_update(int64_t delta)
{
	atomic_add_64(&arc_loaned_bytes, delta);

	/* assert that it did not wrap around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
	arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
	    is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);

	arc_loaned_bytes_update(size);

	return (buf);
}

arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type)
{
	arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
	    psize, lsize, compression_type);

	arc_loaned_bytes_update(psize);

	return (buf);
}

/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);

	arc_loaned_bytes_update(-arc_buf_size(buf));
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);

	arc_loaned_bytes_update(arc_buf_size(buf));
}
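/*
 * A minimal sketch of the loan cycle (illustrative; "size" and "tag" are
 * placeholders for whatever the consumer uses):
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, B_FALSE, size);
 *	... fill abuf->b_data ...
 *	arc_return_buf(abuf, tag);
 *
 * Between the two calls the buffer is tracked in arc_loaned_bytes rather
 * than being counted as in-flight data by arc_tempreserve_space().
 */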
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
	l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);

	df->l2df_abd = abd;
	df->l2df_size = size;
	df->l2df_type = type;
	mutex_enter(&l2arc_free_on_write_mtx);
	list_insert_head(l2arc_free_on_write, df);
	mutex_exit(&l2arc_free_on_write_mtx);
}

static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;
	arc_buf_contents_t type = arc_buf_type(hdr);
	uint64_t size = arc_hdr_size(hdr);

	/* protected by hash lock, if in the hash table */
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT(state != arc_anon && state != arc_l2c_only);

		(void) refcount_remove_many(&state->arcs_esize[type],
		    size, hdr);
	}
	(void) refcount_remove_many(&state->arcs_size, size, hdr);
	if (type == ARC_BUFC_METADATA) {
		arc_space_return(size, ARC_SPACE_META);
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		arc_space_return(size, ARC_SPACE_DATA);
	}

	l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
/*
 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
 * data buffer, we transfer the refcount ownership to the hdr and update
 * the appropriate kstats.
 */
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;

	ASSERT(arc_can_share(hdr, buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * Start sharing the data buffer. We transfer the
	 * refcount ownership to the hdr since it always owns
	 * the refcount whenever an arc_buf_t is shared.
	 */
	refcount_transfer_ownership(&state->arcs_size, buf, hdr);
	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
	    HDR_ISTYPE_METADATA(hdr));
	arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
	buf->b_flags |= ARC_BUF_FLAG_SHARED;

	/*
	 * Since we've transferred ownership to the hdr we need
	 * to increment its compressed and uncompressed kstats and
	 * decrement the overhead size.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}

static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;

	ASSERT(arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * We are no longer sharing this buffer so we need
	 * to transfer its ownership to the rightful owner.
	 */
	refcount_transfer_ownership(&state->arcs_size, hdr, buf);
	arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
	abd_put(hdr->b_l1hdr.b_pabd);
	hdr->b_l1hdr.b_pabd = NULL;
	buf->b_flags &= ~ARC_BUF_FLAG_SHARED;

	/*
	 * Since the buffer is no longer shared between
	 * the arc buf and the hdr, count it as overhead.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
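/*
 * Note the kstat symmetry with arc_share_buf() above: once a buffer stops
 * being shared its bytes move back out of arcstat_compressed_size and
 * arcstat_uncompressed_size and are charged to arcstat_overhead_size again.
 */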
2673 * Remove an arc_buf_t from the hdr's buf list and return the last
2674 * arc_buf_t on the list. If no buffers remain on the list then return
2678 arc_buf_remove(arc_buf_hdr_t
*hdr
, arc_buf_t
*buf
)
2680 ASSERT(HDR_HAS_L1HDR(hdr
));
2681 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)) || HDR_EMPTY(hdr
));
2683 arc_buf_t
**bufp
= &hdr
->b_l1hdr
.b_buf
;
2684 arc_buf_t
*lastbuf
= NULL
;
2687 * Remove the buf from the hdr list and locate the last
2688 * remaining buffer on the list.
2690 while (*bufp
!= NULL
) {
2692 *bufp
= buf
->b_next
;
2695 * If we've removed a buffer in the middle of
2696 * the list then update the lastbuf and update
2699 if (*bufp
!= NULL
) {
2701 bufp
= &(*bufp
)->b_next
;
2705 ASSERT3P(lastbuf
, !=, buf
);
2706 IMPLY(hdr
->b_l1hdr
.b_bufcnt
> 0, lastbuf
!= NULL
);
2707 IMPLY(hdr
->b_l1hdr
.b_bufcnt
> 0, hdr
->b_l1hdr
.b_buf
!= NULL
);
2708 IMPLY(lastbuf
!= NULL
, ARC_BUF_LAST(lastbuf
));
2714 * Free up buf->b_data and pull the arc_buf_t off of the the arc_buf_hdr_t's
2718 arc_buf_destroy_impl(arc_buf_t
*buf
)
2720 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
2723 * Free up the data associated with the buf but only if we're not
2724 * sharing this with the hdr. If we are sharing it with the hdr, the
2725 * hdr is responsible for doing the free.
2727 if (buf
->b_data
!= NULL
) {
2729 * We're about to change the hdr's b_flags. We must either
2730 * hold the hash_lock or be undiscoverable.
2732 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)) || HDR_EMPTY(hdr
));
2734 arc_cksum_verify(buf
);
2735 arc_buf_unwatch(buf
);
2737 if (arc_buf_is_shared(buf
)) {
2738 arc_hdr_clear_flags(hdr
, ARC_FLAG_SHARED_DATA
);
2740 uint64_t size
= arc_buf_size(buf
);
2741 arc_free_data_buf(hdr
, buf
->b_data
, size
, buf
);
2742 ARCSTAT_INCR(arcstat_overhead_size
, -size
);
2746 ASSERT(hdr
->b_l1hdr
.b_bufcnt
> 0);
2747 hdr
->b_l1hdr
.b_bufcnt
-= 1;
2750 arc_buf_t
*lastbuf
= arc_buf_remove(hdr
, buf
);
2752 if (ARC_BUF_SHARED(buf
) && !ARC_BUF_COMPRESSED(buf
)) {
2754 * If the current arc_buf_t is sharing its data buffer with the
2755 * hdr, then reassign the hdr's b_pabd to share it with the new
2756 * buffer at the end of the list. The shared buffer is always
2757 * the last one on the hdr's buffer list.
2759 * There is an equivalent case for compressed bufs, but since
2760 * they aren't guaranteed to be the last buf in the list and
2761 * that is an exceedingly rare case, we just allow that space be
2762 * wasted temporarily.
2764 if (lastbuf
!= NULL
) {
2765 /* Only one buf can be shared at once */
2766 VERIFY(!arc_buf_is_shared(lastbuf
));
2767 /* hdr is uncompressed so can't have compressed buf */
2768 VERIFY(!ARC_BUF_COMPRESSED(lastbuf
));
2770 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
2771 arc_hdr_free_pabd(hdr
);
2774 * We must setup a new shared block between the
2775 * last buffer and the hdr. The data would have
2776 * been allocated by the arc buf so we need to transfer
2777 * ownership to the hdr since it's now being shared.
2779 arc_share_buf(hdr
, lastbuf
);
2781 } else if (HDR_SHARED_DATA(hdr
)) {
2783 * Uncompressed shared buffers are always at the end
2784 * of the list. Compressed buffers don't have the
2785 * same requirements. This makes it hard to
2786 * simply assert that the lastbuf is shared so
2787 * we rely on the hdr's compression flags to determine
2788 * if we have a compressed, shared buffer.
2790 ASSERT3P(lastbuf
, !=, NULL
);
2791 ASSERT(arc_buf_is_shared(lastbuf
) ||
2792 HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
);
2796 * Free the checksum if we're removing the last uncompressed buf from
2799 if (!arc_hdr_has_uncompressed_buf(hdr
)) {
2800 arc_cksum_free(hdr
);
2803 /* clean up the buf */
2805 kmem_cache_free(buf_cache
, buf
);
static void
arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr)
{
	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!HDR_SHARED_DATA(hdr));

	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}

static void
arc_hdr_free_pabd(arc_buf_hdr_t *hdr)
{
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

	/*
	 * If the hdr is currently being written to the l2arc then
	 * we defer freeing the data by adding it to the l2arc_free_on_write
	 * list. The l2arc will free the data once it's finished
	 * writing it to the l2arc device.
	 */
	if (HDR_L2_WRITING(hdr)) {
		arc_hdr_free_on_write(hdr);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
		    arc_hdr_size(hdr), hdr);
	}
	hdr->b_l1hdr.b_pabd = NULL;
	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;

	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
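/*
 * Typical lifecycle sketch (illustrative): arc_hdr_alloc() below calls
 * arc_hdr_alloc_pabd() right after initializing a new header, and the
 * matching arc_hdr_free_pabd() runs either at eviction time or from
 * arc_hdr_destroy() once the hdr's data is no longer needed.
 */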
2850 static arc_buf_hdr_t
*
2851 arc_hdr_alloc(uint64_t spa
, int32_t psize
, int32_t lsize
,
2852 enum zio_compress compression_type
, arc_buf_contents_t type
)
2856 VERIFY(type
== ARC_BUFC_DATA
|| type
== ARC_BUFC_METADATA
);
2858 hdr
= kmem_cache_alloc(hdr_full_cache
, KM_PUSHPAGE
);
2859 ASSERT(HDR_EMPTY(hdr
));
2860 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
2861 ASSERT3P(hdr
->b_l1hdr
.b_thawed
, ==, NULL
);
2862 HDR_SET_PSIZE(hdr
, psize
);
2863 HDR_SET_LSIZE(hdr
, lsize
);
2867 arc_hdr_set_flags(hdr
, arc_bufc_to_flags(type
) | ARC_FLAG_HAS_L1HDR
);
2868 arc_hdr_set_compress(hdr
, compression_type
);
2870 hdr
->b_l1hdr
.b_state
= arc_anon
;
2871 hdr
->b_l1hdr
.b_arc_access
= 0;
2872 hdr
->b_l1hdr
.b_bufcnt
= 0;
2873 hdr
->b_l1hdr
.b_buf
= NULL
;
2876 * Allocate the hdr's buffer. This will contain either
2877 * the compressed or uncompressed data depending on the block
2878 * it references and compressed arc enablement.
2880 arc_hdr_alloc_pabd(hdr
);
2881 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
2887 * Transition between the two allocation states for the arc_buf_hdr struct.
2888 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
2889 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
2890 * version is used when a cache buffer is only in the L2ARC in order to reduce
2893 static arc_buf_hdr_t
*
2894 arc_hdr_realloc(arc_buf_hdr_t
*hdr
, kmem_cache_t
*old
, kmem_cache_t
*new)
2896 ASSERT(HDR_HAS_L2HDR(hdr
));
2898 arc_buf_hdr_t
*nhdr
;
2899 l2arc_dev_t
*dev
= hdr
->b_l2hdr
.b_dev
;
2901 ASSERT((old
== hdr_full_cache
&& new == hdr_l2only_cache
) ||
2902 (old
== hdr_l2only_cache
&& new == hdr_full_cache
));
2904 nhdr
= kmem_cache_alloc(new, KM_PUSHPAGE
);
2906 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)));
2907 buf_hash_remove(hdr
);
2909 bcopy(hdr
, nhdr
, HDR_L2ONLY_SIZE
);
2911 if (new == hdr_full_cache
) {
2912 arc_hdr_set_flags(nhdr
, ARC_FLAG_HAS_L1HDR
);
2914 * arc_access and arc_change_state need to be aware that a
2915 * header has just come out of L2ARC, so we set its state to
2916 * l2c_only even though it's about to change.
2918 nhdr
->b_l1hdr
.b_state
= arc_l2c_only
;
2920 /* Verify previous threads set to NULL before freeing */
2921 ASSERT3P(nhdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2923 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2924 ASSERT0(hdr
->b_l1hdr
.b_bufcnt
);
2925 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
2928 * If we've reached here, We must have been called from
2929 * arc_evict_hdr(), as such we should have already been
2930 * removed from any ghost list we were previously on
2931 * (which protects us from racing with arc_evict_state),
2932 * thus no locking is needed during this check.
2934 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
2937 * A buffer must not be moved into the arc_l2c_only
2938 * state if it's not finished being written out to the
2939 * l2arc device. Otherwise, the b_l1hdr.b_pabd field
2940 * might try to be accessed, even though it was removed.
2942 VERIFY(!HDR_L2_WRITING(hdr
));
2943 VERIFY3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2946 if (hdr
->b_l1hdr
.b_thawed
!= NULL
) {
2947 kmem_free(hdr
->b_l1hdr
.b_thawed
, 1);
2948 hdr
->b_l1hdr
.b_thawed
= NULL
;
2952 arc_hdr_clear_flags(nhdr
, ARC_FLAG_HAS_L1HDR
);
2955 * The header has been reallocated so we need to re-insert it into any
2958 (void) buf_hash_insert(nhdr
, NULL
);
2960 ASSERT(list_link_active(&hdr
->b_l2hdr
.b_l2node
));
2962 mutex_enter(&dev
->l2ad_mtx
);
2965 * We must place the realloc'ed header back into the list at
2966 * the same spot. Otherwise, if it's placed earlier in the list,
2967 * l2arc_write_buffers() could find it during the function's
2968 * write phase, and try to write it out to the l2arc.
2970 list_insert_after(&dev
->l2ad_buflist
, hdr
, nhdr
);
2971 list_remove(&dev
->l2ad_buflist
, hdr
);
2973 mutex_exit(&dev
->l2ad_mtx
);
2976 * Since we're using the pointer address as the tag when
2977 * incrementing and decrementing the l2ad_alloc refcount, we
2978 * must remove the old pointer (that we're about to destroy) and
2979 * add the new pointer to the refcount. Otherwise we'd remove
2980 * the wrong pointer address when calling arc_hdr_destroy() later.
2983 (void) refcount_remove_many(&dev
->l2ad_alloc
, arc_hdr_size(hdr
), hdr
);
2984 (void) refcount_add_many(&dev
->l2ad_alloc
, arc_hdr_size(nhdr
), nhdr
);
2986 buf_discard_identity(hdr
);
2987 kmem_cache_free(old
, hdr
);
2993 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
2994 * The buf is returned thawed since we expect the consumer to modify it.
2997 arc_alloc_buf(spa_t
*spa
, void *tag
, arc_buf_contents_t type
, int32_t size
)
2999 arc_buf_hdr_t
*hdr
= arc_hdr_alloc(spa_load_guid(spa
), size
, size
,
3000 ZIO_COMPRESS_OFF
, type
);
3001 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr
)));
3003 arc_buf_t
*buf
= NULL
;
3004 VERIFY0(arc_buf_alloc_impl(hdr
, tag
, B_FALSE
, B_FALSE
, &buf
));
3011 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
3012 * for bufs containing metadata.
3015 arc_alloc_compressed_buf(spa_t
*spa
, void *tag
, uint64_t psize
, uint64_t lsize
,
3016 enum zio_compress compression_type
)
3018 ASSERT3U(lsize
, >, 0);
3019 ASSERT3U(lsize
, >=, psize
);
3020 ASSERT(compression_type
> ZIO_COMPRESS_OFF
);
3021 ASSERT(compression_type
< ZIO_COMPRESS_FUNCTIONS
);
3023 arc_buf_hdr_t
*hdr
= arc_hdr_alloc(spa_load_guid(spa
), psize
, lsize
,
3024 compression_type
, ARC_BUFC_DATA
);
3025 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr
)));
3027 arc_buf_t
*buf
= NULL
;
3028 VERIFY0(arc_buf_alloc_impl(hdr
, tag
, B_TRUE
, B_FALSE
, &buf
));
3030 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
3032 if (!arc_buf_is_shared(buf
)) {
3034 * To ensure that the hdr has the correct data in it if we call
3035 * arc_decompress() on this buf before it's been written to
3036 * disk, it's easiest if we just set up sharing between the
3039 ASSERT(!abd_is_linear(hdr
->b_l1hdr
.b_pabd
));
3040 arc_hdr_free_pabd(hdr
);
3041 arc_share_buf(hdr
, buf
);
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	uint64_t psize = arc_hdr_size(hdr);

	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
	ASSERT(HDR_HAS_L2HDR(hdr));

	list_remove(&dev->l2ad_buflist, hdr);

	ARCSTAT_INCR(arcstat_l2_psize, -psize);
	ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

	vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);

	(void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
	arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
3069 arc_hdr_destroy(arc_buf_hdr_t
*hdr
)
3071 if (HDR_HAS_L1HDR(hdr
)) {
3072 ASSERT(hdr
->b_l1hdr
.b_buf
== NULL
||
3073 hdr
->b_l1hdr
.b_bufcnt
> 0);
3074 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
3075 ASSERT3P(hdr
->b_l1hdr
.b_state
, ==, arc_anon
);
3077 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
3078 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
3080 if (!HDR_EMPTY(hdr
))
3081 buf_discard_identity(hdr
);
3083 if (HDR_HAS_L2HDR(hdr
)) {
3084 l2arc_dev_t
*dev
= hdr
->b_l2hdr
.b_dev
;
3085 boolean_t buflist_held
= MUTEX_HELD(&dev
->l2ad_mtx
);
3088 mutex_enter(&dev
->l2ad_mtx
);
3091 * Even though we checked this conditional above, we
3092 * need to check this again now that we have the
3093 * l2ad_mtx. This is because we could be racing with
3094 * another thread calling l2arc_evict() which might have
3095 * destroyed this header's L2 portion as we were waiting
3096 * to acquire the l2ad_mtx. If that happens, we don't
3097 * want to re-destroy the header's L2 portion.
3099 if (HDR_HAS_L2HDR(hdr
))
3100 arc_hdr_l2hdr_destroy(hdr
);
3103 mutex_exit(&dev
->l2ad_mtx
);
3106 if (HDR_HAS_L1HDR(hdr
)) {
3107 arc_cksum_free(hdr
);
3109 while (hdr
->b_l1hdr
.b_buf
!= NULL
)
3110 arc_buf_destroy_impl(hdr
->b_l1hdr
.b_buf
);
3113 if (hdr
->b_l1hdr
.b_thawed
!= NULL
) {
3114 kmem_free(hdr
->b_l1hdr
.b_thawed
, 1);
3115 hdr
->b_l1hdr
.b_thawed
= NULL
;
3119 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
3120 arc_hdr_free_pabd(hdr
);
3124 ASSERT3P(hdr
->b_hash_next
, ==, NULL
);
3125 if (HDR_HAS_L1HDR(hdr
)) {
3126 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
3127 ASSERT3P(hdr
->b_l1hdr
.b_acb
, ==, NULL
);
3128 kmem_cache_free(hdr_full_cache
, hdr
);
3130 kmem_cache_free(hdr_l2only_cache
, hdr
);
void
arc_buf_destroy(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		VERIFY0(remove_reference(hdr, NULL, tag));
		arc_hdr_destroy(hdr);
		return;
	}

	mutex_enter(hash_lock);
	ASSERT3P(hdr, ==, buf->b_hdr);
	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
	ASSERT3P(buf->b_data, !=, NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	arc_buf_destroy_impl(buf);
	mutex_exit(hash_lock);
}
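/*
 * Sketch of a caller (names are placeholders): a consumer that allocated an
 * anonymous buffer with arc_alloc_buf(spa, db, type, size) and never handed
 * it to arc_write() simply releases it with
 *
 *	arc_buf_destroy(buf, db);
 *
 * which takes the arc_anon fast path above and frees the header outright.
 */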
3161 * Evict the arc_buf_hdr that is provided as a parameter. The resultant
3162 * state of the header is dependent on it's state prior to entering this
3163 * function. The following transitions are possible:
3165 * - arc_mru -> arc_mru_ghost
3166 * - arc_mfu -> arc_mfu_ghost
3167 * - arc_mru_ghost -> arc_l2c_only
3168 * - arc_mru_ghost -> deleted
3169 * - arc_mfu_ghost -> arc_l2c_only
3170 * - arc_mfu_ghost -> deleted
3173 arc_evict_hdr(arc_buf_hdr_t
*hdr
, kmutex_t
*hash_lock
)
3175 arc_state_t
*evicted_state
, *state
;
3176 int64_t bytes_evicted
= 0;
3178 ASSERT(MUTEX_HELD(hash_lock
));
3179 ASSERT(HDR_HAS_L1HDR(hdr
));
3181 state
= hdr
->b_l1hdr
.b_state
;
3182 if (GHOST_STATE(state
)) {
3183 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
3184 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
3187 * l2arc_write_buffers() relies on a header's L1 portion
3188 * (i.e. its b_pabd field) during it's write phase.
3189 * Thus, we cannot push a header onto the arc_l2c_only
3190 * state (removing it's L1 piece) until the header is
3191 * done being written to the l2arc.
3193 if (HDR_HAS_L2HDR(hdr
) && HDR_L2_WRITING(hdr
)) {
3194 ARCSTAT_BUMP(arcstat_evict_l2_skip
);
3195 return (bytes_evicted
);
3198 ARCSTAT_BUMP(arcstat_deleted
);
3199 bytes_evicted
+= HDR_GET_LSIZE(hdr
);
3201 DTRACE_PROBE1(arc__delete
, arc_buf_hdr_t
*, hdr
);
3203 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
3204 if (HDR_HAS_L2HDR(hdr
)) {
3206 * This buffer is cached on the 2nd Level ARC;
3207 * don't destroy the header.
3209 arc_change_state(arc_l2c_only
, hdr
, hash_lock
);
3211 * dropping from L1+L2 cached to L2-only,
3212 * realloc to remove the L1 header.
3214 hdr
= arc_hdr_realloc(hdr
, hdr_full_cache
,
3217 arc_change_state(arc_anon
, hdr
, hash_lock
);
3218 arc_hdr_destroy(hdr
);
3220 return (bytes_evicted
);
3223 ASSERT(state
== arc_mru
|| state
== arc_mfu
);
3224 evicted_state
= (state
== arc_mru
) ? arc_mru_ghost
: arc_mfu_ghost
;
3226 /* prefetch buffers have a minimum lifespan */
3227 if (HDR_IO_IN_PROGRESS(hdr
) ||
3228 ((hdr
->b_flags
& (ARC_FLAG_PREFETCH
| ARC_FLAG_INDIRECT
)) &&
3229 ddi_get_lbolt() - hdr
->b_l1hdr
.b_arc_access
<
3230 arc_min_prefetch_lifespan
)) {
3231 ARCSTAT_BUMP(arcstat_evict_skip
);
3232 return (bytes_evicted
);
3235 ASSERT0(refcount_count(&hdr
->b_l1hdr
.b_refcnt
));
3236 while (hdr
->b_l1hdr
.b_buf
) {
3237 arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
;
3238 if (!mutex_tryenter(&buf
->b_evict_lock
)) {
3239 ARCSTAT_BUMP(arcstat_mutex_miss
);
3242 if (buf
->b_data
!= NULL
)
3243 bytes_evicted
+= HDR_GET_LSIZE(hdr
);
3244 mutex_exit(&buf
->b_evict_lock
);
3245 arc_buf_destroy_impl(buf
);
3248 if (HDR_HAS_L2HDR(hdr
)) {
3249 ARCSTAT_INCR(arcstat_evict_l2_cached
, HDR_GET_LSIZE(hdr
));
3251 if (l2arc_write_eligible(hdr
->b_spa
, hdr
)) {
3252 ARCSTAT_INCR(arcstat_evict_l2_eligible
,
3253 HDR_GET_LSIZE(hdr
));
3255 ARCSTAT_INCR(arcstat_evict_l2_ineligible
,
3256 HDR_GET_LSIZE(hdr
));
3260 if (hdr
->b_l1hdr
.b_bufcnt
== 0) {
3261 arc_cksum_free(hdr
);
3263 bytes_evicted
+= arc_hdr_size(hdr
);
3266 * If this hdr is being evicted and has a compressed
3267 * buffer then we discard it here before we change states.
3268 * This ensures that the accounting is updated correctly
3269 * in arc_free_data_impl().
3271 arc_hdr_free_pabd(hdr
);
3273 arc_change_state(evicted_state
, hdr
, hash_lock
);
3274 ASSERT(HDR_IN_HASH_TABLE(hdr
));
3275 arc_hdr_set_flags(hdr
, ARC_FLAG_IN_HASH_TABLE
);
3276 DTRACE_PROBE1(arc__evict
, arc_buf_hdr_t
*, hdr
);
3279 return (bytes_evicted
);
3283 arc_evict_state_impl(multilist_t
*ml
, int idx
, arc_buf_hdr_t
*marker
,
3284 uint64_t spa
, int64_t bytes
)
3286 multilist_sublist_t
*mls
;
3287 uint64_t bytes_evicted
= 0;
3289 kmutex_t
*hash_lock
;
3290 int evict_count
= 0;
3292 ASSERT3P(marker
, !=, NULL
);
3293 IMPLY(bytes
< 0, bytes
== ARC_EVICT_ALL
);
3295 mls
= multilist_sublist_lock(ml
, idx
);
3297 for (hdr
= multilist_sublist_prev(mls
, marker
); hdr
!= NULL
;
3298 hdr
= multilist_sublist_prev(mls
, marker
)) {
3299 if ((bytes
!= ARC_EVICT_ALL
&& bytes_evicted
>= bytes
) ||
3300 (evict_count
>= zfs_arc_evict_batch_limit
))
3304 * To keep our iteration location, move the marker
3305 * forward. Since we're not holding hdr's hash lock, we
3306 * must be very careful and not remove 'hdr' from the
3307 * sublist. Otherwise, other consumers might mistake the
3308 * 'hdr' as not being on a sublist when they call the
3309 * multilist_link_active() function (they all rely on
3310 * the hash lock protecting concurrent insertions and
3311 * removals). multilist_sublist_move_forward() was
3312 * specifically implemented to ensure this is the case
3313 * (only 'marker' will be removed and re-inserted).
3315 multilist_sublist_move_forward(mls
, marker
);
3318 * The only case where the b_spa field should ever be
3319 * zero, is the marker headers inserted by
3320 * arc_evict_state(). It's possible for multiple threads
3321 * to be calling arc_evict_state() concurrently (e.g.
3322 * dsl_pool_close() and zio_inject_fault()), so we must
3323 * skip any markers we see from these other threads.
3325 if (hdr
->b_spa
== 0)
3328 /* we're only interested in evicting buffers of a certain spa */
3329 if (spa
!= 0 && hdr
->b_spa
!= spa
) {
3330 ARCSTAT_BUMP(arcstat_evict_skip
);
3334 hash_lock
= HDR_LOCK(hdr
);
3337 * We aren't calling this function from any code path
3338 * that would already be holding a hash lock, so we're
3339 * asserting on this assumption to be defensive in case
3340 * this ever changes. Without this check, it would be
3341 * possible to incorrectly increment arcstat_mutex_miss
3342 * below (e.g. if the code changed such that we called
3343 * this function with a hash lock held).
3345 ASSERT(!MUTEX_HELD(hash_lock
));
3347 if (mutex_tryenter(hash_lock
)) {
3348 uint64_t evicted
= arc_evict_hdr(hdr
, hash_lock
);
3349 mutex_exit(hash_lock
);
3351 bytes_evicted
+= evicted
;
3354 * If evicted is zero, arc_evict_hdr() must have
3355 * decided to skip this header, don't increment
3356 * evict_count in this case.
3362 * If arc_size isn't overflowing, signal any
3363 * threads that might happen to be waiting.
3365 * For each header evicted, we wake up a single
3366 * thread. If we used cv_broadcast, we could
3367 * wake up "too many" threads causing arc_size
3368 * to significantly overflow arc_c; since
3369 * arc_get_data_impl() doesn't check for overflow
3370 * when it's woken up (it doesn't because it's
3371 * possible for the ARC to be overflowing while
3372 * full of un-evictable buffers, and the
3373 * function should proceed in this case).
3375 * If threads are left sleeping, due to not
3376 * using cv_broadcast, they will be woken up
3377 * just before arc_reclaim_thread() sleeps.
3379 mutex_enter(&arc_reclaim_lock
);
3380 if (!arc_is_overflowing())
3381 cv_signal(&arc_reclaim_waiters_cv
);
3382 mutex_exit(&arc_reclaim_lock
);
3384 ARCSTAT_BUMP(arcstat_mutex_miss
);
3388 multilist_sublist_unlock(mls
);
3390 return (bytes_evicted
);
3394 * Evict buffers from the given arc state, until we've removed the
3395 * specified number of bytes. Move the removed buffers to the
3396 * appropriate evict state.
3398 * This function makes a "best effort". It skips over any buffers
3399 * it can't get a hash_lock on, and so, may not catch all candidates.
3400 * It may also return without evicting as much space as requested.
3402 * If bytes is specified using the special value ARC_EVICT_ALL, this
3403 * will evict all available (i.e. unlocked and evictable) buffers from
3404 * the given arc state; which is used by arc_flush().
3407 arc_evict_state(arc_state_t
*state
, uint64_t spa
, int64_t bytes
,
3408 arc_buf_contents_t type
)
3410 uint64_t total_evicted
= 0;
3411 multilist_t
*ml
= state
->arcs_list
[type
];
3413 arc_buf_hdr_t
**markers
;
3415 IMPLY(bytes
< 0, bytes
== ARC_EVICT_ALL
);
3417 num_sublists
= multilist_get_num_sublists(ml
);
3420 * If we've tried to evict from each sublist, made some
3421 * progress, but still have not hit the target number of bytes
3422 * to evict, we want to keep trying. The markers allow us to
3423 * pick up where we left off for each individual sublist, rather
3424 * than starting from the tail each time.
3426 markers
= kmem_zalloc(sizeof (*markers
) * num_sublists
, KM_SLEEP
);
3427 for (int i
= 0; i
< num_sublists
; i
++) {
3428 markers
[i
] = kmem_cache_alloc(hdr_full_cache
, KM_SLEEP
);
3431 * A b_spa of 0 is used to indicate that this header is
3432 * a marker. This fact is used in arc_adjust_type() and
3433 * arc_evict_state_impl().
3435 markers
[i
]->b_spa
= 0;
3437 multilist_sublist_t
*mls
= multilist_sublist_lock(ml
, i
);
3438 multilist_sublist_insert_tail(mls
, markers
[i
]);
3439 multilist_sublist_unlock(mls
);
3443 * While we haven't hit our target number of bytes to evict, or
3444 * we're evicting all available buffers.
3446 while (total_evicted
< bytes
|| bytes
== ARC_EVICT_ALL
) {
3448 * Start eviction using a randomly selected sublist,
3449 * this is to try and evenly balance eviction across all
3450 * sublists. Always starting at the same sublist
3451 * (e.g. index 0) would cause evictions to favor certain
3452 * sublists over others.
3454 int sublist_idx
= multilist_get_random_index(ml
);
3455 uint64_t scan_evicted
= 0;
3457 for (int i
= 0; i
< num_sublists
; i
++) {
3458 uint64_t bytes_remaining
;
3459 uint64_t bytes_evicted
;
3461 if (bytes
== ARC_EVICT_ALL
)
3462 bytes_remaining
= ARC_EVICT_ALL
;
3463 else if (total_evicted
< bytes
)
3464 bytes_remaining
= bytes
- total_evicted
;
3468 bytes_evicted
= arc_evict_state_impl(ml
, sublist_idx
,
3469 markers
[sublist_idx
], spa
, bytes_remaining
);
3471 scan_evicted
+= bytes_evicted
;
3472 total_evicted
+= bytes_evicted
;
3474 /* we've reached the end, wrap to the beginning */
3475 if (++sublist_idx
>= num_sublists
)
3480 * If we didn't evict anything during this scan, we have
3481 * no reason to believe we'll evict more during another
3482 * scan, so break the loop.
3484 if (scan_evicted
== 0) {
3485 /* This isn't possible, let's make that obvious */
3486 ASSERT3S(bytes
, !=, 0);
3489 * When bytes is ARC_EVICT_ALL, the only way to
3490 * break the loop is when scan_evicted is zero.
3491 * In that case, we actually have evicted enough,
3492 * so we don't want to increment the kstat.
3494 if (bytes
!= ARC_EVICT_ALL
) {
3495 ASSERT3S(total_evicted
, <, bytes
);
3496 ARCSTAT_BUMP(arcstat_evict_not_enough
);
3503 for (int i
= 0; i
< num_sublists
; i
++) {
3504 multilist_sublist_t
*mls
= multilist_sublist_lock(ml
, i
);
3505 multilist_sublist_remove(mls
, markers
[i
]);
3506 multilist_sublist_unlock(mls
);
3508 kmem_cache_free(hdr_full_cache
, markers
[i
]);
3510 kmem_free(markers
, sizeof (*markers
) * num_sublists
);
3512 return (total_evicted
);
3516 * Flush all "evictable" data of the given type from the arc state
3517 * specified. This will not evict any "active" buffers (i.e. referenced).
3519 * When 'retry' is set to B_FALSE, the function will make a single pass
3520 * over the state and evict any buffers that it can. Since it doesn't
3521 * continually retry the eviction, it might end up leaving some buffers
3522 * in the ARC due to lock misses.
3524 * When 'retry' is set to B_TRUE, the function will continually retry the
3525 * eviction until *all* evictable buffers have been removed from the
3526 * state. As a result, if concurrent insertions into the state are
3527 * allowed (e.g. if the ARC isn't shutting down), this function might
3528 * wind up in an infinite loop, continually trying to evict buffers.
3531 arc_flush_state(arc_state_t
*state
, uint64_t spa
, arc_buf_contents_t type
,
3534 uint64_t evicted
= 0;
3536 while (refcount_count(&state
->arcs_esize
[type
]) != 0) {
3537 evicted
+= arc_evict_state(state
, spa
, ARC_EVICT_ALL
, type
);
/*
 * Evict the specified number of bytes from the state specified,
 * restricting eviction to the spa and type given. This function
 * prevents us from trying to evict more from a state's list than
 * is "evictable", and to skip evicting altogether when passed a
 * negative value for "bytes". In contrast, arc_evict_state() will
 * evict everything it can, when passed a negative value for "bytes".
 */
static uint64_t
arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
    arc_buf_contents_t type)
{
	int64_t delta;

	if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
		delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
		return (arc_evict_state(state, spa, delta, type));
	}

	return (0);
}

/*
 * Evict metadata buffers from the cache, such that arc_meta_used is
 * capped by the arc_meta_limit tunable.
 */
static uint64_t
arc_adjust_meta(void)
{
	uint64_t total_evicted = 0;
	int64_t target;

	/*
	 * If we're over the meta limit, we want to evict enough
	 * metadata to get back under the meta limit. We don't want to
	 * evict so much that we drop the MRU below arc_p, though. If
	 * we're over the meta limit more than we're over arc_p, we
	 * evict some from the MRU here, and some from the MFU below.
	 */
	target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
	    refcount_count(&arc_mru->arcs_size) - arc_p));

	total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);

	/*
	 * Similar to the above, we want to evict enough bytes to get us
	 * below the meta limit, but not so much as to drop us below the
	 * space allotted to the MFU (which is defined as arc_c - arc_p).
	 */
	target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
	    (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));

	total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);

	return (total_evicted);
}
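/*
 * Worked example (hypothetical numbers): with arc_meta_used at 600M,
 * arc_meta_limit at 500M, arc_p at 400M and anon+MRU totalling 450M, the
 * first target is MIN(100M, 50M) = 50M of metadata evicted from the MRU;
 * the second pass then aims the remaining overage at the MFU, again capped
 * so the MFU is not pushed below its allotment of arc_c - arc_p.
 */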
/*
 * Return the type of the oldest buffer in the given arc state
 *
 * This function will select a random sublist of type ARC_BUFC_DATA and
 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
 * is compared, and the type which contains the "older" buffer will be
 * returned.
 */
static arc_buf_contents_t
arc_adjust_type(arc_state_t *state)
{
	multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA];
	multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA];
	int data_idx = multilist_get_random_index(data_ml);
	int meta_idx = multilist_get_random_index(meta_ml);
	multilist_sublist_t *data_mls;
	multilist_sublist_t *meta_mls;
	arc_buf_contents_t type;
	arc_buf_hdr_t *data_hdr;
	arc_buf_hdr_t *meta_hdr;

	/*
	 * We keep the sublist lock until we're finished, to prevent
	 * the headers from being destroyed via arc_evict_state().
	 */
	data_mls = multilist_sublist_lock(data_ml, data_idx);
	meta_mls = multilist_sublist_lock(meta_ml, meta_idx);

	/*
	 * These two loops are to ensure we skip any markers that
	 * might be at the tail of the lists due to arc_evict_state().
	 */
	for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
	    data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
		if (data_hdr->b_spa != 0)
			break;
	}

	for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
	    meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
		if (meta_hdr->b_spa != 0)
			break;
	}

	if (data_hdr == NULL && meta_hdr == NULL) {
		type = ARC_BUFC_DATA;
	} else if (data_hdr == NULL) {
		ASSERT3P(meta_hdr, !=, NULL);
		type = ARC_BUFC_METADATA;
	} else if (meta_hdr == NULL) {
		ASSERT3P(data_hdr, !=, NULL);
		type = ARC_BUFC_DATA;
	} else {
		ASSERT3P(data_hdr, !=, NULL);
		ASSERT3P(meta_hdr, !=, NULL);

		/* The headers can't be on the sublist without an L1 header */
		ASSERT(HDR_HAS_L1HDR(data_hdr));
		ASSERT(HDR_HAS_L1HDR(meta_hdr));

		if (data_hdr->b_l1hdr.b_arc_access <
		    meta_hdr->b_l1hdr.b_arc_access) {
			type = ARC_BUFC_DATA;
		} else {
			type = ARC_BUFC_METADATA;
		}
	}

	multilist_sublist_unlock(meta_mls);
	multilist_sublist_unlock(data_mls);

	return (type);
}
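/*
 * For illustration: if the tail of the sampled data sublist was last
 * accessed at lbolt 1000 and the tail of the metadata sublist at lbolt
 * 5000, the data buffer is older, so ARC_BUFC_DATA is returned and the
 * caller will prefer to evict data first.
 */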
/*
 * Evict buffers from the cache, such that arc_size is capped by arc_c.
 */
static uint64_t
arc_adjust(void)
{
	uint64_t total_evicted = 0;
	uint64_t bytes;
	int64_t target;

	/*
	 * If we're over arc_meta_limit, we want to correct that before
	 * potentially evicting data buffers below.
	 */
	total_evicted += arc_adjust_meta();

	/*
	 * Adjust MRU size
	 *
	 * If we're over the target cache size, we want to evict enough
	 * from the list to get back to our target size. We don't want
	 * to evict too much from the MRU, such that it drops below
	 * arc_p. So, if we're over our target cache size more than
	 * the MRU is over arc_p, we'll evict enough to get back to
	 * arc_p here, and then evict more from the MFU below.
	 */
	target = MIN((int64_t)(arc_size - arc_c),
	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
	    refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));

	/*
	 * If we're below arc_meta_min, always prefer to evict data.
	 * Otherwise, try to satisfy the requested number of bytes to
	 * evict from the type which contains older buffers; in an
	 * effort to keep newer buffers in the cache regardless of their
	 * type. If we cannot satisfy the number of bytes from this
	 * type, spill over into the next type.
	 */
	if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
	    arc_meta_used > arc_meta_min) {
		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * metadata, we try to get the rest from data.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
	} else {
		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * data, we try to get the rest from metadata.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 *
	 * Now that we've tried to evict enough from the MRU to get its
	 * size back to arc_p, if we're still above the target cache
	 * size, we evict the rest from the MFU.
	 */
	target = arc_size - arc_c;

	if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
	    arc_meta_used > arc_meta_min) {
		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * metadata, we try to get the rest from data.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
	} else {
		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * data, we try to get the rest from metadata.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
	}

	/*
	 * Adjust ghost lists
	 *
	 * In addition to the above, the ARC also defines target values
	 * for the ghost lists. The sum of the mru list and mru ghost
	 * list should never exceed the target size of the cache, and
	 * the sum of the mru list, mfu list, mru ghost list, and mfu
	 * ghost list should never exceed twice the target size of the
	 * cache. The following logic enforces these limits on the ghost
	 * caches, and evicts from them as needed.
	 */
	target = refcount_count(&arc_mru->arcs_size) +
	    refcount_count(&arc_mru_ghost->arcs_size) - arc_c;

	bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
	total_evicted += bytes;

	target -= bytes;

	total_evicted +=
	    arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);

	/*
	 * We assume the sum of the mru list and mfu list is less than
	 * or equal to arc_c (we enforced this above), which means we
	 * can use the simpler of the two equations below:
	 *
	 *	mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
	 *		    mru ghost + mfu ghost <= arc_c
	 */
	target = refcount_count(&arc_mru_ghost->arcs_size) +
	    refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;

	bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
	total_evicted += bytes;

	target -= bytes;

	total_evicted +=
	    arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);

	return (total_evicted);
}
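/*
 * Worked example of the ghost list targets above (illustrative numbers):
 * with arc_c = 4GB, a 3GB MRU list and a 2GB MRU ghost list, the first
 * target is 3GB + 2GB - 4GB = 1GB, so up to 1GB is trimmed from the MRU
 * ghost list (data first, then metadata). The second target keeps
 * mru ghost + mfu ghost at or below arc_c, which together with the first
 * limit keeps all four lists within 2 * arc_c.
 */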
void
arc_flush(spa_t *spa, boolean_t retry)
{
	uint64_t guid = 0;

	/*
	 * If retry is B_TRUE, a spa must not be specified since we have
	 * no good way to determine if all of a spa's buffers have been
	 * evicted from an arc state.
	 */
	ASSERT(!retry || spa == 0);

	if (spa != NULL)
		guid = spa_load_guid(spa);

	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}
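/*
 * Typical usage (a sketch, not a new call site): exporting a pool can
 * flush just that pool's buffers with
 *
 *	arc_flush(spa, B_FALSE);
 *
 * while ARC teardown, when no new insertions can race with us, may use
 *
 *	arc_flush(NULL, B_TRUE);
 *
 * to retry until every evictable buffer is gone (per the assertion above,
 * retry requires spa to be NULL).
 */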
void
arc_shrink(int64_t to_free)
{
	if (arc_c > arc_c_min) {

		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		(void) arc_adjust();
}
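/*
 * For illustration: with arc_c = 8GB, arc_c_min = 1GB and to_free = 1GB,
 * arc_c drops to 7GB and arc_p is reduced by arc_p >> arc_shrink_shift
 * (1/128th of arc_p at the usual shift of 7); arc_c is never pushed below
 * arc_c_min, and arc_adjust() then runs if arc_size still exceeds the new
 * arc_c.
 */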
typedef enum free_memory_reason_t {
	FMR_UNKNOWN,
	FMR_NEEDFREE,
	FMR_LOTSFREE,
	FMR_SWAPFS_MINFREE,
	FMR_PAGES_PP_MAXIMUM,
	FMR_HEAP_ARENA,
	FMR_ZIO_ARENA,
} free_memory_reason_t;

int64_t last_free_memory;
free_memory_reason_t last_free_reason;

/*
 * Additional reserve of pages for pp_reserve.
 */
int64_t arc_pages_pp_reserve = 64;

/*
 * Additional reserve of pages for swapfs.
 */
int64_t arc_swapfs_reserve = 64;
/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed. Positive if there is sufficient free memory, negative indicates
 * the amount of memory that needs to be freed up.
 */
static int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	int64_t n;
	free_memory_reason_t r = FMR_UNKNOWN;

#ifdef _KERNEL
	if (needfree > 0) {
		n = PAGESIZE * (-needfree);
		if (n < lowest) {
			lowest = n;
			r = FMR_NEEDFREE;
		}
	}
3920 * check that we're out of range of the pageout scanner. It starts to
3921 * schedule paging if freemem is less than lotsfree and needfree.
3922 * lotsfree is the high-water mark for pageout, and needfree is the
3923 * number of needed free pages. We add extra pages here to make sure
3924 * the scanner doesn't start up while we're freeing memory.
3926 n
= PAGESIZE
* (freemem
- lotsfree
- needfree
- desfree
);
3933 * check to make sure that swapfs has enough space so that anon
3934 * reservations can still succeed. anon_resvmem() checks that the
3935 * availrmem is greater than swapfs_minfree, and the number of reserved
3936 * swap pages. We also add a bit of extra here just to prevent
3937 * circumstances from getting really dire.
3939 n
= PAGESIZE
* (availrmem
- swapfs_minfree
- swapfs_reserve
-
3940 desfree
- arc_swapfs_reserve
);
3943 r
= FMR_SWAPFS_MINFREE
;
3948 * Check that we have enough availrmem that memory locking (e.g., via
3949 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
3950 * stores the number of pages that cannot be locked; when availrmem
3951 * drops below pages_pp_maximum, page locking mechanisms such as
3952 * page_pp_lock() will fail.)
3954 n
= PAGESIZE
* (availrmem
- pages_pp_maximum
-
3955 arc_pages_pp_reserve
);
3958 r
= FMR_PAGES_PP_MAXIMUM
;
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory. Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system. However, this is generally fixed at 25 pages
	 * which is so low that it's useless. In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated. (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	n = (int64_t)vmem_size(heap_arena, VMEM_FREE) -
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
3982 * If zio data pages are being allocated out of a separate heap segment,
3983 * then enforce that the size of available vmem for this arena remains
3984 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
3986 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
3987 * memory (in the zio_arena) free, which can avoid memory
3988 * fragmentation issues.
3990 if (zio_arena
!= NULL
) {
3991 n
= (int64_t)vmem_size(zio_arena
, VMEM_FREE
) -
3992 (vmem_size(zio_arena
, VMEM_ALLOC
) >>
3993 arc_zio_arena_free_shift
);
#else
	/* Every 100 calls, free a small amount */
	if (spa_get_random(100) == 0)
		lowest = -1024;
#endif

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}

/*
 * Determine if the system is under memory pressure and is asking
 * to reclaim memory. A return value of B_TRUE indicates that the system
 * is under memory pressure and that the arc should adjust accordingly.
 */
static boolean_t
arc_reclaim_needed(void)
{
	return (arc_available_memory() < 0);
}
static void
arc_kmem_reap_now(void)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	kmem_cache_t		*prev_data_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];
	extern kmem_cache_t	*zio_data_buf_cache[];
	extern kmem_cache_t	*range_seg_cache;
	extern kmem_cache_t	*abd_chunk_cache;

#ifdef _KERNEL
	if (arc_meta_used >= arc_meta_limit) {
		/*
		 * We are exceeding our meta-data cache limit.
		 * Purge some DNLC entries to release holds on meta-data.
		 */
		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
	}
#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(abd_chunk_cache);
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_full_cache);
	kmem_cache_reap_now(hdr_l2only_cache);
	kmem_cache_reap_now(range_seg_cache);

	if (zio_arena != NULL) {
		/*
		 * Ask the vmem arena to reclaim unused memory from its
		 * quantum caches.
		 */
		vmem_qcache_reap(zio_arena);
	}
}
/*
 * Threads can block in arc_get_data_impl() waiting for this thread to evict
 * enough data and signal them to proceed. When this happens, the threads in
 * arc_get_data_impl() are sleeping while holding the hash lock for their
 * particular arc header. Thus, we must be careful to never sleep on a
 * hash lock in this thread. This is to prevent the following deadlock:
 *
 *  - Thread A sleeps on CV in arc_get_data_impl() holding hash lock "L",
 *    waiting for the reclaim thread to signal it.
 *
 *  - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
 *    fails, and goes to sleep forever.
 *
 * This possible deadlock is avoided by always acquiring a hash lock
 * using mutex_tryenter() from arc_reclaim_thread().
 */
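/*
 * A minimal sketch of the tryenter pattern described above (illustrative
 * only; the real eviction loop in arc_evict_state_impl() carries more
 * state with it):
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;			(skip rather than sleep)
 *	}
 *	(... evict the header ...)
 *	mutex_exit(hash_lock);
 */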
4093 arc_reclaim_thread(void *unused
)
4095 hrtime_t growtime
= 0;
4098 CALLB_CPR_INIT(&cpr
, &arc_reclaim_lock
, callb_generic_cpr
, FTAG
);
4100 mutex_enter(&arc_reclaim_lock
);
4101 while (!arc_reclaim_thread_exit
) {
4102 uint64_t evicted
= 0;
		/*
		 * This is necessary in order for the mdb ::arc dcmd to
		 * show up to date information. Since the ::arc command
		 * does not call the kstat's update function, without
		 * this call, the command may show stale stats for the
		 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
		 * with this change, the data might be up to 1 second
		 * out of date; but that should suffice. The arc_state_t
		 * structures can be queried directly if more accurate
		 * information is needed.
		 */
4115 if (arc_ksp
!= NULL
)
4116 arc_ksp
->ks_update(arc_ksp
, KSTAT_READ
);
4118 mutex_exit(&arc_reclaim_lock
);
4121 * We call arc_adjust() before (possibly) calling
4122 * arc_kmem_reap_now(), so that we can wake up
4123 * arc_get_data_impl() sooner.
4125 evicted
= arc_adjust();
4127 int64_t free_memory
= arc_available_memory();
4128 if (free_memory
< 0) {
4130 arc_no_grow
= B_TRUE
;
4134 * Wait at least zfs_grow_retry (default 60) seconds
4135 * before considering growing.
4137 growtime
= gethrtime() + SEC2NSEC(arc_grow_retry
);
4139 arc_kmem_reap_now();
4142 * If we are still low on memory, shrink the ARC
4143 * so that we have arc_shrink_min free space.
4145 free_memory
= arc_available_memory();
4148 (arc_c
>> arc_shrink_shift
) - free_memory
;
4151 to_free
= MAX(to_free
, ptob(needfree
));
4153 arc_shrink(to_free
);
4155 } else if (free_memory
< arc_c
>> arc_no_grow_shift
) {
4156 arc_no_grow
= B_TRUE
;
4157 } else if (gethrtime() >= growtime
) {
4158 arc_no_grow
= B_FALSE
;
4161 mutex_enter(&arc_reclaim_lock
);
		/*
		 * If evicted is zero, we couldn't evict anything via
		 * arc_adjust(). This could be due to hash lock
		 * collisions, but more likely due to the majority of
		 * arc buffers being unevictable. Therefore, even if
		 * arc_size is above arc_c, another pass is unlikely to
		 * be helpful and could potentially cause us to enter an
		 * infinite loop.
		 */
4172 if (arc_size
<= arc_c
|| evicted
== 0) {
4174 * We're either no longer overflowing, or we
4175 * can't evict anything more, so we should wake
4176 * up any threads before we go to sleep.
4178 cv_broadcast(&arc_reclaim_waiters_cv
);
4181 * Block until signaled, or after one second (we
4182 * might need to perform arc_kmem_reap_now()
4183 * even if we aren't being signalled)
4185 CALLB_CPR_SAFE_BEGIN(&cpr
);
4186 (void) cv_timedwait_hires(&arc_reclaim_thread_cv
,
4187 &arc_reclaim_lock
, SEC2NSEC(1), MSEC2NSEC(1), 0);
4188 CALLB_CPR_SAFE_END(&cpr
, &arc_reclaim_lock
);
4192 arc_reclaim_thread_exit
= B_FALSE
;
4193 cv_broadcast(&arc_reclaim_thread_cv
);
4194 CALLB_CPR_EXIT(&cpr
); /* drops arc_reclaim_lock */
/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from. This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
4207 uint64_t arc_p_min
= (arc_c
>> arc_p_min_shift
);
4208 int64_t mrug_size
= refcount_count(&arc_mru_ghost
->arcs_size
);
4209 int64_t mfug_size
= refcount_count(&arc_mfu_ghost
->arcs_size
);
4211 if (state
== arc_l2c_only
)
4216 * Adapt the target size of the MRU list:
4217 * - if we just hit in the MRU ghost list, then increase
4218 * the target size of the MRU list.
4219 * - if we just hit in the MFU ghost list, then increase
4220 * the target size of the MFU list by decreasing the
4221 * target size of the MRU list.
4223 if (state
== arc_mru_ghost
) {
4224 mult
= (mrug_size
>= mfug_size
) ? 1 : (mfug_size
/ mrug_size
);
4225 mult
= MIN(mult
, 10); /* avoid wild arc_p adjustment */
4227 arc_p
= MIN(arc_c
- arc_p_min
, arc_p
+ bytes
* mult
);
4228 } else if (state
== arc_mfu_ghost
) {
4231 mult
= (mfug_size
>= mrug_size
) ? 1 : (mrug_size
/ mfug_size
);
4232 mult
= MIN(mult
, 10);
4234 delta
= MIN(bytes
* mult
, arc_p
);
4235 arc_p
= MAX(arc_p_min
, arc_p
- delta
);
4237 ASSERT((int64_t)arc_p
>= 0);
4239 if (arc_reclaim_needed()) {
4240 cv_signal(&arc_reclaim_thread_cv
);
4247 if (arc_c
>= arc_c_max
)
4251 * If we're within (2 * maxblocksize) bytes of the target
4252 * cache size, increment the target cache size
4254 if (arc_size
> arc_c
- (2ULL << SPA_MAXBLOCKSHIFT
)) {
4255 atomic_add_64(&arc_c
, (int64_t)bytes
);
4256 if (arc_c
> arc_c_max
)
4258 else if (state
== arc_anon
)
4259 atomic_add_64(&arc_p
, (int64_t)bytes
);
4263 ASSERT((int64_t)arc_p
>= 0);
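/*
 * For illustration of the adaptation above: on a hit in the MRU ghost
 * list with mrug_size = 100MB and mfug_size = 300MB, mult = 3, so arc_p
 * grows by 3 * bytes (capped at arc_c - arc_p_min); a hit in the MFU
 * ghost list moves arc_p the other way by a similarly scaled delta,
 * never dropping below arc_p_min.
 */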
/*
 * Check if arc_size has grown past our upper threshold, determined by
 * zfs_arc_overflow_shift.
 */
static boolean_t
arc_is_overflowing(void)
{
	/* Always allow at least one block of overflow */
	uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
	    arc_c >> zfs_arc_overflow_shift);

	return (arc_size >= arc_c + overflow);
}
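/*
 * For illustration: with arc_c = 1GB and zfs_arc_overflow_shift = 8, the
 * margin is MAX(SPA_MAXBLOCKSIZE, 1GB >> 8) = MAX(16MB, 4MB) = 16MB, so
 * arc_is_overflowing() reports B_TRUE once arc_size reaches arc_c + 16MB
 * and allocating threads will wait in arc_get_data_impl() for the reclaim
 * thread to catch up.
 */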
4281 arc_get_data_abd(arc_buf_hdr_t
*hdr
, uint64_t size
, void *tag
)
4283 arc_buf_contents_t type
= arc_buf_type(hdr
);
4285 arc_get_data_impl(hdr
, size
, tag
);
4286 if (type
== ARC_BUFC_METADATA
) {
4287 return (abd_alloc(size
, B_TRUE
));
4289 ASSERT(type
== ARC_BUFC_DATA
);
4290 return (abd_alloc(size
, B_FALSE
));
4295 arc_get_data_buf(arc_buf_hdr_t
*hdr
, uint64_t size
, void *tag
)
4297 arc_buf_contents_t type
= arc_buf_type(hdr
);
4299 arc_get_data_impl(hdr
, size
, tag
);
4300 if (type
== ARC_BUFC_METADATA
) {
4301 return (zio_buf_alloc(size
));
4303 ASSERT(type
== ARC_BUFC_DATA
);
4304 return (zio_data_buf_alloc(size
));
/*
 * Allocate a block and return it to the caller. If we are hitting the
 * hard limit for the cache size, we must sleep, waiting for the eviction
 * thread to catch up. If we're past the target size but below the hard
 * limit, we'll only signal the reclaim thread and continue on.
 */
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
4317 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
4318 arc_buf_contents_t type
= arc_buf_type(hdr
);
4320 arc_adapt(size
, state
);
	/*
	 * If arc_size is currently overflowing, and has grown past our
	 * upper limit, we must be adding data faster than the evict
	 * thread can evict. Thus, to ensure we don't compound the
	 * problem by adding more data and forcing arc_size to grow even
	 * further past its target size, we halt and wait for the
	 * eviction thread to catch up.
	 *
	 * It's also possible that the reclaim thread is unable to evict
	 * enough buffers to get arc_size below the overflow limit (e.g.
	 * due to buffers being un-evictable, or hash lock collisions).
	 * In this case, we want to proceed regardless if we're
	 * overflowing; thus we don't use a while loop here.
	 */
4336 if (arc_is_overflowing()) {
4337 mutex_enter(&arc_reclaim_lock
);
		/*
		 * Now that we've acquired the lock, we may no longer be
		 * over the overflow limit, let's check.
		 *
		 * We're ignoring the case of spurious wake ups. If that
		 * were to happen, it'd let this thread consume an ARC
		 * buffer before it should have (i.e. before we're under
		 * the overflow limit and were signalled by the reclaim
		 * thread). As long as that is a rare occurrence, it
		 * shouldn't cause any harm.
		 */
4350 if (arc_is_overflowing()) {
4351 cv_signal(&arc_reclaim_thread_cv
);
4352 cv_wait(&arc_reclaim_waiters_cv
, &arc_reclaim_lock
);
4355 mutex_exit(&arc_reclaim_lock
);
4358 VERIFY3U(hdr
->b_type
, ==, type
);
4359 if (type
== ARC_BUFC_METADATA
) {
4360 arc_space_consume(size
, ARC_SPACE_META
);
4362 arc_space_consume(size
, ARC_SPACE_DATA
);
4366 * Update the state size. Note that ghost states have a
4367 * "ghost size" and so don't need to be updated.
4369 if (!GHOST_STATE(state
)) {
4371 (void) refcount_add_many(&state
->arcs_size
, size
, tag
);
4374 * If this is reached via arc_read, the link is
4375 * protected by the hash lock. If reached via
4376 * arc_buf_alloc, the header should not be accessed by
4377 * any other thread. And, if reached via arc_read_done,
4378 * the hash lock will protect it if it's found in the
4379 * hash table; otherwise no other thread should be
4380 * trying to [add|remove]_reference it.
4382 if (multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
)) {
4383 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
4384 (void) refcount_add_many(&state
->arcs_esize
[type
],
4389 * If we are growing the cache, and we are adding anonymous
4390 * data, and we have outgrown arc_p, update arc_p
4392 if (arc_size
< arc_c
&& hdr
->b_l1hdr
.b_state
== arc_anon
&&
4393 (refcount_count(&arc_anon
->arcs_size
) +
4394 refcount_count(&arc_mru
->arcs_size
) > arc_p
))
4395 arc_p
= MIN(arc_c
, arc_p
+ size
);
4400 arc_free_data_abd(arc_buf_hdr_t
*hdr
, abd_t
*abd
, uint64_t size
, void *tag
)
4402 arc_free_data_impl(hdr
, size
, tag
);
4407 arc_free_data_buf(arc_buf_hdr_t
*hdr
, void *buf
, uint64_t size
, void *tag
)
4409 arc_buf_contents_t type
= arc_buf_type(hdr
);
4411 arc_free_data_impl(hdr
, size
, tag
);
4412 if (type
== ARC_BUFC_METADATA
) {
4413 zio_buf_free(buf
, size
);
4415 ASSERT(type
== ARC_BUFC_DATA
);
4416 zio_data_buf_free(buf
, size
);
4421 * Free the arc data buffer.
4424 arc_free_data_impl(arc_buf_hdr_t
*hdr
, uint64_t size
, void *tag
)
4426 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
4427 arc_buf_contents_t type
= arc_buf_type(hdr
);
4429 /* protected by hash lock, if in the hash table */
4430 if (multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
)) {
4431 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
4432 ASSERT(state
!= arc_anon
&& state
!= arc_l2c_only
);
4434 (void) refcount_remove_many(&state
->arcs_esize
[type
],
4437 (void) refcount_remove_many(&state
->arcs_size
, size
, tag
);
4439 VERIFY3U(hdr
->b_type
, ==, type
);
4440 if (type
== ARC_BUFC_METADATA
) {
4441 arc_space_return(size
, ARC_SPACE_META
);
4443 ASSERT(type
== ARC_BUFC_DATA
);
4444 arc_space_return(size
, ARC_SPACE_DATA
);
/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
	clock_t now;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(HDR_HAS_L1HDR(hdr));
4460 if (hdr
->b_l1hdr
.b_state
== arc_anon
) {
4462 * This buffer is not in the cache, and does not
4463 * appear in our "ghost" list. Add the new buffer
4467 ASSERT0(hdr
->b_l1hdr
.b_arc_access
);
4468 hdr
->b_l1hdr
.b_arc_access
= ddi_get_lbolt();
4469 DTRACE_PROBE1(new_state__mru
, arc_buf_hdr_t
*, hdr
);
4470 arc_change_state(arc_mru
, hdr
, hash_lock
);
4472 } else if (hdr
->b_l1hdr
.b_state
== arc_mru
) {
4473 now
= ddi_get_lbolt();
4476 * If this buffer is here because of a prefetch, then either:
4477 * - clear the flag if this is a "referencing" read
4478 * (any subsequent access will bump this into the MFU state).
4480 * - move the buffer to the head of the list if this is
4481 * another prefetch (to make it less likely to be evicted).
4483 if (HDR_PREFETCH(hdr
)) {
4484 if (refcount_count(&hdr
->b_l1hdr
.b_refcnt
) == 0) {
4485 /* link protected by hash lock */
4486 ASSERT(multilist_link_active(
4487 &hdr
->b_l1hdr
.b_arc_node
));
4489 arc_hdr_clear_flags(hdr
, ARC_FLAG_PREFETCH
);
4490 ARCSTAT_BUMP(arcstat_mru_hits
);
4492 hdr
->b_l1hdr
.b_arc_access
= now
;
4497 * This buffer has been "accessed" only once so far,
4498 * but it is still in the cache. Move it to the MFU
4501 if (now
> hdr
->b_l1hdr
.b_arc_access
+ ARC_MINTIME
) {
4503 * More than 125ms have passed since we
4504 * instantiated this buffer. Move it to the
4505 * most frequently used state.
4507 hdr
->b_l1hdr
.b_arc_access
= now
;
4508 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, hdr
);
4509 arc_change_state(arc_mfu
, hdr
, hash_lock
);
4511 ARCSTAT_BUMP(arcstat_mru_hits
);
4512 } else if (hdr
->b_l1hdr
.b_state
== arc_mru_ghost
) {
4513 arc_state_t
*new_state
;
4515 * This buffer has been "accessed" recently, but
4516 * was evicted from the cache. Move it to the
4520 if (HDR_PREFETCH(hdr
)) {
4521 new_state
= arc_mru
;
4522 if (refcount_count(&hdr
->b_l1hdr
.b_refcnt
) > 0)
4523 arc_hdr_clear_flags(hdr
, ARC_FLAG_PREFETCH
);
4524 DTRACE_PROBE1(new_state__mru
, arc_buf_hdr_t
*, hdr
);
4526 new_state
= arc_mfu
;
4527 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, hdr
);
4530 hdr
->b_l1hdr
.b_arc_access
= ddi_get_lbolt();
4531 arc_change_state(new_state
, hdr
, hash_lock
);
4533 ARCSTAT_BUMP(arcstat_mru_ghost_hits
);
4534 } else if (hdr
->b_l1hdr
.b_state
== arc_mfu
) {
4536 * This buffer has been accessed more than once and is
4537 * still in the cache. Keep it in the MFU state.
4539 * NOTE: an add_reference() that occurred when we did
4540 * the arc_read() will have kicked this off the list.
4541 * If it was a prefetch, we will explicitly move it to
4542 * the head of the list now.
4544 if ((HDR_PREFETCH(hdr
)) != 0) {
4545 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
4546 /* link protected by hash_lock */
4547 ASSERT(multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
4549 ARCSTAT_BUMP(arcstat_mfu_hits
);
4550 hdr
->b_l1hdr
.b_arc_access
= ddi_get_lbolt();
4551 } else if (hdr
->b_l1hdr
.b_state
== arc_mfu_ghost
) {
4552 arc_state_t
*new_state
= arc_mfu
;
4554 * This buffer has been accessed more than once but has
4555 * been evicted from the cache. Move it back to the
4559 if (HDR_PREFETCH(hdr
)) {
4561 * This is a prefetch access...
4562 * move this block back to the MRU state.
4564 ASSERT0(refcount_count(&hdr
->b_l1hdr
.b_refcnt
));
4565 new_state
= arc_mru
;
4568 hdr
->b_l1hdr
.b_arc_access
= ddi_get_lbolt();
4569 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, hdr
);
4570 arc_change_state(new_state
, hdr
, hash_lock
);
4572 ARCSTAT_BUMP(arcstat_mfu_ghost_hits
);
4573 } else if (hdr
->b_l1hdr
.b_state
== arc_l2c_only
) {
4575 * This buffer is on the 2nd Level ARC.
4578 hdr
->b_l1hdr
.b_arc_access
= ddi_get_lbolt();
4579 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, hdr
);
4580 arc_change_state(arc_mfu
, hdr
, hash_lock
);
4582 ASSERT(!"invalid arc state");
/* a generic arc_done_func_t which you can use */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	if (zio == NULL || zio->io_error == 0)
		bcopy(buf->b_data, arg, arc_buf_size(buf));
	arc_buf_destroy(buf, arg);
}

/* a generic arc_done_func_t */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		arc_buf_destroy(buf, arg);
		*bufp = NULL;
	} else {
		*bufp = buf;
		ASSERT(buf->b_data);
	}
}
4611 arc_hdr_verify(arc_buf_hdr_t
*hdr
, blkptr_t
*bp
)
4613 if (BP_IS_HOLE(bp
) || BP_IS_EMBEDDED(bp
)) {
4614 ASSERT3U(HDR_GET_PSIZE(hdr
), ==, 0);
4615 ASSERT3U(HDR_GET_COMPRESS(hdr
), ==, ZIO_COMPRESS_OFF
);
4617 if (HDR_COMPRESSION_ENABLED(hdr
)) {
4618 ASSERT3U(HDR_GET_COMPRESS(hdr
), ==,
4619 BP_GET_COMPRESS(bp
));
4621 ASSERT3U(HDR_GET_LSIZE(hdr
), ==, BP_GET_LSIZE(bp
));
4622 ASSERT3U(HDR_GET_PSIZE(hdr
), ==, BP_GET_PSIZE(bp
));
4627 arc_read_done(zio_t
*zio
)
4629 arc_buf_hdr_t
*hdr
= zio
->io_private
;
4630 kmutex_t
*hash_lock
= NULL
;
4631 arc_callback_t
*callback_list
;
4632 arc_callback_t
*acb
;
4633 boolean_t freeable
= B_FALSE
;
4634 boolean_t no_zio_error
= (zio
->io_error
== 0);
	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O. We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O. The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
4644 if (HDR_IN_HASH_TABLE(hdr
)) {
4645 ASSERT3U(hdr
->b_birth
, ==, BP_PHYSICAL_BIRTH(zio
->io_bp
));
4646 ASSERT3U(hdr
->b_dva
.dva_word
[0], ==,
4647 BP_IDENTITY(zio
->io_bp
)->dva_word
[0]);
4648 ASSERT3U(hdr
->b_dva
.dva_word
[1], ==,
4649 BP_IDENTITY(zio
->io_bp
)->dva_word
[1]);
4651 arc_buf_hdr_t
*found
= buf_hash_find(hdr
->b_spa
, zio
->io_bp
,
4654 ASSERT((found
== hdr
&&
4655 DVA_EQUAL(&hdr
->b_dva
, BP_IDENTITY(zio
->io_bp
))) ||
4656 (found
== hdr
&& HDR_L2_READING(hdr
)));
4657 ASSERT3P(hash_lock
, !=, NULL
);
4661 /* byteswap if necessary */
4662 if (BP_SHOULD_BYTESWAP(zio
->io_bp
)) {
4663 if (BP_GET_LEVEL(zio
->io_bp
) > 0) {
4664 hdr
->b_l1hdr
.b_byteswap
= DMU_BSWAP_UINT64
;
4666 hdr
->b_l1hdr
.b_byteswap
=
4667 DMU_OT_BYTESWAP(BP_GET_TYPE(zio
->io_bp
));
4670 hdr
->b_l1hdr
.b_byteswap
= DMU_BSWAP_NUMFUNCS
;
4674 arc_hdr_clear_flags(hdr
, ARC_FLAG_L2_EVICTED
);
4675 if (l2arc_noprefetch
&& HDR_PREFETCH(hdr
))
4676 arc_hdr_clear_flags(hdr
, ARC_FLAG_L2CACHE
);
4678 callback_list
= hdr
->b_l1hdr
.b_acb
;
4679 ASSERT3P(callback_list
, !=, NULL
);
4681 if (hash_lock
&& no_zio_error
&& hdr
->b_l1hdr
.b_state
== arc_anon
) {
4683 * Only call arc_access on anonymous buffers. This is because
4684 * if we've issued an I/O for an evicted buffer, we've already
4685 * called arc_access (to prevent any simultaneous readers from
4686 * getting confused).
4688 arc_access(hdr
, hash_lock
);
4692 * If a read request has a callback (i.e. acb_done is not NULL), then we
4693 * make a buf containing the data according to the parameters which were
4694 * passed in. The implementation of arc_buf_alloc_impl() ensures that we
4695 * aren't needlessly decompressing the data multiple times.
4697 int callback_cnt
= 0;
4698 for (acb
= callback_list
; acb
!= NULL
; acb
= acb
->acb_next
) {
4702 /* This is a demand read since prefetches don't use callbacks */
4705 int error
= arc_buf_alloc_impl(hdr
, acb
->acb_private
,
4706 acb
->acb_compressed
, no_zio_error
, &acb
->acb_buf
);
4708 zio
->io_error
= error
;
4711 hdr
->b_l1hdr
.b_acb
= NULL
;
4712 arc_hdr_clear_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
4713 if (callback_cnt
== 0) {
4714 ASSERT(HDR_PREFETCH(hdr
));
4715 ASSERT0(hdr
->b_l1hdr
.b_bufcnt
);
4716 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
4719 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
) ||
4720 callback_list
!= NULL
);
4723 arc_hdr_verify(hdr
, zio
->io_bp
);
4725 arc_hdr_set_flags(hdr
, ARC_FLAG_IO_ERROR
);
4726 if (hdr
->b_l1hdr
.b_state
!= arc_anon
)
4727 arc_change_state(arc_anon
, hdr
, hash_lock
);
4728 if (HDR_IN_HASH_TABLE(hdr
))
4729 buf_hash_remove(hdr
);
4730 freeable
= refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
);
4734 * Broadcast before we drop the hash_lock to avoid the possibility
4735 * that the hdr (and hence the cv) might be freed before we get to
4736 * the cv_broadcast().
4738 cv_broadcast(&hdr
->b_l1hdr
.b_cv
);
4740 if (hash_lock
!= NULL
) {
4741 mutex_exit(hash_lock
);
4744 * This block was freed while we waited for the read to
4745 * complete. It has been removed from the hash table and
4746 * moved to the anonymous state (so that it won't show up
4749 ASSERT3P(hdr
->b_l1hdr
.b_state
, ==, arc_anon
);
4750 freeable
= refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
);
4753 /* execute each callback and free its structure */
4754 while ((acb
= callback_list
) != NULL
) {
4756 acb
->acb_done(zio
, acb
->acb_buf
, acb
->acb_private
);
4758 if (acb
->acb_zio_dummy
!= NULL
) {
4759 acb
->acb_zio_dummy
->io_error
= zio
->io_error
;
4760 zio_nowait(acb
->acb_zio_dummy
);
4763 callback_list
= acb
->acb_next
;
4764 kmem_free(acb
, sizeof (arc_callback_t
));
4768 arc_hdr_destroy(hdr
);
4772 * "Read" the block at the specified DVA (in bp) via the
4773 * cache. If the block is found in the cache, invoke the provided
4774 * callback immediately and return. Note that the `zio' parameter
4775 * in the callback will be NULL in this case, since no IO was
4776 * required. If the block is not in the cache pass the read request
4777 * on to the spa with a substitute callback function, so that the
4778 * requested block will be added to the cache.
4780 * If a read request arrives for a block that has a read in-progress,
4781 * either wait for the in-progress read to complete (and return the
4782 * results); or, if this is a read with a "done" func, add a record
4783 * to the read to invoke the "done" func when the read completes,
4784 * and return; or just return.
4786 * arc_read_done() will invoke all the requested "done" functions
4787 * for readers of this block.
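/*
 * A sketch of a typical synchronous caller (hypothetical values, for
 * illustration; real callers live in the DMU, and spa, bp and zb come
 * from the caller's context):
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	arc_buf_t *abuf = NULL;
 *
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * On success, arc_getbuf_func() has stored the buffer in abuf; the caller
 * eventually drops its hold with arc_buf_destroy(abuf, &abuf).
 */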
4790 arc_read(zio_t
*pio
, spa_t
*spa
, const blkptr_t
*bp
, arc_done_func_t
*done
,
4791 void *private, zio_priority_t priority
, int zio_flags
,
4792 arc_flags_t
*arc_flags
, const zbookmark_phys_t
*zb
)
4794 arc_buf_hdr_t
*hdr
= NULL
;
4795 kmutex_t
*hash_lock
= NULL
;
4797 uint64_t guid
= spa_load_guid(spa
);
4798 boolean_t compressed_read
= (zio_flags
& ZIO_FLAG_RAW
) != 0;
4800 ASSERT(!BP_IS_EMBEDDED(bp
) ||
4801 BPE_GET_ETYPE(bp
) == BP_EMBEDDED_TYPE_DATA
);
4804 if (!BP_IS_EMBEDDED(bp
)) {
4806 * Embedded BP's have no DVA and require no I/O to "read".
4807 * Create an anonymous arc buf to back it.
4809 hdr
= buf_hash_find(guid
, bp
, &hash_lock
);
4812 if (hdr
!= NULL
&& HDR_HAS_L1HDR(hdr
) && hdr
->b_l1hdr
.b_pabd
!= NULL
) {
4813 arc_buf_t
*buf
= NULL
;
4814 *arc_flags
|= ARC_FLAG_CACHED
;
4816 if (HDR_IO_IN_PROGRESS(hdr
)) {
4818 if ((hdr
->b_flags
& ARC_FLAG_PRIO_ASYNC_READ
) &&
4819 priority
== ZIO_PRIORITY_SYNC_READ
) {
4821 * This sync read must wait for an
4822 * in-progress async read (e.g. a predictive
4823 * prefetch). Async reads are queued
4824 * separately at the vdev_queue layer, so
4825 * this is a form of priority inversion.
4826 * Ideally, we would "inherit" the demand
4827 * i/o's priority by moving the i/o from
4828 * the async queue to the synchronous queue,
4829 * but there is currently no mechanism to do
4830 * so. Track this so that we can evaluate
4831 * the magnitude of this potential performance
4834 * Note that if the prefetch i/o is already
4835 * active (has been issued to the device),
4836 * the prefetch improved performance, because
4837 * we issued it sooner than we would have
4838 * without the prefetch.
4840 DTRACE_PROBE1(arc__sync__wait__for__async
,
4841 arc_buf_hdr_t
*, hdr
);
4842 ARCSTAT_BUMP(arcstat_sync_wait_for_async
);
4844 if (hdr
->b_flags
& ARC_FLAG_PREDICTIVE_PREFETCH
) {
4845 arc_hdr_clear_flags(hdr
,
4846 ARC_FLAG_PREDICTIVE_PREFETCH
);
4849 if (*arc_flags
& ARC_FLAG_WAIT
) {
4850 cv_wait(&hdr
->b_l1hdr
.b_cv
, hash_lock
);
4851 mutex_exit(hash_lock
);
4854 ASSERT(*arc_flags
& ARC_FLAG_NOWAIT
);
4857 arc_callback_t
*acb
= NULL
;
4859 acb
= kmem_zalloc(sizeof (arc_callback_t
),
4861 acb
->acb_done
= done
;
4862 acb
->acb_private
= private;
4863 acb
->acb_compressed
= compressed_read
;
4865 acb
->acb_zio_dummy
= zio_null(pio
,
4866 spa
, NULL
, NULL
, NULL
, zio_flags
);
4868 ASSERT3P(acb
->acb_done
, !=, NULL
);
4869 acb
->acb_next
= hdr
->b_l1hdr
.b_acb
;
4870 hdr
->b_l1hdr
.b_acb
= acb
;
4871 mutex_exit(hash_lock
);
4874 mutex_exit(hash_lock
);
4878 ASSERT(hdr
->b_l1hdr
.b_state
== arc_mru
||
4879 hdr
->b_l1hdr
.b_state
== arc_mfu
);
4882 if (hdr
->b_flags
& ARC_FLAG_PREDICTIVE_PREFETCH
) {
4884 * This is a demand read which does not have to
4885 * wait for i/o because we did a predictive
4886 * prefetch i/o for it, which has completed.
4889 arc__demand__hit__predictive__prefetch
,
4890 arc_buf_hdr_t
*, hdr
);
4892 arcstat_demand_hit_predictive_prefetch
);
4893 arc_hdr_clear_flags(hdr
,
4894 ARC_FLAG_PREDICTIVE_PREFETCH
);
4896 ASSERT(!BP_IS_EMBEDDED(bp
) || !BP_IS_HOLE(bp
));
4898 /* Get a buf with the desired data in it. */
4899 VERIFY0(arc_buf_alloc_impl(hdr
, private,
4900 compressed_read
, B_TRUE
, &buf
));
4901 } else if (*arc_flags
& ARC_FLAG_PREFETCH
&&
4902 refcount_count(&hdr
->b_l1hdr
.b_refcnt
) == 0) {
4903 arc_hdr_set_flags(hdr
, ARC_FLAG_PREFETCH
);
4905 DTRACE_PROBE1(arc__hit
, arc_buf_hdr_t
*, hdr
);
4906 arc_access(hdr
, hash_lock
);
4907 if (*arc_flags
& ARC_FLAG_L2CACHE
)
4908 arc_hdr_set_flags(hdr
, ARC_FLAG_L2CACHE
);
4909 mutex_exit(hash_lock
);
4910 ARCSTAT_BUMP(arcstat_hits
);
4911 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr
),
4912 demand
, prefetch
, !HDR_ISTYPE_METADATA(hdr
),
4913 data
, metadata
, hits
);
4916 done(NULL
, buf
, private);
4918 uint64_t lsize
= BP_GET_LSIZE(bp
);
4919 uint64_t psize
= BP_GET_PSIZE(bp
);
4920 arc_callback_t
*acb
;
4923 boolean_t devw
= B_FALSE
;
4927 /* this block is not in the cache */
4928 arc_buf_hdr_t
*exists
= NULL
;
4929 arc_buf_contents_t type
= BP_GET_BUFC_TYPE(bp
);
4930 hdr
= arc_hdr_alloc(spa_load_guid(spa
), psize
, lsize
,
4931 BP_GET_COMPRESS(bp
), type
);
4933 if (!BP_IS_EMBEDDED(bp
)) {
4934 hdr
->b_dva
= *BP_IDENTITY(bp
);
4935 hdr
->b_birth
= BP_PHYSICAL_BIRTH(bp
);
4936 exists
= buf_hash_insert(hdr
, &hash_lock
);
4938 if (exists
!= NULL
) {
4939 /* somebody beat us to the hash insert */
4940 mutex_exit(hash_lock
);
4941 buf_discard_identity(hdr
);
4942 arc_hdr_destroy(hdr
);
4943 goto top
; /* restart the IO request */
4947 * This block is in the ghost cache. If it was L2-only
4948 * (and thus didn't have an L1 hdr), we realloc the
4949 * header to add an L1 hdr.
4951 if (!HDR_HAS_L1HDR(hdr
)) {
4952 hdr
= arc_hdr_realloc(hdr
, hdr_l2only_cache
,
4955 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
4956 ASSERT(GHOST_STATE(hdr
->b_l1hdr
.b_state
));
4957 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
4958 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
4959 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
4960 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
4963 * This is a delicate dance that we play here.
4964 * This hdr is in the ghost list so we access it
4965 * to move it out of the ghost list before we
4966 * initiate the read. If it's a prefetch then
4967 * it won't have a callback so we'll remove the
4968 * reference that arc_buf_alloc_impl() created. We
4969 * do this after we've called arc_access() to
4970 * avoid hitting an assert in remove_reference().
4972 arc_access(hdr
, hash_lock
);
4973 arc_hdr_alloc_pabd(hdr
);
4975 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
4976 size
= arc_hdr_size(hdr
);
4979 * If compression is enabled on the hdr, then will do
4980 * RAW I/O and will store the compressed data in the hdr's
4981 * data block. Otherwise, the hdr's data block will contain
4982 * the uncompressed data.
4984 if (HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
) {
4985 zio_flags
|= ZIO_FLAG_RAW
;
4988 if (*arc_flags
& ARC_FLAG_PREFETCH
)
4989 arc_hdr_set_flags(hdr
, ARC_FLAG_PREFETCH
);
4990 if (*arc_flags
& ARC_FLAG_L2CACHE
)
4991 arc_hdr_set_flags(hdr
, ARC_FLAG_L2CACHE
);
4992 if (BP_GET_LEVEL(bp
) > 0)
4993 arc_hdr_set_flags(hdr
, ARC_FLAG_INDIRECT
);
4994 if (*arc_flags
& ARC_FLAG_PREDICTIVE_PREFETCH
)
4995 arc_hdr_set_flags(hdr
, ARC_FLAG_PREDICTIVE_PREFETCH
);
4996 ASSERT(!GHOST_STATE(hdr
->b_l1hdr
.b_state
));
4998 acb
= kmem_zalloc(sizeof (arc_callback_t
), KM_SLEEP
);
4999 acb
->acb_done
= done
;
5000 acb
->acb_private
= private;
5001 acb
->acb_compressed
= compressed_read
;
5003 ASSERT3P(hdr
->b_l1hdr
.b_acb
, ==, NULL
);
5004 hdr
->b_l1hdr
.b_acb
= acb
;
5005 arc_hdr_set_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
5007 if (HDR_HAS_L2HDR(hdr
) &&
5008 (vd
= hdr
->b_l2hdr
.b_dev
->l2ad_vdev
) != NULL
) {
5009 devw
= hdr
->b_l2hdr
.b_dev
->l2ad_writing
;
5010 addr
= hdr
->b_l2hdr
.b_daddr
;
5012 * Lock out L2ARC device removal.
5014 if (vdev_is_dead(vd
) ||
5015 !spa_config_tryenter(spa
, SCL_L2ARC
, vd
, RW_READER
))
5019 if (priority
== ZIO_PRIORITY_ASYNC_READ
)
5020 arc_hdr_set_flags(hdr
, ARC_FLAG_PRIO_ASYNC_READ
);
5022 arc_hdr_clear_flags(hdr
, ARC_FLAG_PRIO_ASYNC_READ
);
5024 if (hash_lock
!= NULL
)
5025 mutex_exit(hash_lock
);
5028 * At this point, we have a level 1 cache miss. Try again in
5029 * L2ARC if possible.
5031 ASSERT3U(HDR_GET_LSIZE(hdr
), ==, lsize
);
5033 DTRACE_PROBE4(arc__miss
, arc_buf_hdr_t
*, hdr
, blkptr_t
*, bp
,
5034 uint64_t, lsize
, zbookmark_phys_t
*, zb
);
5035 ARCSTAT_BUMP(arcstat_misses
);
5036 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr
),
5037 demand
, prefetch
, !HDR_ISTYPE_METADATA(hdr
),
5038 data
, metadata
, misses
);
5040 if (vd
!= NULL
&& l2arc_ndev
!= 0 && !(l2arc_norw
&& devw
)) {
5042 * Read from the L2ARC if the following are true:
5043 * 1. The L2ARC vdev was previously cached.
5044 * 2. This buffer still has L2ARC metadata.
5045 * 3. This buffer isn't currently writing to the L2ARC.
5046 * 4. The L2ARC entry wasn't evicted, which may
5047 * also have invalidated the vdev.
5048 * 5. This isn't prefetch and l2arc_noprefetch is set.
5050 if (HDR_HAS_L2HDR(hdr
) &&
5051 !HDR_L2_WRITING(hdr
) && !HDR_L2_EVICTED(hdr
) &&
5052 !(l2arc_noprefetch
&& HDR_PREFETCH(hdr
))) {
5053 l2arc_read_callback_t
*cb
;
5057 DTRACE_PROBE1(l2arc__hit
, arc_buf_hdr_t
*, hdr
);
5058 ARCSTAT_BUMP(arcstat_l2_hits
);
5060 cb
= kmem_zalloc(sizeof (l2arc_read_callback_t
),
5062 cb
->l2rcb_hdr
= hdr
;
5065 cb
->l2rcb_flags
= zio_flags
;
5067 asize
= vdev_psize_to_asize(vd
, size
);
5068 if (asize
!= size
) {
5069 abd
= abd_alloc_for_io(asize
,
5070 HDR_ISTYPE_METADATA(hdr
));
5071 cb
->l2rcb_abd
= abd
;
5073 abd
= hdr
->b_l1hdr
.b_pabd
;
5076 ASSERT(addr
>= VDEV_LABEL_START_SIZE
&&
5077 addr
+ asize
<= vd
->vdev_psize
-
5078 VDEV_LABEL_END_SIZE
);
5081 * l2arc read. The SCL_L2ARC lock will be
5082 * released by l2arc_read_done().
5083 * Issue a null zio if the underlying buffer
5084 * was squashed to zero size by compression.
5086 ASSERT3U(HDR_GET_COMPRESS(hdr
), !=,
5087 ZIO_COMPRESS_EMPTY
);
5088 rzio
= zio_read_phys(pio
, vd
, addr
,
5091 l2arc_read_done
, cb
, priority
,
5092 zio_flags
| ZIO_FLAG_DONT_CACHE
|
5094 ZIO_FLAG_DONT_PROPAGATE
|
5095 ZIO_FLAG_DONT_RETRY
, B_FALSE
);
5096 DTRACE_PROBE2(l2arc__read
, vdev_t
*, vd
,
5098 ARCSTAT_INCR(arcstat_l2_read_bytes
, size
);
5100 if (*arc_flags
& ARC_FLAG_NOWAIT
) {
5105 ASSERT(*arc_flags
& ARC_FLAG_WAIT
);
5106 if (zio_wait(rzio
) == 0)
5109 /* l2arc read error; goto zio_read() */
5111 DTRACE_PROBE1(l2arc__miss
,
5112 arc_buf_hdr_t
*, hdr
);
5113 ARCSTAT_BUMP(arcstat_l2_misses
);
5114 if (HDR_L2_WRITING(hdr
))
5115 ARCSTAT_BUMP(arcstat_l2_rw_clash
);
5116 spa_config_exit(spa
, SCL_L2ARC
, vd
);
5120 spa_config_exit(spa
, SCL_L2ARC
, vd
);
5121 if (l2arc_ndev
!= 0) {
5122 DTRACE_PROBE1(l2arc__miss
,
5123 arc_buf_hdr_t
*, hdr
);
5124 ARCSTAT_BUMP(arcstat_l2_misses
);
5128 rzio
= zio_read(pio
, spa
, bp
, hdr
->b_l1hdr
.b_pabd
, size
,
5129 arc_read_done
, hdr
, priority
, zio_flags
, zb
);
5131 if (*arc_flags
& ARC_FLAG_WAIT
)
5132 return (zio_wait(rzio
));
5134 ASSERT(*arc_flags
& ARC_FLAG_NOWAIT
);
5141 * Notify the arc that a block was freed, and thus will never be used again.
5144 arc_freed(spa_t
*spa
, const blkptr_t
*bp
)
5147 kmutex_t
*hash_lock
;
5148 uint64_t guid
= spa_load_guid(spa
);
5150 ASSERT(!BP_IS_EMBEDDED(bp
));
5152 hdr
= buf_hash_find(guid
, bp
, &hash_lock
);
5157 * We might be trying to free a block that is still doing I/O
5158 * (i.e. prefetch) or has a reference (i.e. a dedup-ed,
5159 * dmu_sync-ed block). If this block is being prefetched, then it
5160 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
5161 * until the I/O completes. A block may also have a reference if it is
5162 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would
5163 * have written the new block to its final resting place on disk but
5164 * without the dedup flag set. This would have left the hdr in the MRU
5165 * state and discoverable. When the txg finally syncs it detects that
5166 * the block was overridden in open context and issues an override I/O.
5167 * Since this is a dedup block, the override I/O will determine if the
5168 * block is already in the DDT. If so, then it will replace the io_bp
5169 * with the bp from the DDT and allow the I/O to finish. When the I/O
5170 * reaches the done callback, dbuf_write_override_done, it will
5171 * check to see if the io_bp and io_bp_override are identical.
5172 * If they are not, then it indicates that the bp was replaced with
5173 * the bp in the DDT and the override bp is freed. This allows
5174 * us to arrive here with a reference on a block that is being
5175 * freed. So if we have an I/O in progress, or a reference to
5176 * this hdr, then we don't destroy the hdr.
5178 if (!HDR_HAS_L1HDR(hdr
) || (!HDR_IO_IN_PROGRESS(hdr
) &&
5179 refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
))) {
5180 arc_change_state(arc_anon
, hdr
, hash_lock
);
5181 arc_hdr_destroy(hdr
);
5182 mutex_exit(hash_lock
);
5184 mutex_exit(hash_lock
);
/*
 * Release this buffer from the cache, making it an anonymous buffer. This
 * must be done after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
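/*
 * For example (illustrative; the dbuf layer is the main caller), a cached
 * buffer about to be dirtied is first detached from the ARC with
 *
 *	arc_release(db->db_buf, db);
 *
 * after which the buffer is anonymous and may be modified freely until it
 * is written back out via arc_write().
 */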
5196 arc_release(arc_buf_t
*buf
, void *tag
)
5198 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
5201 * It would be nice to assert that if it's DMU metadata (level >
5202 * 0 || it's the dnode file), then it must be syncing context.
5203 * But we don't know that information at this level.
5206 mutex_enter(&buf
->b_evict_lock
);
5208 ASSERT(HDR_HAS_L1HDR(hdr
));
5211 * We don't grab the hash lock prior to this check, because if
5212 * the buffer's header is in the arc_anon state, it won't be
5213 * linked into the hash table.
5215 if (hdr
->b_l1hdr
.b_state
== arc_anon
) {
5216 mutex_exit(&buf
->b_evict_lock
);
5217 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
5218 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
5219 ASSERT(!HDR_HAS_L2HDR(hdr
));
5220 ASSERT(HDR_EMPTY(hdr
));
5222 ASSERT3U(hdr
->b_l1hdr
.b_bufcnt
, ==, 1);
5223 ASSERT3S(refcount_count(&hdr
->b_l1hdr
.b_refcnt
), ==, 1);
5224 ASSERT(!list_link_active(&hdr
->b_l1hdr
.b_arc_node
));
5226 hdr
->b_l1hdr
.b_arc_access
= 0;
5229 * If the buf is being overridden then it may already
5230 * have a hdr that is not empty.
5232 buf_discard_identity(hdr
);
5238 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
5239 mutex_enter(hash_lock
);
5242 * This assignment is only valid as long as the hash_lock is
5243 * held, we must be careful not to reference state or the
5244 * b_state field after dropping the lock.
5246 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
5247 ASSERT3P(hash_lock
, ==, HDR_LOCK(hdr
));
5248 ASSERT3P(state
, !=, arc_anon
);
5250 /* this buffer is not on any list */
5251 ASSERT3S(refcount_count(&hdr
->b_l1hdr
.b_refcnt
), >, 0);
5253 if (HDR_HAS_L2HDR(hdr
)) {
5254 mutex_enter(&hdr
->b_l2hdr
.b_dev
->l2ad_mtx
);
5257 * We have to recheck this conditional again now that
5258 * we're holding the l2ad_mtx to prevent a race with
5259 * another thread which might be concurrently calling
5260 * l2arc_evict(). In that case, l2arc_evict() might have
5261 * destroyed the header's L2 portion as we were waiting
5262 * to acquire the l2ad_mtx.
5264 if (HDR_HAS_L2HDR(hdr
))
5265 arc_hdr_l2hdr_destroy(hdr
);
5267 mutex_exit(&hdr
->b_l2hdr
.b_dev
->l2ad_mtx
);
5271 * Do we have more than one buf?
5273 if (hdr
->b_l1hdr
.b_bufcnt
> 1) {
5274 arc_buf_hdr_t
*nhdr
;
5275 uint64_t spa
= hdr
->b_spa
;
5276 uint64_t psize
= HDR_GET_PSIZE(hdr
);
5277 uint64_t lsize
= HDR_GET_LSIZE(hdr
);
5278 enum zio_compress compress
= HDR_GET_COMPRESS(hdr
);
5279 arc_buf_contents_t type
= arc_buf_type(hdr
);
5280 VERIFY3U(hdr
->b_type
, ==, type
);
5282 ASSERT(hdr
->b_l1hdr
.b_buf
!= buf
|| buf
->b_next
!= NULL
);
5283 (void) remove_reference(hdr
, hash_lock
, tag
);
5285 if (arc_buf_is_shared(buf
) && !ARC_BUF_COMPRESSED(buf
)) {
5286 ASSERT3P(hdr
->b_l1hdr
.b_buf
, !=, buf
);
5287 ASSERT(ARC_BUF_LAST(buf
));
5291 * Pull the data off of this hdr and attach it to
5292 * a new anonymous hdr. Also find the last buffer
5293 * in the hdr's buffer list.
5295 arc_buf_t
*lastbuf
= arc_buf_remove(hdr
, buf
);
5296 ASSERT3P(lastbuf
, !=, NULL
);
5299 * If the current arc_buf_t and the hdr are sharing their data
5300 * buffer, then we must stop sharing that block.
5302 if (arc_buf_is_shared(buf
)) {
5303 VERIFY(!arc_buf_is_shared(lastbuf
));
5306 * First, sever the block sharing relationship between
5307 * buf and the arc_buf_hdr_t.
5309 arc_unshare_buf(hdr
, buf
);
5312 * Now we need to recreate the hdr's b_pabd. Since we
5313 * have lastbuf handy, we try to share with it, but if
5314 * we can't then we allocate a new b_pabd and copy the
5315 * data from buf into it.
5317 if (arc_can_share(hdr
, lastbuf
)) {
5318 arc_share_buf(hdr
, lastbuf
);
5320 arc_hdr_alloc_pabd(hdr
);
5321 abd_copy_from_buf(hdr
->b_l1hdr
.b_pabd
,
5322 buf
->b_data
, psize
);
5324 VERIFY3P(lastbuf
->b_data
, !=, NULL
);
5325 } else if (HDR_SHARED_DATA(hdr
)) {
5327 * Uncompressed shared buffers are always at the end
5328 * of the list. Compressed buffers don't have the
5329 * same requirements. This makes it hard to
5330 * simply assert that the lastbuf is shared so
5331 * we rely on the hdr's compression flags to determine
5332 * if we have a compressed, shared buffer.
5334 ASSERT(arc_buf_is_shared(lastbuf
) ||
5335 HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
);
5336 ASSERT(!ARC_BUF_SHARED(buf
));
5338 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
5339 ASSERT3P(state
, !=, arc_l2c_only
);
5341 (void) refcount_remove_many(&state
->arcs_size
,
5342 arc_buf_size(buf
), buf
);
5344 if (refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
)) {
5345 ASSERT3P(state
, !=, arc_l2c_only
);
5346 (void) refcount_remove_many(&state
->arcs_esize
[type
],
5347 arc_buf_size(buf
), buf
);
5350 hdr
->b_l1hdr
.b_bufcnt
-= 1;
5351 arc_cksum_verify(buf
);
5352 arc_buf_unwatch(buf
);
5354 mutex_exit(hash_lock
);
5357 * Allocate a new hdr. The new hdr will contain a b_pabd
5358 * buffer which will be freed in arc_write().
5360 nhdr
= arc_hdr_alloc(spa
, psize
, lsize
, compress
, type
);
5361 ASSERT3P(nhdr
->b_l1hdr
.b_buf
, ==, NULL
);
5362 ASSERT0(nhdr
->b_l1hdr
.b_bufcnt
);
5363 ASSERT0(refcount_count(&nhdr
->b_l1hdr
.b_refcnt
));
5364 VERIFY3U(nhdr
->b_type
, ==, type
);
5365 ASSERT(!HDR_SHARED_DATA(nhdr
));
5367 nhdr
->b_l1hdr
.b_buf
= buf
;
5368 nhdr
->b_l1hdr
.b_bufcnt
= 1;
5369 (void) refcount_add(&nhdr
->b_l1hdr
.b_refcnt
, tag
);
5372 mutex_exit(&buf
->b_evict_lock
);
5373 (void) refcount_add_many(&arc_anon
->arcs_size
,
5374 arc_buf_size(buf
), buf
);
5376 mutex_exit(&buf
->b_evict_lock
);
5377 ASSERT(refcount_count(&hdr
->b_l1hdr
.b_refcnt
) == 1);
5378 /* protected by hash lock, or hdr is on arc_anon */
5379 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
5380 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
5381 arc_change_state(arc_anon
, hdr
, hash_lock
);
5382 hdr
->b_l1hdr
.b_arc_access
= 0;
5383 mutex_exit(hash_lock
);
5385 buf_discard_identity(hdr
);
5391 arc_released(arc_buf_t
*buf
)
5395 mutex_enter(&buf
->b_evict_lock
);
5396 released
= (buf
->b_data
!= NULL
&&
5397 buf
->b_hdr
->b_l1hdr
.b_state
== arc_anon
);
5398 mutex_exit(&buf
->b_evict_lock
);
5404 arc_referenced(arc_buf_t
*buf
)
5408 mutex_enter(&buf
->b_evict_lock
);
5409 referenced
= (refcount_count(&buf
->b_hdr
->b_l1hdr
.b_refcnt
));
5410 mutex_exit(&buf
->b_evict_lock
);
5411 return (referenced
);
5416 arc_write_ready(zio_t
*zio
)
5418 arc_write_callback_t
*callback
= zio
->io_private
;
5419 arc_buf_t
*buf
= callback
->awcb_buf
;
5420 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
5421 uint64_t psize
= BP_IS_HOLE(zio
->io_bp
) ? 0 : BP_GET_PSIZE(zio
->io_bp
);
5423 ASSERT(HDR_HAS_L1HDR(hdr
));
5424 ASSERT(!refcount_is_zero(&buf
->b_hdr
->b_l1hdr
.b_refcnt
));
5425 ASSERT(hdr
->b_l1hdr
.b_bufcnt
> 0);
5428 * If we're reexecuting this zio because the pool suspended, then
5429 * cleanup any state that was previously set the first time the
5430 * callback was invoked.
5432 if (zio
->io_flags
& ZIO_FLAG_REEXECUTED
) {
5433 arc_cksum_free(hdr
);
5434 arc_buf_unwatch(buf
);
5435 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
5436 if (arc_buf_is_shared(buf
)) {
5437 arc_unshare_buf(hdr
, buf
);
5439 arc_hdr_free_pabd(hdr
);
5443 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
5444 ASSERT(!HDR_SHARED_DATA(hdr
));
5445 ASSERT(!arc_buf_is_shared(buf
));
5447 callback
->awcb_ready(zio
, buf
, callback
->awcb_private
);
5449 if (HDR_IO_IN_PROGRESS(hdr
))
5450 ASSERT(zio
->io_flags
& ZIO_FLAG_REEXECUTED
);
5452 arc_cksum_compute(buf
);
5453 arc_hdr_set_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
5455 enum zio_compress compress
;
5456 if (BP_IS_HOLE(zio
->io_bp
) || BP_IS_EMBEDDED(zio
->io_bp
)) {
5457 compress
= ZIO_COMPRESS_OFF
;
5459 ASSERT3U(HDR_GET_LSIZE(hdr
), ==, BP_GET_LSIZE(zio
->io_bp
));
5460 compress
= BP_GET_COMPRESS(zio
->io_bp
);
5462 HDR_SET_PSIZE(hdr
, psize
);
5463 arc_hdr_set_compress(hdr
, compress
);
5467 * Fill the hdr with data. If the hdr is compressed, the data we want
5468 * is available from the zio, otherwise we can take it from the buf.
5470 * We might be able to share the buf's data with the hdr here. However,
5471 * doing so would cause the ARC to be full of linear ABDs if we write a
5472 * lot of shareable data. As a compromise, we check whether scattered
5473 * ABDs are allowed, and assume that if they are then the user wants
5474 * the ARC to be primarily filled with them regardless of the data being
5475 * written. Therefore, if they're allowed then we allocate one and copy
5476 * the data into it; otherwise, we share the data directly if we can.
5478 if (zfs_abd_scatter_enabled
|| !arc_can_share(hdr
, buf
)) {
5479 arc_hdr_alloc_pabd(hdr
);
5482 * Ideally, we would always copy the io_abd into b_pabd, but the
5483 * user may have disabled compressed ARC, thus we must check the
5484 * hdr's compression setting rather than the io_bp's.
5486 if (HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
) {
5487 ASSERT3U(BP_GET_COMPRESS(zio
->io_bp
), !=,
5489 ASSERT3U(psize
, >, 0);
5491 abd_copy(hdr
->b_l1hdr
.b_pabd
, zio
->io_abd
, psize
);
5493 ASSERT3U(zio
->io_orig_size
, ==, arc_hdr_size(hdr
));
5495 abd_copy_from_buf(hdr
->b_l1hdr
.b_pabd
, buf
->b_data
,
5499 ASSERT3P(buf
->b_data
, ==, abd_to_buf(zio
->io_orig_abd
));
5500 ASSERT3U(zio
->io_orig_size
, ==, arc_buf_size(buf
));
5501 ASSERT3U(hdr
->b_l1hdr
.b_bufcnt
, ==, 1);
5503 arc_share_buf(hdr
, buf
);
5506 arc_hdr_verify(hdr
, zio
->io_bp
);
5510 arc_write_children_ready(zio_t
*zio
)
5512 arc_write_callback_t
*callback
= zio
->io_private
;
5513 arc_buf_t
*buf
= callback
->awcb_buf
;
5515 callback
->awcb_children_ready(zio
, buf
, callback
->awcb_private
);
5519 * The SPA calls this callback for each physical write that happens on behalf
5520 * of a logical write. See the comment in dbuf_write_physdone() for details.
5523 arc_write_physdone(zio_t
*zio
)
5525 arc_write_callback_t
*cb
= zio
->io_private
;
5526 if (cb
->awcb_physdone
!= NULL
)
5527 cb
->awcb_physdone(zio
, cb
->awcb_buf
, cb
->awcb_private
);
5531 arc_write_done(zio_t
*zio
)
5533 arc_write_callback_t
*callback
= zio
->io_private
;
5534 arc_buf_t
*buf
= callback
->awcb_buf
;
5535 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
5537 ASSERT3P(hdr
->b_l1hdr
.b_acb
, ==, NULL
);
5539 if (zio
->io_error
== 0) {
5540 arc_hdr_verify(hdr
, zio
->io_bp
);
5542 if (BP_IS_HOLE(zio
->io_bp
) || BP_IS_EMBEDDED(zio
->io_bp
)) {
5543 buf_discard_identity(hdr
);
5545 hdr
->b_dva
= *BP_IDENTITY(zio
->io_bp
);
5546 hdr
->b_birth
= BP_PHYSICAL_BIRTH(zio
->io_bp
);
5549 ASSERT(HDR_EMPTY(hdr
));
	/*
	 * If the block to be written was all-zero or compressed enough to be
	 * embedded in the BP, no write was performed so there will be no
	 * dva/birth/checksum. The buffer must therefore remain anonymous
	 * (and uncached).
	 */
5558 if (!HDR_EMPTY(hdr
)) {
5559 arc_buf_hdr_t
*exists
;
5560 kmutex_t
*hash_lock
;
5562 ASSERT3U(zio
->io_error
, ==, 0);
5564 arc_cksum_verify(buf
);
5566 exists
= buf_hash_insert(hdr
, &hash_lock
);
5567 if (exists
!= NULL
) {
5569 * This can only happen if we overwrite for
5570 * sync-to-convergence, because we remove
5571 * buffers from the hash table when we arc_free().
5573 if (zio
->io_flags
& ZIO_FLAG_IO_REWRITE
) {
5574 if (!BP_EQUAL(&zio
->io_bp_orig
, zio
->io_bp
))
5575 panic("bad overwrite, hdr=%p exists=%p",
5576 (void *)hdr
, (void *)exists
);
5577 ASSERT(refcount_is_zero(
5578 &exists
->b_l1hdr
.b_refcnt
));
5579 arc_change_state(arc_anon
, exists
, hash_lock
);
5580 mutex_exit(hash_lock
);
5581 arc_hdr_destroy(exists
);
5582 exists
= buf_hash_insert(hdr
, &hash_lock
);
5583 ASSERT3P(exists
, ==, NULL
);
5584 } else if (zio
->io_flags
& ZIO_FLAG_NOPWRITE
) {
5586 ASSERT(zio
->io_prop
.zp_nopwrite
);
5587 if (!BP_EQUAL(&zio
->io_bp_orig
, zio
->io_bp
))
5588 panic("bad nopwrite, hdr=%p exists=%p",
5589 (void *)hdr
, (void *)exists
);
5592 ASSERT(hdr
->b_l1hdr
.b_bufcnt
== 1);
5593 ASSERT(hdr
->b_l1hdr
.b_state
== arc_anon
);
5594 ASSERT(BP_GET_DEDUP(zio
->io_bp
));
5595 ASSERT(BP_GET_LEVEL(zio
->io_bp
) == 0);
5598 arc_hdr_clear_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
5599 /* if it's not anon, we are doing a scrub */
5600 if (exists
== NULL
&& hdr
->b_l1hdr
.b_state
== arc_anon
)
5601 arc_access(hdr
, hash_lock
);
5602 mutex_exit(hash_lock
);
5604 arc_hdr_clear_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
5607 ASSERT(!refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
5608 callback
->awcb_done(zio
, buf
, callback
->awcb_private
);
5610 abd_put(zio
->io_abd
);
5611 kmem_free(callback
, sizeof (arc_write_callback_t
));
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
    boolean_t l2arc, const zio_prop_t *zp, arc_done_func_t *ready,
    arc_done_func_t *children_ready, arc_done_func_t *physdone,
    arc_done_func_t *done, void *private, zio_priority_t priority,
    int zio_flags, const zbookmark_phys_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_write_callback_t *callback;
	zio_t *zio;
	zio_prop_t localprop = *zp;

	ASSERT3P(ready, !=, NULL);
	ASSERT3P(done, !=, NULL);
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
	ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
	if (l2arc)
		arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
	if (ARC_BUF_COMPRESSED(buf)) {
		/*
		 * We're writing a pre-compressed buffer.  Make the
		 * compression algorithm requested by the zio_prop_t match
		 * the pre-compressed buffer's compression algorithm.
		 */
		localprop.zp_compress = HDR_GET_COMPRESS(hdr);

		ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
		zio_flags |= ZIO_FLAG_RAW;
	}
	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
	callback->awcb_ready = ready;
	callback->awcb_children_ready = children_ready;
	callback->awcb_physdone = physdone;
	callback->awcb_done = done;
	callback->awcb_private = private;
	callback->awcb_buf = buf;

	/*
	 * The hdr's b_pabd is now stale, free it now. A new data block
	 * will be allocated when the zio pipeline calls arc_write_ready().
	 */
	if (hdr->b_l1hdr.b_pabd != NULL) {
		/*
		 * If the buf is currently sharing the data block with
		 * the hdr then we need to break that relationship here.
		 * The hdr will remain with a NULL data pointer and the
		 * buf will take sole ownership of the block.
		 */
		if (arc_buf_is_shared(buf)) {
			arc_unshare_buf(hdr, buf);
		} else {
			arc_hdr_free_pabd(hdr);
		}
		VERIFY3P(buf->b_data, !=, NULL);
		arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
	}
	ASSERT(!arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	zio = zio_write(pio, spa, txg, bp,
	    abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
	    HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
	    (children_ready != NULL) ? arc_write_children_ready : NULL,
	    arc_write_physdone, arc_write_done, callback,
	    priority, zio_flags, zb);

	return (zio);
}
static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
#ifdef _KERNEL
	uint64_t available_memory = ptob(freemem);
	static uint64_t page_load = 0;
	static uint64_t last_txg = 0;

#if defined(__i386)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif

	if (freemem > physmem * arc_lotsfree_percent / 100)
		return (0);

	if (txg > last_txg) {
		last_txg = txg;
		page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (curproc == proc_pageout) {
		if (page_load > MAX(ptob(minfree), available_memory) / 4)
			return (SET_ERROR(ERESTART));
		/* Note: reserve is inflated, so we deflate */
		page_load += reserve / 8;
		return (0);
	} else if (page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (SET_ERROR(EAGAIN));
	}
	page_load = 0;
#endif
	return (0);
}
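/*
 * Worked example of the pageout check above (illustrative numbers): with
 * 1 GB of available memory and minfree mapping to 64 MB, pageout-context
 * reservations begin failing with ERESTART once page_load exceeds
 * MAX(64 MB, 1 GB) / 4 = 256 MB of (deflated) reserved pages for this txg.
 */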
void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

	if (reserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, reserve * 4);
	if (reserve > arc_c)
		return (SET_ERROR(ENOMEM));

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */

	/* assert that it has not wrapped around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);

	anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
	    arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	error = arc_memory_throttle(reserve, txg);
	if (error != 0)
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 */

	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
	    anon_size > arc_c / 4) {
		uint64_t meta_esize =
		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
		uint64_t data_esize =
		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve >> 10, meta_esize >> 10,
		    data_esize >> 10, reserve >> 10, arc_c >> 10);
		return (SET_ERROR(ERESTART));
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}
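/*
 * Worked example of the dirty-data throttle above (illustrative numbers):
 * with arc_c at 4 GB, a reservation is turned back with ERESTART once
 * reserve + arc_tempreserve + anon_size exceeds 2 GB (arc_c / 2) while
 * anonymous data alone exceeds 1 GB (arc_c / 4).
 */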
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
    kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
	size->value.ui64 = refcount_count(&state->arcs_size);
	evict_data->value.ui64 =
	    refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
	evict_metadata->value.ui64 =
	    refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
	arc_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	} else {
		arc_kstat_update_state(arc_anon,
		    &as->arcstat_anon_size,
		    &as->arcstat_anon_evictable_data,
		    &as->arcstat_anon_evictable_metadata);
		arc_kstat_update_state(arc_mru,
		    &as->arcstat_mru_size,
		    &as->arcstat_mru_evictable_data,
		    &as->arcstat_mru_evictable_metadata);
		arc_kstat_update_state(arc_mru_ghost,
		    &as->arcstat_mru_ghost_size,
		    &as->arcstat_mru_ghost_evictable_data,
		    &as->arcstat_mru_ghost_evictable_metadata);
		arc_kstat_update_state(arc_mfu,
		    &as->arcstat_mfu_size,
		    &as->arcstat_mfu_evictable_data,
		    &as->arcstat_mfu_evictable_metadata);
		arc_kstat_update_state(arc_mfu_ghost,
		    &as->arcstat_mfu_ghost_size,
		    &as->arcstat_mfu_ghost_evictable_data,
		    &as->arcstat_mfu_ghost_evictable_metadata);
	}

	return (0);
}
/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the ARC eviction
 * code is laid out; arc_evict_state() assumes ARC buffers are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
	arc_buf_hdr_t *hdr = obj;

	/*
	 * We rely on b_dva to generate evenly distributed index
	 * numbers using buf_hash below. So, as an added precaution,
	 * let's make sure we never add empty buffers to the arc lists.
	 */
	ASSERT(!HDR_EMPTY(hdr));

	/*
	 * The assumption here, is the hash value for a given
	 * arc_buf_hdr_t will remain constant throughout its lifetime
	 * (i.e. its b_spa, b_dva, and b_birth fields don't change).
	 * Thus, we don't need to store the header's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublists' usage
	 * would not be evenly distributed.
	 */
	return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
	    multilist_get_num_sublists(ml));
}
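/*
 * For example, a multilist with 8 sublists maps a header to sublist
 * buf_hash(b_spa, b_dva, b_birth) % 8; because those fields never change
 * for a given header, insertion and removal always compute the same index.
 */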
static void
arc_state_init(void)
{
	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_l2c_only = &ARC_l2c_only;

	arc_mru->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru_ghost->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_l2c_only->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_l2c_only->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);

	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

	refcount_create(&arc_anon->arcs_size);
	refcount_create(&arc_mru->arcs_size);
	refcount_create(&arc_mru_ghost->arcs_size);
	refcount_create(&arc_mfu->arcs_size);
	refcount_create(&arc_mfu_ghost->arcs_size);
	refcount_create(&arc_l2c_only->arcs_size);
}
static void
arc_state_fini(void)
{
	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

	refcount_destroy(&arc_anon->arcs_size);
	refcount_destroy(&arc_mru->arcs_size);
	refcount_destroy(&arc_mru_ghost->arcs_size);
	refcount_destroy(&arc_mfu->arcs_size);
	refcount_destroy(&arc_mfu_ghost->arcs_size);
	refcount_destroy(&arc_l2c_only->arcs_size);

	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
}
	/*
	 * allmem is "all memory that we could possibly use".
	 */
#ifdef _KERNEL
	uint64_t allmem = ptob(physmem - swapfs_minfree);
#else
	uint64_t allmem = (physmem * PAGESIZE) / 2;
#endif

	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc_c_min = MAX(allmem / 32, 64 << 20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (allmem >= 1 << 30)
		arc_c_max = allmem - (1 << 30);
	else
		arc_c_max = arc_c_min;
	arc_c_max = MAX(allmem * 3 / 4, arc_c_max);

	/*
	 * In userland, there's only the memory pressure that we artificially
	 * create (see arc_available_memory()).  Don't let arc_c get too
	 * small, because it can cause transactions to be larger than
	 * arc_c, causing arc_tempreserve_space() to fail.
	 */
#ifndef _KERNEL
	arc_c_min = arc_c_max / 2;
#endif

	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (ie. over 64MB)
	 */
	if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem) {
		arc_c_max = zfs_arc_max;
		arc_c_min = MIN(arc_c_min, arc_c_max);
	}
	if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* limit meta-data to 1/4 of the arc capacity */
	arc_meta_limit = arc_c_max / 4;

#ifdef _KERNEL
	/*
	 * Metadata is stored in the kernel's heap.  Don't let us
	 * use more than half the heap for the ARC.
	 */
	arc_meta_limit = MIN(arc_meta_limit,
	    vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2);
#endif

	/* Allow the tunable to override if it is reasonable */
	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
		arc_meta_limit = zfs_arc_meta_limit;

	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
		arc_c_min = arc_meta_limit / 2;

	if (zfs_arc_meta_min > 0) {
		arc_meta_min = zfs_arc_meta_min;
	} else {
		arc_meta_min = arc_c_min / 2;
	}

	if (zfs_arc_grow_retry > 0)
		arc_grow_retry = zfs_arc_grow_retry;

	if (zfs_arc_shrink_shift > 0)
		arc_shrink_shift = zfs_arc_shrink_shift;

	/*
	 * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
	 */
	if (arc_no_grow_shift >= arc_shrink_shift)
		arc_no_grow_shift = arc_shrink_shift - 1;

	if (zfs_arc_p_min_shift > 0)
		arc_p_min_shift = zfs_arc_p_min_shift;

	/* if kmem_flags are set, lets try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_state_init();
	buf_init();

	arc_reclaim_thread_exit = B_FALSE;

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		arc_ksp->ks_update = arc_kstat_update;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	/*
	 * Calculate maximum amount of dirty data per pool.
	 *
	 * If it has been set by /etc/system, take that.
	 * Otherwise, use a percentage of physical memory defined by
	 * zfs_dirty_data_max_percent (default 10%) with a cap at
	 * zfs_dirty_data_max_max (default 4GB).
	 */
	if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = physmem * PAGESIZE *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
	}
}
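/*
 * Worked example of the sizing above (illustrative, ignoring swapfs_minfree
 * and assuming no tunables are set): on a 16 GB system, arc_c_min becomes
 * MAX(16 GB / 32, 64 MB) = 512 MB, arc_c_max becomes
 * MAX(16 GB * 3/4, 16 GB - 1 GB) = 15 GB, and arc_meta_limit defaults to
 * arc_c_max / 4.
 */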
void
arc_fini(void)
{
	mutex_enter(&arc_reclaim_lock);
	arc_reclaim_thread_exit = B_TRUE;
	/*
	 * The reclaim thread will set arc_reclaim_thread_exit back to
	 * B_FALSE when it is finished exiting; we're waiting for that.
	 */
	while (arc_reclaim_thread_exit) {
		cv_signal(&arc_reclaim_thread_cv);
		cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
	}
	mutex_exit(&arc_reclaim_lock);

	/* Use B_TRUE to ensure *all* buffers are evicted */
	arc_flush(NULL, B_TRUE);

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	mutex_destroy(&arc_reclaim_lock);
	cv_destroy(&arc_reclaim_thread_cv);
	cv_destroy(&arc_reclaim_waiters_cv);

	arc_state_fini();
	buf_fini();

	ASSERT0(arc_loaned_bytes);
}
/*
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there. It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction. If a compressible buffer is
 * found during scanning and selected for writing to an L2ARC device, we
 * temporarily boost scanning headroom during the next scan cycle to make
 * sure we adapt to compression effects (which might significantly reduce
 * the data volume we write to L2ARC). The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   :   ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                          headroom           |
 *	                              l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |           |
 *	                         V           |
 *	          +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	          +==============================+
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from tail of these
 * lists as pictured, the l2arc_feed_thread() will search from the list heads
 * for eligible buffers, greatly increasing its chance of finding them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present in the scanned lists
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
 */
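/*
 * Putting the three together, one pass of the feed cycle looks roughly like
 * the sketch below (a simplification of l2arc_feed_thread(), which is the
 * authoritative version further down):
 *
 *	size = l2arc_write_size();			how much to write
 *	l2arc_evict(dev, size, B_FALSE);		clear space ahead
 *	wrote = l2arc_write_buffers(spa, dev, size);	only eligible buffers
 *	next = l2arc_write_interval(begin, size, wrote); how long to sleep
 */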
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
	    HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
		return (B_FALSE);

	return (B_TRUE);
}
static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}
/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}
/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write()
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT3P(df->l2df_abd, !=, NULL);
		abd_free(df->l2df_abd);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}
/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	int64_t bytes_dropped = 0;

	cb = zio->io_private;
	ASSERT3P(cb, !=, NULL);
	dev = cb->l2wcb_dev;
	ASSERT3P(dev, !=, NULL);
	head = cb->l2wcb_head;
	ASSERT3P(head, !=, NULL);
	buflist = &dev->l2ad_buflist;
	ASSERT3P(buflist, !=, NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	/*
	 * All writes completed, or an error was hit.
	 */
top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock. We must retry so we
			 * don't leave the ARC_FLAG_L2_WRITING bit set.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);

			/*
			 * We don't want to rescan the headers we've
			 * already marked as having been written out, so
			 * we reinsert the head node so we can pick up
			 * where we left off.
			 */
			list_remove(buflist, head);
			list_insert_after(buflist, hdr, head);

			mutex_exit(&dev->l2ad_mtx);

			/*
			 * We wait for the hash lock to become available
			 * to try and prevent busy waiting, and increase
			 * the chance we'll be able to acquire the lock
			 * the next time around.
			 */
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * We could not have been moved into the arc_l2c_only
		 * state while in-flight due to our ARC_FLAG_L2_WRITING
		 * bit being set. Let's just ensure that's being enforced.
		 */
		ASSERT(HDR_HAS_L1HDR(hdr));

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, hdr);
			arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);

			ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr));
			ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

			bytes_dropped += arc_hdr_size(hdr);
			(void) refcount_remove_many(&dev->l2ad_alloc,
			    arc_hdr_size(hdr), hdr);
		}

		/*
		 * Allow ARC to begin reads and ghost list evictions to
		 * this L2ARC entry.
		 */
		arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	ASSERT(!HDR_HAS_L1HDR(head));
	kmem_cache_free(hdr_l2only_cache, head);
	mutex_exit(&dev->l2ad_mtx);

	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}
/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	boolean_t valid_cksum;

	ASSERT3P(zio->io_vd, !=, NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT3P(cb, !=, NULL);
	hdr = cb->l2rcb_hdr;
	ASSERT3P(hdr, !=, NULL);

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * If the data was read into a temporary buffer,
	 * move it and free the buffer.
	 */
	if (cb->l2rcb_abd != NULL) {
		ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
		if (zio->io_error == 0) {
			abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd,
			    arc_hdr_size(hdr));
		}

		/*
		 * The following must be done regardless of whether
		 * there was an error:
		 * - free the temporary buffer
		 * - point zio to the real ARC buffer
		 * - set zio size accordingly
		 * These are required because zio is either re-used for
		 * an I/O of the block in the case of the error
		 * or the zio is passed to arc_read_done() and it
		 * needs real data.
		 */
		abd_free(cb->l2rcb_abd);
		zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
		zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
	}

	ASSERT3P(zio->io_abd, !=, NULL);

	/*
	 * Check this survived the L2ARC journey.
	 */
	ASSERT3P(zio->io_abd, ==, hdr->b_l1hdr.b_pabd);
	zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
	zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/

	valid_cksum = arc_cksum_is_equal(hdr, zio);
	if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = hdr;
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = SET_ERROR(EIO);
		}
		if (!valid_cksum)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp,
			    hdr->b_l1hdr.b_pabd, zio->io_size, arc_read_done,
			    hdr, zio->io_priority, cb->l2rcb_flags,
			    &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
	multilist_t *ml = NULL;
	unsigned int idx;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		ml = arc_mfu->arcs_list[ARC_BUFC_METADATA];
		break;
	case 1:
		ml = arc_mru->arcs_list[ARC_BUFC_METADATA];
		break;
	case 2:
		ml = arc_mfu->arcs_list[ARC_BUFC_DATA];
		break;
	case 3:
		ml = arc_mru->arcs_list[ARC_BUFC_DATA];
		break;
	}

	/*
	 * Return a randomly-selected sublist. This is acceptable
	 * because the caller feeds only a little bit of data for each
	 * call (8MB). Subsequent calls will result in different
	 * sublists being selected.
	 */
	idx = multilist_get_random_index(ml);
	return (multilist_sublist_lock(ml, idx));
}
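/*
 * A typical caller iterates list_num from 0 through 3, exactly as
 * l2arc_write_buffers() does below, so MFU and MRU metadata are always
 * considered for caching before MFU and MRU data.
 */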
/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes. This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	arc_buf_hdr_t *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = &dev->l2ad_buflist;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device. There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}
	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * A header can't be on this list if it doesn't have L2 header.
		 */
		ASSERT(HDR_HAS_L2HDR(hdr));

		/* Ensure this header has finished being written. */
		ASSERT(!HDR_L2_WRITING(hdr));
		ASSERT(!HDR_L2_WRITE_HEAD(hdr));

		if (!all && (hdr->b_l2hdr.b_daddr >= taddr ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC. Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_lsize.
			 */
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
			}

			arc_hdr_l2hdr_destroy(hdr);
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);
}
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The headroom_boost is an in-out parameter used to maintain headroom boost
 * state between calls to this function.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *hdr, *hdr_prev, *head;
	uint64_t write_asize, write_psize, write_lsize, headroom;
	boolean_t full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);

	ASSERT3P(dev->l2ad_vdev, !=, NULL);

	pio = NULL;
	write_lsize = write_asize = write_psize = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
	arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);

	/*
	 * Copy buffers for L2ARC writing.
	 */
	for (int try = 0; try <= 3; try++) {
		multilist_sublist_t *mls = l2arc_sublist_lock(try);
		uint64_t passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			hdr = multilist_sublist_head(mls);
		else
			hdr = multilist_sublist_tail(mls);

		headroom = target_sz * l2arc_headroom;
		if (zfs_compressed_arc_enabled)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; hdr; hdr = hdr_prev) {
			kmutex_t *hash_lock;

			if (arc_warm == B_FALSE)
				hdr_prev = multilist_sublist_next(mls, hdr);
			else
				hdr_prev = multilist_sublist_prev(mls, hdr);

			hash_lock = HDR_LOCK(hdr);
			if (!mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += HDR_GET_LSIZE(hdr);
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, hdr)) {
				mutex_exit(hash_lock);
				continue;
			}

			/*
			 * We rely on the L1 portion of the header below, so
			 * it's invalid for this header to have been evicted out
			 * of the ghost cache, prior to being written out. The
			 * ARC_FLAG_L2_WRITING bit ensures this won't happen.
			 */
			ASSERT(HDR_HAS_L1HDR(hdr));

			ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			ASSERT3U(arc_hdr_size(hdr), >, 0);
			uint64_t psize = arc_hdr_size(hdr);
			uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
			    psize);

			if ((write_asize + asize) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				mutex_enter(&dev->l2ad_mtx);
				list_insert_head(&dev->l2ad_buflist, head);
				mutex_exit(&dev->l2ad_mtx);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			hdr->b_l2hdr.b_dev = dev;
			hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
			arc_hdr_set_flags(hdr,
			    ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR);

			mutex_enter(&dev->l2ad_mtx);
			list_insert_head(&dev->l2ad_buflist, hdr);
			mutex_exit(&dev->l2ad_mtx);

			(void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);

			/*
			 * Normally the L2ARC can use the hdr's data, but if
			 * we're sharing data between the hdr and one of its
			 * bufs, L2ARC needs its own copy of the data so that
			 * the ZIO below can't race with the buf consumer.
			 * Another case where we need to create a copy of the
			 * data is when the buffer size is not device-aligned
			 * and we need to pad the block to make it such.
			 * That also keeps the clock hand suitably aligned.
			 *
			 * To ensure that the copy will be available for the
			 * lifetime of the ZIO and be cleaned up afterwards, we
			 * add it to the l2arc_free_on_write queue.
			 */
			abd_t *to_write;
			if (!HDR_SHARED_DATA(hdr) && psize == asize) {
				to_write = hdr->b_l1hdr.b_pabd;
			} else {
				to_write = abd_alloc_for_io(asize,
				    HDR_ISTYPE_METADATA(hdr));
				abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
				if (asize != psize) {
					abd_zero_off(to_write, psize,
					    asize - psize);
				}
				l2arc_free_abd_on_write(to_write, asize,
				    arc_buf_type(hdr));
			}
			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    hdr->b_l2hdr.b_daddr, asize, to_write,
			    ZIO_CHECKSUM_OFF, NULL, hdr,
			    ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			write_lsize += HDR_GET_LSIZE(hdr);
			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);

			write_psize += psize;
			write_asize += asize;
			dev->l2ad_hand += asize;

			mutex_exit(hash_lock);

			(void) zio_nowait(wzio);
		}

		multilist_sublist_unlock(mls);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_lsize);
		ASSERT(!HDR_HAS_L1HDR(head));
		kmem_cache_free(hdr_l2only_cache, head);
		return (0);
	}

	ASSERT3U(write_asize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
	ARCSTAT_INCR(arcstat_l2_lsize, write_lsize);
	ARCSTAT_INCR(arcstat_l2_psize, write_psize);
	vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}
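/*
 * Example of the padding path above: a 5 KB compressed header headed to a
 * device with an 8 KB allocation size is copied into an 8 KB ABD, the final
 * 3 KB is zeroed with abd_zero_off(), and the copy is queued on
 * l2arc_free_on_write so it outlives the write zio.  This keeps the device
 * write hand aligned to asize.
 */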
/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void *unused)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.   This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT3P(spa, !=, NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size();

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}
/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;

	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
	refcount_create(&adddev->l2ad_alloc);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
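/*
 * Note that a freshly added device starts with l2ad_hand at l2ad_start and
 * l2ad_first set, so the first sweep of l2arc_evict() returns immediately;
 * nothing needs to be evicted until the write hand has wrapped at least once.
 */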
/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT3P(remdev, !=, NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(&remdev->l2ad_buflist);
	mutex_destroy(&remdev->l2ad_mtx);
	refcount_destroy(&remdev->l2ad_alloc);
	kmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}