4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2018, Joyent, Inc.
24 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
26 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
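 *
 * For example (illustrative numbers only, not taken from the code
 * below): to make room for a 128K block, the eviction code may end up
 * removing one 112K buffer and one 16K buffer from the tail of an
 * evictable list, rather than a single fixed-size page as in the
 * original algorithm.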
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal ARC algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * ARC list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each ARC state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an ARC list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
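 *
 * An illustrative sketch of that rule (a hypothetical fragment, not a
 * verbatim excerpt from the eviction code): while an ARC list lock is
 * held, a hash lock is only taken with mutex_tryenter(), and the buffer
 * is skipped on failure instead of blocking:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;	(move on to the next header)
 *	}
 *	... evict or re-link hdr ...
 *	mutex_exit(hash_lock);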
102 * Note that the majority of the performance stats are manipulated
103 * with atomic operations.
105 * The L2ARC uses the l2ad_mtx on each vdev for the following:
107 * - L2ARC buflist creation
108 * - L2ARC buflist eviction
109 * - L2ARC write completion, which walks L2ARC buflists
110 * - ARC header destruction, as it removes from L2ARC buflists
111 * - ARC header release, as it removes from L2ARC buflists
117 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
118 * This structure can point either to a block that is still in the cache or to
119 * one that is only accessible in an L2 ARC device, or it can provide
120 * information about a block that was recently evicted. If a block is
121 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
122 * information to retrieve it from the L2ARC device. This information is
 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. The data
 * of a block in this state cannot be accessed directly.
126 * Blocks that are actively being referenced or have not been evicted
127 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
128 * the arc_buf_hdr_t that will point to the data block in memory. A block can
129 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
130 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
131 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
133 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
134 * ability to store the physical data (b_pabd) associated with the DVA of the
135 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
136 * it will match its on-disk compression characteristics. This behavior can be
137 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
138 * compressed ARC functionality is disabled, the b_pabd will point to an
139 * uncompressed version of the on-disk data.
141 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
142 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
143 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
144 * consumer. The ARC will provide references to this data and will keep it
145 * cached until it is no longer in use. The ARC caches only the L1ARC's physical
146 * data block and will evict any arc_buf_t that is no longer referenced. The
147 * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
148 * "overhead_size" kstat.
150 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
151 * compressed form. The typical case is that consumers will want uncompressed
152 * data, and when that happens a new data buffer is allocated where the data is
153 * decompressed for them to use. Currently the only consumer who wants
154 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
155 * exists on disk. When this happens, the arc_buf_t's data buffer is shared
156 * with the arc_buf_hdr_t.
158 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
159 * first one is owned by a compressed send consumer (and therefore references
160 * the same compressed data buffer as the arc_buf_hdr_t) and the second could be
161 * used by any other consumer (and has its own uncompressed copy of the data
176 * | b_buf +------------>+-----------+ arc_buf_t
177 * | b_pabd +-+ |b_next +---->+-----------+
178 * +-----------+ | |-----------| |b_next +-->NULL
179 * | |b_comp = T | +-----------+
180 * | |b_data +-+ |b_comp = F |
181 * | +-----------+ | |b_data +-+
182 * +->+------+ | +-----------+ |
184 * data | |<--------------+ | uncompressed
185 * +------+ compressed, | data
186 * shared +-->+------+
191 * When a consumer reads a block, the ARC must first look to see if the
192 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
193 * arc_buf_t and either copies uncompressed data into a new data buffer from an
194 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
195 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
196 * hdr is compressed and the desired compression characteristics of the
197 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
198 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
199 * the last buffer in the hdr's b_buf list, however a shared compressed buf can
200 * be anywhere in the hdr's list.
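 *
 * A condensed sketch of that decision (illustrative pseudocode, not a
 * verbatim excerpt from the buffer-allocation path):
 *
 *	if (consumer wants compressed data and b_pabd is compressed)
 *		share b_pabd with the new arc_buf_t;
 *	else if (an uncompressed arc_buf_t already exists on the hdr)
 *		copy its b_data into a newly allocated buffer;
 *	else if (the hdr's b_pabd is uncompressed)
 *		share (or copy) b_pabd;
 *	else
 *		decompress b_pabd into a newly allocated buffer;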
202 * The diagram below shows an example of an uncompressed ARC hdr that is
203 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
204 * the last element in the buf list):
216 * | | arc_buf_t (shared)
217 * | b_buf +------------>+---------+ arc_buf_t
218 * | | |b_next +---->+---------+
219 * | b_pabd +-+ |---------| |b_next +-->NULL
220 * +-----------+ | | | +---------+
222 * | +---------+ | |b_data +-+
223 * +->+------+ | +---------+ |
225 * uncompressed | | | |
228 * | uncompressed | | |
231 * +---------------------------------+
233 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
234 * since the physical block is about to be rewritten. The new data contents
235 * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
236 * it may compress the data before writing it to disk. The ARC will be called
237 * with the transformed data and will bcopy the transformed on-disk block into
238 * a newly allocated b_pabd. Writes are always done into buffers which have
239 * either been loaned (and hence are new and don't have other readers) or
240 * buffers which have been released (and hence have their own hdr, if there
241 * were originally other readers of the buf's original hdr). This ensures that
242 * the ARC only needs to update a single buf and its hdr after a write occurs.
244 * When the L2ARC is in use, it will also take advantage of the b_pabd. The
245 * L2ARC will always write the contents of b_pabd to the L2ARC. This means
 * that when compressed ARC is enabled, the L2ARC blocks are identical
 * to the on-disk block in the main data pool. This provides a significant
248 * advantage since the ARC can leverage the bp's checksum when reading from the
249 * L2ARC to determine if the contents are valid. However, if the compressed
250 * ARC is disabled, then the L2ARC's block must be transformed to look
251 * like the physical block in the main data pool before comparing the
252 * checksum and determining its validity.
257 #include <sys/spa_impl.h>
258 #include <sys/zio_compress.h>
259 #include <sys/zio_checksum.h>
260 #include <sys/zfs_context.h>
262 #include <sys/refcount.h>
263 #include <sys/vdev.h>
264 #include <sys/vdev_impl.h>
265 #include <sys/dsl_pool.h>
266 #include <sys/zio_checksum.h>
267 #include <sys/multilist.h>
270 #include <sys/vmsystm.h>
272 #include <sys/fs/swapnode.h>
273 #include <sys/dnlc.h>
275 #include <sys/callb.h>
276 #include <sys/kstat.h>
277 #include <zfs_fletcher.h>
278 #include <sys/aggsum.h>
279 #include <sys/cityhash.h>
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;

static kmutex_t arc_reclaim_lock;
static kcondvar_t arc_reclaim_thread_cv;
static boolean_t arc_reclaim_thread_exit;
static kcondvar_t arc_reclaim_waiters_cv;

uint_t arc_reduce_dnlc_percent = 3;

/*
 * The number of headers to evict in arc_evict_state_impl() before
 * dropping the sublist lock and evicting from another sublist. A lower
 * value means we're more likely to evict the "correct" header (i.e. the
 * oldest header in the arc state), but comes with higher overhead
 * (i.e. more invocations of arc_evict_state_impl()).
 */
int zfs_arc_evict_batch_limit = 10;

/* number of seconds before growing cache again */
static int arc_grow_retry = 60;

/* number of milliseconds before attempting a kmem-cache-reap */
static int arc_kmem_cache_reap_retry_ms = 1000;

/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;

/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 7;

/*
 * log2(fraction of ARC which must be free to allow growing).
 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * from the ARC.
 *
 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 * we will still not allow it to grow.
 */
int arc_no_grow_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * log2 fraction of the zio arena to keep free.
 */
int arc_zio_arena_free_shift = 2;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
uint64_t zfs_arc_meta_min = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */

boolean_t zfs_compressed_arc_enabled = B_TRUE;
368 * Note that buffers can be in one of 6 states:
369 * ARC_anon - anonymous (discussed below)
370 * ARC_mru - recently used, currently cached
 * ARC_mru_ghost - recently used, no longer in cache
372 * ARC_mfu - frequently used, currently cached
373 * ARC_mfu_ghost - frequently used, no longer in cache
374 * ARC_l2c_only - exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
377 * the only buffers that can be evicted or deleted. Within each
378 * state there are multiple lists, one for meta-data and one for
379 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
380 * etc.) is tracked separately so that it can be managed more
381 * explicitly: favored over data, limited explicitly.
383 * Anonymous buffers are buffers that are not associated with
384 * a DVA. These are buffers that hold dirty block copies
385 * before they are written to stable storage. By definition,
386 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
390 * The ARC_l2c_only state is for buffers that are in the second
391 * level ARC but no longer in any of the ARC_m* lists. The second
392 * level ARC itself may also contain buffers that are in any of
393 * the ARC_m* states - meaning that a buffer can exist in two
394 * places. The reason for the ARC_l2c_only state is to keep the
395 * buffer header in the hash table, so that reads that hit the
396 * second level ARC benefit from these fast lookups.
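 *
 * As a concrete, illustrative walk through these states (not a verbatim
 * description of any one code path): a demand read that misses lands the
 * new buffer in ARC_mru; a second hit while it is still cached promotes
 * it to ARC_mfu; if it is later evicted, only its header survives on the
 * matching ghost list (ARC_mru_ghost or ARC_mfu_ghost); and a subsequent
 * hit on that ghost header is used to adapt the target sizes of the real
 * lists in favor of the list that was hit.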
399 typedef struct arc_state
{
401 * list of evictable buffers
403 multilist_t
*arcs_list
[ARC_BUFC_NUMTYPES
];
405 * total amount of evictable data in this state
407 refcount_t arcs_esize
[ARC_BUFC_NUMTYPES
];
409 * total amount of data in this state; this includes: evictable,
410 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
412 refcount_t arcs_size
;
416 static arc_state_t ARC_anon
;
417 static arc_state_t ARC_mru
;
418 static arc_state_t ARC_mru_ghost
;
419 static arc_state_t ARC_mfu
;
420 static arc_state_t ARC_mfu_ghost
;
421 static arc_state_t ARC_l2c_only
;
423 typedef struct arc_stats
{
424 kstat_named_t arcstat_hits
;
425 kstat_named_t arcstat_misses
;
426 kstat_named_t arcstat_demand_data_hits
;
427 kstat_named_t arcstat_demand_data_misses
;
428 kstat_named_t arcstat_demand_metadata_hits
;
429 kstat_named_t arcstat_demand_metadata_misses
;
430 kstat_named_t arcstat_prefetch_data_hits
;
431 kstat_named_t arcstat_prefetch_data_misses
;
432 kstat_named_t arcstat_prefetch_metadata_hits
;
433 kstat_named_t arcstat_prefetch_metadata_misses
;
434 kstat_named_t arcstat_mru_hits
;
435 kstat_named_t arcstat_mru_ghost_hits
;
436 kstat_named_t arcstat_mfu_hits
;
437 kstat_named_t arcstat_mfu_ghost_hits
;
438 kstat_named_t arcstat_deleted
;
440 * Number of buffers that could not be evicted because the hash lock
441 * was held by another thread. The lock may not necessarily be held
442 * by something using the same buffer, since hash locks are shared
443 * by multiple buffers.
445 kstat_named_t arcstat_mutex_miss
;
447 * Number of buffers skipped because they have I/O in progress, are
 * indirect prefetch buffers that have not lived long enough, or are
449 * not from the spa we're trying to evict from.
451 kstat_named_t arcstat_evict_skip
;
453 * Number of times arc_evict_state() was unable to evict enough
 * buffers to reach its target amount.
456 kstat_named_t arcstat_evict_not_enough
;
457 kstat_named_t arcstat_evict_l2_cached
;
458 kstat_named_t arcstat_evict_l2_eligible
;
459 kstat_named_t arcstat_evict_l2_ineligible
;
460 kstat_named_t arcstat_evict_l2_skip
;
461 kstat_named_t arcstat_hash_elements
;
462 kstat_named_t arcstat_hash_elements_max
;
463 kstat_named_t arcstat_hash_collisions
;
464 kstat_named_t arcstat_hash_chains
;
465 kstat_named_t arcstat_hash_chain_max
;
466 kstat_named_t arcstat_p
;
467 kstat_named_t arcstat_c
;
468 kstat_named_t arcstat_c_min
;
469 kstat_named_t arcstat_c_max
;
470 /* Not updated directly; only synced in arc_kstat_update. */
471 kstat_named_t arcstat_size
;
473 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
474 * Note that the compressed bytes may match the uncompressed bytes
475 * if the block is either not compressed or compressed arc is disabled.
477 kstat_named_t arcstat_compressed_size
;
479 * Uncompressed size of the data stored in b_pabd. If compressed
480 * arc is disabled then this value will be identical to the stat
483 kstat_named_t arcstat_uncompressed_size
;
485 * Number of bytes stored in all the arc_buf_t's. This is classified
486 * as "overhead" since this data is typically short-lived and will
487 * be evicted from the arc when it becomes unreferenced unless the
488 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
489 * values have been set (see comment in dbuf.c for more information).
491 kstat_named_t arcstat_overhead_size
;
493 * Number of bytes consumed by internal ARC structures necessary
494 * for tracking purposes; these structures are not actually
495 * backed by ARC buffers. This includes arc_buf_hdr_t structures
496 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
497 * caches), and arc_buf_t structures (allocated via arc_buf_t
499 * Not updated directly; only synced in arc_kstat_update.
501 kstat_named_t arcstat_hdr_size
;
503 * Number of bytes consumed by ARC buffers of type equal to
504 * ARC_BUFC_DATA. This is generally consumed by buffers backing
505 * on disk user data (e.g. plain file contents).
506 * Not updated directly; only synced in arc_kstat_update.
508 kstat_named_t arcstat_data_size
;
510 * Number of bytes consumed by ARC buffers of type equal to
511 * ARC_BUFC_METADATA. This is generally consumed by buffers
512 * backing on disk data that is used for internal ZFS
513 * structures (e.g. ZAP, dnode, indirect blocks, etc).
514 * Not updated directly; only synced in arc_kstat_update.
516 kstat_named_t arcstat_metadata_size
;
518 * Number of bytes consumed by various buffers and structures
519 * not actually backed with ARC buffers. This includes bonus
520 * buffers (allocated directly via zio_buf_* functions),
521 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
522 * cache), and dnode_t structures (allocated via dnode_t cache).
523 * Not updated directly; only synced in arc_kstat_update.
525 kstat_named_t arcstat_other_size
;
527 * Total number of bytes consumed by ARC buffers residing in the
528 * arc_anon state. This includes *all* buffers in the arc_anon
529 * state; e.g. data, metadata, evictable, and unevictable buffers
530 * are all included in this value.
531 * Not updated directly; only synced in arc_kstat_update.
533 kstat_named_t arcstat_anon_size
;
535 * Number of bytes consumed by ARC buffers that meet the
536 * following criteria: backing buffers of type ARC_BUFC_DATA,
537 * residing in the arc_anon state, and are eligible for eviction
538 * (e.g. have no outstanding holds on the buffer).
539 * Not updated directly; only synced in arc_kstat_update.
541 kstat_named_t arcstat_anon_evictable_data
;
543 * Number of bytes consumed by ARC buffers that meet the
544 * following criteria: backing buffers of type ARC_BUFC_METADATA,
545 * residing in the arc_anon state, and are eligible for eviction
546 * (e.g. have no outstanding holds on the buffer).
547 * Not updated directly; only synced in arc_kstat_update.
549 kstat_named_t arcstat_anon_evictable_metadata
;
551 * Total number of bytes consumed by ARC buffers residing in the
552 * arc_mru state. This includes *all* buffers in the arc_mru
553 * state; e.g. data, metadata, evictable, and unevictable buffers
554 * are all included in this value.
555 * Not updated directly; only synced in arc_kstat_update.
557 kstat_named_t arcstat_mru_size
;
559 * Number of bytes consumed by ARC buffers that meet the
560 * following criteria: backing buffers of type ARC_BUFC_DATA,
561 * residing in the arc_mru state, and are eligible for eviction
562 * (e.g. have no outstanding holds on the buffer).
563 * Not updated directly; only synced in arc_kstat_update.
565 kstat_named_t arcstat_mru_evictable_data
;
567 * Number of bytes consumed by ARC buffers that meet the
568 * following criteria: backing buffers of type ARC_BUFC_METADATA,
569 * residing in the arc_mru state, and are eligible for eviction
570 * (e.g. have no outstanding holds on the buffer).
571 * Not updated directly; only synced in arc_kstat_update.
573 kstat_named_t arcstat_mru_evictable_metadata
;
575 * Total number of bytes that *would have been* consumed by ARC
576 * buffers in the arc_mru_ghost state. The key thing to note
577 * here, is the fact that this size doesn't actually indicate
578 * RAM consumption. The ghost lists only consist of headers and
579 * don't actually have ARC buffers linked off of these headers.
580 * Thus, *if* the headers had associated ARC buffers, these
581 * buffers *would have* consumed this number of bytes.
582 * Not updated directly; only synced in arc_kstat_update.
584 kstat_named_t arcstat_mru_ghost_size
;
586 * Number of bytes that *would have been* consumed by ARC
587 * buffers that are eligible for eviction, of type
588 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
589 * Not updated directly; only synced in arc_kstat_update.
591 kstat_named_t arcstat_mru_ghost_evictable_data
;
593 * Number of bytes that *would have been* consumed by ARC
594 * buffers that are eligible for eviction, of type
595 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
596 * Not updated directly; only synced in arc_kstat_update.
598 kstat_named_t arcstat_mru_ghost_evictable_metadata
;
600 * Total number of bytes consumed by ARC buffers residing in the
601 * arc_mfu state. This includes *all* buffers in the arc_mfu
602 * state; e.g. data, metadata, evictable, and unevictable buffers
603 * are all included in this value.
604 * Not updated directly; only synced in arc_kstat_update.
606 kstat_named_t arcstat_mfu_size
;
608 * Number of bytes consumed by ARC buffers that are eligible for
609 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
611 * Not updated directly; only synced in arc_kstat_update.
613 kstat_named_t arcstat_mfu_evictable_data
;
615 * Number of bytes consumed by ARC buffers that are eligible for
616 * eviction, of type ARC_BUFC_METADATA, and reside in the
618 * Not updated directly; only synced in arc_kstat_update.
620 kstat_named_t arcstat_mfu_evictable_metadata
;
622 * Total number of bytes that *would have been* consumed by ARC
623 * buffers in the arc_mfu_ghost state. See the comment above
624 * arcstat_mru_ghost_size for more details.
625 * Not updated directly; only synced in arc_kstat_update.
627 kstat_named_t arcstat_mfu_ghost_size
;
629 * Number of bytes that *would have been* consumed by ARC
630 * buffers that are eligible for eviction, of type
631 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
632 * Not updated directly; only synced in arc_kstat_update.
634 kstat_named_t arcstat_mfu_ghost_evictable_data
;
636 * Number of bytes that *would have been* consumed by ARC
637 * buffers that are eligible for eviction, of type
 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
639 * Not updated directly; only synced in arc_kstat_update.
641 kstat_named_t arcstat_mfu_ghost_evictable_metadata
;
642 kstat_named_t arcstat_l2_hits
;
643 kstat_named_t arcstat_l2_misses
;
644 kstat_named_t arcstat_l2_feeds
;
645 kstat_named_t arcstat_l2_rw_clash
;
646 kstat_named_t arcstat_l2_read_bytes
;
647 kstat_named_t arcstat_l2_write_bytes
;
648 kstat_named_t arcstat_l2_writes_sent
;
649 kstat_named_t arcstat_l2_writes_done
;
650 kstat_named_t arcstat_l2_writes_error
;
651 kstat_named_t arcstat_l2_writes_lock_retry
;
652 kstat_named_t arcstat_l2_evict_lock_retry
;
653 kstat_named_t arcstat_l2_evict_reading
;
654 kstat_named_t arcstat_l2_evict_l1cached
;
655 kstat_named_t arcstat_l2_free_on_write
;
656 kstat_named_t arcstat_l2_abort_lowmem
;
657 kstat_named_t arcstat_l2_cksum_bad
;
658 kstat_named_t arcstat_l2_io_error
;
659 kstat_named_t arcstat_l2_lsize
;
660 kstat_named_t arcstat_l2_psize
;
661 /* Not updated directly; only synced in arc_kstat_update. */
662 kstat_named_t arcstat_l2_hdr_size
;
663 kstat_named_t arcstat_memory_throttle_count
;
664 /* Not updated directly; only synced in arc_kstat_update. */
665 kstat_named_t arcstat_meta_used
;
666 kstat_named_t arcstat_meta_limit
;
667 kstat_named_t arcstat_meta_max
;
668 kstat_named_t arcstat_meta_min
;
669 kstat_named_t arcstat_sync_wait_for_async
;
670 kstat_named_t arcstat_demand_hit_predictive_prefetch
;
673 static arc_stats_t arc_stats
= {
674 { "hits", KSTAT_DATA_UINT64
},
675 { "misses", KSTAT_DATA_UINT64
},
676 { "demand_data_hits", KSTAT_DATA_UINT64
},
677 { "demand_data_misses", KSTAT_DATA_UINT64
},
678 { "demand_metadata_hits", KSTAT_DATA_UINT64
},
679 { "demand_metadata_misses", KSTAT_DATA_UINT64
},
680 { "prefetch_data_hits", KSTAT_DATA_UINT64
},
681 { "prefetch_data_misses", KSTAT_DATA_UINT64
},
682 { "prefetch_metadata_hits", KSTAT_DATA_UINT64
},
683 { "prefetch_metadata_misses", KSTAT_DATA_UINT64
},
684 { "mru_hits", KSTAT_DATA_UINT64
},
685 { "mru_ghost_hits", KSTAT_DATA_UINT64
},
686 { "mfu_hits", KSTAT_DATA_UINT64
},
687 { "mfu_ghost_hits", KSTAT_DATA_UINT64
},
688 { "deleted", KSTAT_DATA_UINT64
},
689 { "mutex_miss", KSTAT_DATA_UINT64
},
690 { "evict_skip", KSTAT_DATA_UINT64
},
691 { "evict_not_enough", KSTAT_DATA_UINT64
},
692 { "evict_l2_cached", KSTAT_DATA_UINT64
},
693 { "evict_l2_eligible", KSTAT_DATA_UINT64
},
694 { "evict_l2_ineligible", KSTAT_DATA_UINT64
},
695 { "evict_l2_skip", KSTAT_DATA_UINT64
},
696 { "hash_elements", KSTAT_DATA_UINT64
},
697 { "hash_elements_max", KSTAT_DATA_UINT64
},
698 { "hash_collisions", KSTAT_DATA_UINT64
},
699 { "hash_chains", KSTAT_DATA_UINT64
},
700 { "hash_chain_max", KSTAT_DATA_UINT64
},
701 { "p", KSTAT_DATA_UINT64
},
702 { "c", KSTAT_DATA_UINT64
},
703 { "c_min", KSTAT_DATA_UINT64
},
704 { "c_max", KSTAT_DATA_UINT64
},
705 { "size", KSTAT_DATA_UINT64
},
706 { "compressed_size", KSTAT_DATA_UINT64
},
707 { "uncompressed_size", KSTAT_DATA_UINT64
},
708 { "overhead_size", KSTAT_DATA_UINT64
},
709 { "hdr_size", KSTAT_DATA_UINT64
},
710 { "data_size", KSTAT_DATA_UINT64
},
711 { "metadata_size", KSTAT_DATA_UINT64
},
712 { "other_size", KSTAT_DATA_UINT64
},
713 { "anon_size", KSTAT_DATA_UINT64
},
714 { "anon_evictable_data", KSTAT_DATA_UINT64
},
715 { "anon_evictable_metadata", KSTAT_DATA_UINT64
},
716 { "mru_size", KSTAT_DATA_UINT64
},
717 { "mru_evictable_data", KSTAT_DATA_UINT64
},
718 { "mru_evictable_metadata", KSTAT_DATA_UINT64
},
719 { "mru_ghost_size", KSTAT_DATA_UINT64
},
720 { "mru_ghost_evictable_data", KSTAT_DATA_UINT64
},
721 { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64
},
722 { "mfu_size", KSTAT_DATA_UINT64
},
723 { "mfu_evictable_data", KSTAT_DATA_UINT64
},
724 { "mfu_evictable_metadata", KSTAT_DATA_UINT64
},
725 { "mfu_ghost_size", KSTAT_DATA_UINT64
},
726 { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64
},
727 { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64
},
728 { "l2_hits", KSTAT_DATA_UINT64
},
729 { "l2_misses", KSTAT_DATA_UINT64
},
730 { "l2_feeds", KSTAT_DATA_UINT64
},
731 { "l2_rw_clash", KSTAT_DATA_UINT64
},
732 { "l2_read_bytes", KSTAT_DATA_UINT64
},
733 { "l2_write_bytes", KSTAT_DATA_UINT64
},
734 { "l2_writes_sent", KSTAT_DATA_UINT64
},
735 { "l2_writes_done", KSTAT_DATA_UINT64
},
736 { "l2_writes_error", KSTAT_DATA_UINT64
},
737 { "l2_writes_lock_retry", KSTAT_DATA_UINT64
},
738 { "l2_evict_lock_retry", KSTAT_DATA_UINT64
},
739 { "l2_evict_reading", KSTAT_DATA_UINT64
},
740 { "l2_evict_l1cached", KSTAT_DATA_UINT64
},
741 { "l2_free_on_write", KSTAT_DATA_UINT64
},
742 { "l2_abort_lowmem", KSTAT_DATA_UINT64
},
743 { "l2_cksum_bad", KSTAT_DATA_UINT64
},
744 { "l2_io_error", KSTAT_DATA_UINT64
},
745 { "l2_size", KSTAT_DATA_UINT64
},
746 { "l2_asize", KSTAT_DATA_UINT64
},
747 { "l2_hdr_size", KSTAT_DATA_UINT64
},
748 { "memory_throttle_count", KSTAT_DATA_UINT64
},
749 { "arc_meta_used", KSTAT_DATA_UINT64
},
750 { "arc_meta_limit", KSTAT_DATA_UINT64
},
751 { "arc_meta_max", KSTAT_DATA_UINT64
},
752 { "arc_meta_min", KSTAT_DATA_UINT64
},
753 { "sync_wait_for_async", KSTAT_DATA_UINT64
},
754 { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64
},
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val))

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
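
/*
 * For example, a call of the following shape (an illustrative invocation
 * modeled on the hit/miss accounting in this file, not a verbatim excerpt):
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits, arcstat_demand_metadata_hits,
 * arcstat_prefetch_data_hits or arcstat_prefetch_metadata_hits, depending on
 * which of the two conditions hold.
 */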
796 static arc_state_t
*arc_anon
;
797 static arc_state_t
*arc_mru
;
798 static arc_state_t
*arc_mru_ghost
;
799 static arc_state_t
*arc_mfu
;
800 static arc_state_t
*arc_mfu_ghost
;
801 static arc_state_t
*arc_l2c_only
;
804 * There are several ARC variables that are critical to export as kstats --
805 * but we don't want to have to grovel around in the kstat whenever we wish to
806 * manipulate them. For these variables, we therefore define them to be in
807 * terms of the statistic variable. This assures that we are not introducing
808 * the possibility of inconsistency by having shadow copies of the variables,
809 * while still allowing the code to be readable.
811 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
812 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
813 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
814 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
815 #define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
816 #define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
817 #define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
819 /* compressed size of entire arc */
820 #define arc_compressed_size ARCSTAT(arcstat_compressed_size)
821 /* uncompressed size of entire arc */
822 #define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size)
823 /* number of bytes in the arc from arc_buf_t's */
824 #define arc_overhead_size ARCSTAT(arcstat_overhead_size)
/*
 * There are also some ARC variables that we want to export, but that are
 * updated so often that having the canonical representation be the statistic
 * variable causes a performance bottleneck. We want to use aggsum_t's for these
 * instead, but still be able to export the kstat in the same way as before.
 * The solution is to always use the aggsum version, except in the kstat update
 * callback.
 */
aggsum_t arc_meta_used;
aggsum_t astat_data_size;
aggsum_t astat_metadata_size;
aggsum_t astat_hdr_size;
aggsum_t astat_other_size;
aggsum_t astat_l2_hdr_size;
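
/*
 * A minimal sketch of the resulting pattern (assuming the aggsum_t interface
 * from sys/aggsum.h; this is not a verbatim excerpt): hot paths only add
 * deltas, and the expensive global total is only computed when the kstat is
 * actually read.
 *
 *	aggsum_add(&astat_data_size, space);		(on allocation)
 *	aggsum_add(&astat_data_size, -space);		(on free)
 *	ARCSTAT(arcstat_data_size) =
 *	    aggsum_value(&astat_data_size);		(in arc_kstat_update())
 */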
842 static int arc_no_grow
; /* Don't try to grow cache size */
843 static uint64_t arc_tempreserve
;
844 static uint64_t arc_loaned_bytes
;
846 typedef struct arc_callback arc_callback_t
;
848 struct arc_callback
{
850 arc_done_func_t
*acb_done
;
852 boolean_t acb_compressed
;
853 zio_t
*acb_zio_dummy
;
854 arc_callback_t
*acb_next
;
857 typedef struct arc_write_callback arc_write_callback_t
;
859 struct arc_write_callback
{
861 arc_done_func_t
*awcb_ready
;
862 arc_done_func_t
*awcb_children_ready
;
863 arc_done_func_t
*awcb_physdone
;
864 arc_done_func_t
*awcb_done
;
869 * ARC buffers are separated into multiple structs as a memory saving measure:
870 * - Common fields struct, always defined, and embedded within it:
871 * - L2-only fields, always allocated but undefined when not in L2ARC
872 * - L1-only fields, only allocated when in L1ARC
874 * Buffer in L1 Buffer only in L2
875 * +------------------------+ +------------------------+
876 * | arc_buf_hdr_t | | arc_buf_hdr_t |
880 * +------------------------+ +------------------------+
881 * | l2arc_buf_hdr_t | | l2arc_buf_hdr_t |
882 * | (undefined if L1-only) | | |
883 * +------------------------+ +------------------------+
884 * | l1arc_buf_hdr_t |
889 * +------------------------+
891 * Because it's possible for the L2ARC to become extremely large, we can wind
892 * up eating a lot of memory in L2ARC buffer headers, so the size of a header
893 * is minimized by only allocating the fields necessary for an L1-cached buffer
894 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
895 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple
896 * words in pointers. arc_hdr_realloc() is used to switch a header between
897 * these two allocation states.
899 typedef struct l1arc_buf_hdr
{
900 kmutex_t b_freeze_lock
;
901 zio_cksum_t
*b_freeze_cksum
;
904 * Used for debugging with kmem_flags - by allocating and freeing
905 * b_thawed when the buffer is thawed, we get a record of the stack
906 * trace that thawed it.
913 /* for waiting on writes to complete */
917 /* protected by arc state mutex */
918 arc_state_t
*b_state
;
919 multilist_node_t b_arc_node
;
921 /* updated atomically */
922 clock_t b_arc_access
;
924 /* self protecting */
927 arc_callback_t
*b_acb
;
931 typedef struct l2arc_dev l2arc_dev_t
;
933 typedef struct l2arc_buf_hdr
{
934 /* protected by arc_buf_hdr mutex */
935 l2arc_dev_t
*b_dev
; /* L2ARC device */
936 uint64_t b_daddr
; /* disk address, offset byte */
938 list_node_t b_l2node
;
942 /* protected by hash lock */
946 arc_buf_contents_t b_type
;
947 arc_buf_hdr_t
*b_hash_next
;
951 * This field stores the size of the data buffer after
952 * compression, and is set in the arc's zio completion handlers.
953 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes).
955 * While the block pointers can store up to 32MB in their psize
956 * field, we can only store up to 32MB minus 512B. This is due
957 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e.
958 * a field of zeros represents 512B in the bp). We can't use a
959 * bias of 1 since we need to reserve a psize of zero, here, to
960 * represent holes and embedded blocks.
962 * This isn't a problem in practice, since the maximum size of a
963 * buffer is limited to 16MB, so we never need to store 32MB in
964 * this field. Even in the upstream illumos code base, the
965 * maximum size of a buffer is limited to 16MB.
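 *
 * As a worked example of these units (illustrative numbers): a 4KB
 * physical block is stored as a field value of 8, since
 * 8 * SPA_MINBLOCKSIZE == 8 * 512 == 4096 bytes, while a value of 0 is
 * reserved to mean "no physical data" (holes and embedded blocks), which
 * is why the bias of 0 is needed here.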
970 * This field stores the size of the data buffer before
971 * compression, and cannot change once set. It is in units
972 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes)
974 uint16_t b_lsize
; /* immutable */
975 uint64_t b_spa
; /* immutable */
977 /* L2ARC fields. Undefined when not in L2ARC. */
978 l2arc_buf_hdr_t b_l2hdr
;
979 /* L1ARC fields. Undefined when in l2arc_only state */
980 l1arc_buf_hdr_t b_l1hdr
;
983 #define GHOST_STATE(state) \
984 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
985 (state) == arc_l2c_only)
987 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
988 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
989 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
990 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
991 #define HDR_COMPRESSION_ENABLED(hdr) \
992 ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
994 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
995 #define HDR_L2_READING(hdr) \
996 (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
997 ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
998 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
999 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
1000 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
1001 #define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
1003 #define HDR_ISTYPE_METADATA(hdr) \
1004 ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
1005 #define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
1007 #define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
1008 #define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
1010 /* For storing compression mode in b_flags */
1011 #define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
1013 #define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
1014 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
1015 #define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
1016 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
1018 #define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
1019 #define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
1020 #define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
1026 #define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
1027 #define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
1030 * Hash table routines
1033 #define HT_LOCK_PAD 64
1038 unsigned char pad
[(HT_LOCK_PAD
- sizeof (kmutex_t
))];
1042 #define BUF_LOCKS 256
1043 typedef struct buf_hash_table
{
1045 arc_buf_hdr_t
**ht_table
;
1046 struct ht_lock ht_locks
[BUF_LOCKS
];
1049 static buf_hash_table_t buf_hash_table
;
1051 #define BUF_HASH_INDEX(spa, dva, birth) \
1052 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
1053 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
1054 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
1055 #define HDR_LOCK(hdr) \
1056 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
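
/*
 * Illustrative use of these macros (a hypothetical fragment, not a verbatim
 * excerpt): to serialize access to a header's identity fields, a caller
 * derives the lock from the header's DVA-based hash and takes it:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *	mutex_enter(hash_lock);
 *	... examine or update hdr ...
 *	mutex_exit(hash_lock);
 *
 * Note that (ht_mask + 1) hash buckets share only BUF_LOCKS locks, so
 * contention on a hash lock does not necessarily involve the same buffer.
 */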
uint64_t zfs_crc64_table[256];
1064 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
1065 #define L2ARC_HEADROOM 2 /* num of writes */
1067 * If we discover during ARC scan any buffers to be compressed, we boost
1068 * our headroom for the next scanning cycle by this percentage multiple.
1070 #define L2ARC_HEADROOM_BOOST 200
1071 #define L2ARC_FEED_SECS 1 /* caching interval secs */
1072 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
1074 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
1075 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
1092 vdev_t
*l2ad_vdev
; /* vdev */
1093 spa_t
*l2ad_spa
; /* spa */
1094 uint64_t l2ad_hand
; /* next write location */
1095 uint64_t l2ad_start
; /* first addr on device */
1096 uint64_t l2ad_end
; /* last addr on device */
1097 boolean_t l2ad_first
; /* first sweep through */
1098 boolean_t l2ad_writing
; /* currently writing */
1099 kmutex_t l2ad_mtx
; /* lock for buffer list */
1100 list_t l2ad_buflist
; /* buffer list */
1101 list_node_t l2ad_node
; /* device list node */
1102 refcount_t l2ad_alloc
; /* allocated bytes */
1105 static list_t L2ARC_dev_list
; /* device list */
1106 static list_t
*l2arc_dev_list
; /* device list pointer */
1107 static kmutex_t l2arc_dev_mtx
; /* device list mutex */
1108 static l2arc_dev_t
*l2arc_dev_last
; /* last device used */
1109 static list_t L2ARC_free_on_write
; /* free after write buf list */
1110 static list_t
*l2arc_free_on_write
; /* free after write list ptr */
1111 static kmutex_t l2arc_free_on_write_mtx
; /* mutex for list */
1112 static uint64_t l2arc_ndev
; /* number of devices */
1114 typedef struct l2arc_read_callback
{
1115 arc_buf_hdr_t
*l2rcb_hdr
; /* read header */
1116 blkptr_t l2rcb_bp
; /* original blkptr */
1117 zbookmark_phys_t l2rcb_zb
; /* original bookmark */
1118 int l2rcb_flags
; /* original flags */
1119 abd_t
*l2rcb_abd
; /* temporary buffer */
1120 } l2arc_read_callback_t
;
1122 typedef struct l2arc_write_callback
{
1123 l2arc_dev_t
*l2wcb_dev
; /* device info */
1124 arc_buf_hdr_t
*l2wcb_head
; /* head of write buflist */
1125 } l2arc_write_callback_t
;
1127 typedef struct l2arc_data_free
{
1128 /* protected by l2arc_free_on_write_mtx */
1131 arc_buf_contents_t l2df_type
;
1132 list_node_t l2df_list_node
;
1133 } l2arc_data_free_t
;
1135 static kmutex_t l2arc_feed_thr_lock
;
1136 static kcondvar_t l2arc_feed_thr_cv
;
1137 static uint8_t l2arc_thread_exit
;
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_pabd(arc_buf_hdr_t *);
static void arc_hdr_alloc_pabd(arc_buf_hdr_t *);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static boolean_t arc_is_overflowing();
static void arc_buf_watch(arc_buf_t *);

static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);

static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
1170 #define HDR_EMPTY(hdr) \
1171 ((hdr)->b_dva.dva_word[0] == 0 && \
1172 (hdr)->b_dva.dva_word[1] == 0)
1174 #define HDR_EQUAL(spa, dva, birth, hdr) \
1175 ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
1176 ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
1177 ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
1180 buf_discard_identity(arc_buf_hdr_t
*hdr
)
1182 hdr
->b_dva
.dva_word
[0] = 0;
1183 hdr
->b_dva
.dva_word
[1] = 0;
1187 static arc_buf_hdr_t
*
1188 buf_hash_find(uint64_t spa
, const blkptr_t
*bp
, kmutex_t
**lockp
)
1190 const dva_t
*dva
= BP_IDENTITY(bp
);
1191 uint64_t birth
= BP_PHYSICAL_BIRTH(bp
);
1192 uint64_t idx
= BUF_HASH_INDEX(spa
, dva
, birth
);
1193 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
1196 mutex_enter(hash_lock
);
1197 for (hdr
= buf_hash_table
.ht_table
[idx
]; hdr
!= NULL
;
1198 hdr
= hdr
->b_hash_next
) {
1199 if (HDR_EQUAL(spa
, dva
, birth
, hdr
)) {
1204 mutex_exit(hash_lock
);
1210 * Insert an entry into the hash table. If there is already an element
1211 * equal to elem in the hash table, then the already existing element
1212 * will be returned and the new element will not be inserted.
1213 * Otherwise returns NULL.
1214 * If lockp == NULL, the caller is assumed to already hold the hash lock.
1216 static arc_buf_hdr_t
*
1217 buf_hash_insert(arc_buf_hdr_t
*hdr
, kmutex_t
**lockp
)
1219 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
1220 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
1221 arc_buf_hdr_t
*fhdr
;
1224 ASSERT(!DVA_IS_EMPTY(&hdr
->b_dva
));
1225 ASSERT(hdr
->b_birth
!= 0);
1226 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
1228 if (lockp
!= NULL
) {
1230 mutex_enter(hash_lock
);
1232 ASSERT(MUTEX_HELD(hash_lock
));
1235 for (fhdr
= buf_hash_table
.ht_table
[idx
], i
= 0; fhdr
!= NULL
;
1236 fhdr
= fhdr
->b_hash_next
, i
++) {
1237 if (HDR_EQUAL(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
, fhdr
))
1241 hdr
->b_hash_next
= buf_hash_table
.ht_table
[idx
];
1242 buf_hash_table
.ht_table
[idx
] = hdr
;
1243 arc_hdr_set_flags(hdr
, ARC_FLAG_IN_HASH_TABLE
);
1245 /* collect some hash table performance data */
1247 ARCSTAT_BUMP(arcstat_hash_collisions
);
1249 ARCSTAT_BUMP(arcstat_hash_chains
);
1251 ARCSTAT_MAX(arcstat_hash_chain_max
, i
);
1254 ARCSTAT_BUMP(arcstat_hash_elements
);
1255 ARCSTAT_MAXSTAT(arcstat_hash_elements
);
1261 buf_hash_remove(arc_buf_hdr_t
*hdr
)
1263 arc_buf_hdr_t
*fhdr
, **hdrp
;
1264 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
1266 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx
)));
1267 ASSERT(HDR_IN_HASH_TABLE(hdr
));
1269 hdrp
= &buf_hash_table
.ht_table
[idx
];
1270 while ((fhdr
= *hdrp
) != hdr
) {
1271 ASSERT3P(fhdr
, !=, NULL
);
1272 hdrp
= &fhdr
->b_hash_next
;
1274 *hdrp
= hdr
->b_hash_next
;
1275 hdr
->b_hash_next
= NULL
;
1276 arc_hdr_clear_flags(hdr
, ARC_FLAG_IN_HASH_TABLE
);
1278 /* collect some hash table performance data */
1279 ARCSTAT_BUMPDOWN(arcstat_hash_elements
);
1281 if (buf_hash_table
.ht_table
[idx
] &&
1282 buf_hash_table
.ht_table
[idx
]->b_hash_next
== NULL
)
1283 ARCSTAT_BUMPDOWN(arcstat_hash_chains
);
1287 * Global data structures and functions for the buf kmem cache.
1289 static kmem_cache_t
*hdr_full_cache
;
1290 static kmem_cache_t
*hdr_l2only_cache
;
1291 static kmem_cache_t
*buf_cache
;
1298 kmem_free(buf_hash_table
.ht_table
,
1299 (buf_hash_table
.ht_mask
+ 1) * sizeof (void *));
1300 for (i
= 0; i
< BUF_LOCKS
; i
++)
1301 mutex_destroy(&buf_hash_table
.ht_locks
[i
].ht_lock
);
1302 kmem_cache_destroy(hdr_full_cache
);
1303 kmem_cache_destroy(hdr_l2only_cache
);
1304 kmem_cache_destroy(buf_cache
);
1308 * Constructor callback - called when the cache is empty
1309 * and a new buf is requested.
1313 hdr_full_cons(void *vbuf
, void *unused
, int kmflag
)
1315 arc_buf_hdr_t
*hdr
= vbuf
;
1317 bzero(hdr
, HDR_FULL_SIZE
);
1318 cv_init(&hdr
->b_l1hdr
.b_cv
, NULL
, CV_DEFAULT
, NULL
);
1319 refcount_create(&hdr
->b_l1hdr
.b_refcnt
);
1320 mutex_init(&hdr
->b_l1hdr
.b_freeze_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1321 multilist_link_init(&hdr
->b_l1hdr
.b_arc_node
);
1322 arc_space_consume(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1329 hdr_l2only_cons(void *vbuf
, void *unused
, int kmflag
)
1331 arc_buf_hdr_t
*hdr
= vbuf
;
1333 bzero(hdr
, HDR_L2ONLY_SIZE
);
1334 arc_space_consume(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1341 buf_cons(void *vbuf
, void *unused
, int kmflag
)
1343 arc_buf_t
*buf
= vbuf
;
1345 bzero(buf
, sizeof (arc_buf_t
));
1346 mutex_init(&buf
->b_evict_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1347 arc_space_consume(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1353 * Destructor callback - called when a cached buf is
1354 * no longer required.
1358 hdr_full_dest(void *vbuf
, void *unused
)
1360 arc_buf_hdr_t
*hdr
= vbuf
;
1362 ASSERT(HDR_EMPTY(hdr
));
1363 cv_destroy(&hdr
->b_l1hdr
.b_cv
);
1364 refcount_destroy(&hdr
->b_l1hdr
.b_refcnt
);
1365 mutex_destroy(&hdr
->b_l1hdr
.b_freeze_lock
);
1366 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
1367 arc_space_return(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1372 hdr_l2only_dest(void *vbuf
, void *unused
)
1374 arc_buf_hdr_t
*hdr
= vbuf
;
1376 ASSERT(HDR_EMPTY(hdr
));
1377 arc_space_return(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1382 buf_dest(void *vbuf
, void *unused
)
1384 arc_buf_t
*buf
= vbuf
;
1386 mutex_destroy(&buf
->b_evict_lock
);
1387 arc_space_return(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1391 * Reclaim callback -- invoked when memory is low.
1395 hdr_recl(void *unused
)
1397 dprintf("hdr_recl called\n");
1399 * umem calls the reclaim func when we destroy the buf cache,
1400 * which is after we do arc_fini().
1403 cv_signal(&arc_reclaim_thread_cv
);
1410 uint64_t hsize
= 1ULL << 12;
1414 * The hash table is big enough to fill all of physical memory
1415 * with an average block size of zfs_arc_average_blocksize (default 8K).
1416 * By default, the table will take up
1417 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
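 *
 * For example (illustrative arithmetic only): on a machine with 64GB of
 * physical memory and the default 8K average block size, hsize is doubled
 * until it reaches 8M slots (64GB / 8K), so the table of pointers consumes
 * 8M * sizeof (void *) == 64MB, i.e. the 1MB per GB noted above.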
1419 while (hsize
* zfs_arc_average_blocksize
< physmem
* PAGESIZE
)
1422 buf_hash_table
.ht_mask
= hsize
- 1;
1423 buf_hash_table
.ht_table
=
1424 kmem_zalloc(hsize
* sizeof (void*), KM_NOSLEEP
);
1425 if (buf_hash_table
.ht_table
== NULL
) {
1426 ASSERT(hsize
> (1ULL << 8));
1431 hdr_full_cache
= kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE
,
1432 0, hdr_full_cons
, hdr_full_dest
, hdr_recl
, NULL
, NULL
, 0);
1433 hdr_l2only_cache
= kmem_cache_create("arc_buf_hdr_t_l2only",
1434 HDR_L2ONLY_SIZE
, 0, hdr_l2only_cons
, hdr_l2only_dest
, hdr_recl
,
1436 buf_cache
= kmem_cache_create("arc_buf_t", sizeof (arc_buf_t
),
1437 0, buf_cons
, buf_dest
, NULL
, NULL
, NULL
, 0);
1439 for (i
= 0; i
< 256; i
++)
1440 for (ct
= zfs_crc64_table
+ i
, *ct
= i
, j
= 8; j
> 0; j
--)
1441 *ct
= (*ct
>> 1) ^ (-(*ct
& 1) & ZFS_CRC64_POLY
);
1443 for (i
= 0; i
< BUF_LOCKS
; i
++) {
1444 mutex_init(&buf_hash_table
.ht_locks
[i
].ht_lock
,
1445 NULL
, MUTEX_DEFAULT
, NULL
);
1450 * This is the size that the buf occupies in memory. If the buf is compressed,
1451 * it will correspond to the compressed size. You should use this method of
1452 * getting the buf size unless you explicitly need the logical size.
1455 arc_buf_size(arc_buf_t
*buf
)
1457 return (ARC_BUF_COMPRESSED(buf
) ?
1458 HDR_GET_PSIZE(buf
->b_hdr
) : HDR_GET_LSIZE(buf
->b_hdr
));
1462 arc_buf_lsize(arc_buf_t
*buf
)
1464 return (HDR_GET_LSIZE(buf
->b_hdr
));
1468 arc_get_compression(arc_buf_t
*buf
)
1470 return (ARC_BUF_COMPRESSED(buf
) ?
1471 HDR_GET_COMPRESS(buf
->b_hdr
) : ZIO_COMPRESS_OFF
);
1474 #define ARC_MINTIME (hz>>4) /* 62 ms */
1476 static inline boolean_t
1477 arc_buf_is_shared(arc_buf_t
*buf
)
1479 boolean_t shared
= (buf
->b_data
!= NULL
&&
1480 buf
->b_hdr
->b_l1hdr
.b_pabd
!= NULL
&&
1481 abd_is_linear(buf
->b_hdr
->b_l1hdr
.b_pabd
) &&
1482 buf
->b_data
== abd_to_buf(buf
->b_hdr
->b_l1hdr
.b_pabd
));
1483 IMPLY(shared
, HDR_SHARED_DATA(buf
->b_hdr
));
1484 IMPLY(shared
, ARC_BUF_SHARED(buf
));
1485 IMPLY(shared
, ARC_BUF_COMPRESSED(buf
) || ARC_BUF_LAST(buf
));
1488 * It would be nice to assert arc_can_share() too, but the "hdr isn't
1489 * already being shared" requirement prevents us from doing that.
1496 * Free the checksum associated with this header. If there is no checksum, this
1500 arc_cksum_free(arc_buf_hdr_t
*hdr
)
1502 ASSERT(HDR_HAS_L1HDR(hdr
));
1503 mutex_enter(&hdr
->b_l1hdr
.b_freeze_lock
);
1504 if (hdr
->b_l1hdr
.b_freeze_cksum
!= NULL
) {
1505 kmem_free(hdr
->b_l1hdr
.b_freeze_cksum
, sizeof (zio_cksum_t
));
1506 hdr
->b_l1hdr
.b_freeze_cksum
= NULL
;
1508 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1512 * Return true iff at least one of the bufs on hdr is not compressed.
1515 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t
*hdr
)
1517 for (arc_buf_t
*b
= hdr
->b_l1hdr
.b_buf
; b
!= NULL
; b
= b
->b_next
) {
1518 if (!ARC_BUF_COMPRESSED(b
)) {
1526 * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
1527 * matches the checksum that is stored in the hdr. If there is no checksum,
1528 * or if the buf is compressed, this is a no-op.
1531 arc_cksum_verify(arc_buf_t
*buf
)
1533 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1536 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1539 if (ARC_BUF_COMPRESSED(buf
)) {
1540 ASSERT(hdr
->b_l1hdr
.b_freeze_cksum
== NULL
||
1541 arc_hdr_has_uncompressed_buf(hdr
));
1545 ASSERT(HDR_HAS_L1HDR(hdr
));
1547 mutex_enter(&hdr
->b_l1hdr
.b_freeze_lock
);
1548 if (hdr
->b_l1hdr
.b_freeze_cksum
== NULL
|| HDR_IO_ERROR(hdr
)) {
1549 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1553 fletcher_2_native(buf
->b_data
, arc_buf_size(buf
), NULL
, &zc
);
1554 if (!ZIO_CHECKSUM_EQUAL(*hdr
->b_l1hdr
.b_freeze_cksum
, zc
))
1555 panic("buffer modified while frozen!");
1556 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
	enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp);
	boolean_t valid_cksum;

	ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
	VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));

	/*
	 * We rely on the blkptr's checksum to determine if the block
	 * is valid or not. When compressed arc is enabled, the l2arc
	 * writes the block to the l2arc just as it appears in the pool.
	 * This allows us to use the blkptr's checksum to validate the
	 * data that we just read off of the l2arc without having to store
	 * a separate checksum in the arc_buf_hdr_t. However, if compressed
	 * arc is disabled, then the data written to the l2arc is always
	 * uncompressed and won't match the block as it exists in the main
	 * pool. When this is the case, we must first compress it if it is
	 * compressed on the main pool before we can validate the checksum.
	 */
	if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) {
		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
		uint64_t lsize = HDR_GET_LSIZE(hdr);
		uint64_t csize;

		abd_t *cdata = abd_alloc_linear(HDR_GET_PSIZE(hdr), B_TRUE);
		csize = zio_compress_data(compress, zio->io_abd,
		    abd_to_buf(cdata), lsize);

		ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr));
		if (csize < HDR_GET_PSIZE(hdr)) {
			/*
			 * Compressed blocks are always a multiple of the
			 * smallest ashift in the pool. Ideally, we would
			 * like to round up the csize to the next
			 * spa_min_ashift but that value may have changed
			 * since the block was last written. Instead,
			 * we rely on the fact that the hdr's psize
			 * was set to the psize of the block when it was
			 * last written. We set the csize to that value
			 * and zero out any part that should not contain
			 * data.
			 */
			abd_zero_off(cdata, csize, HDR_GET_PSIZE(hdr) - csize);
			csize = HDR_GET_PSIZE(hdr);
		}
		zio_push_transform(zio, cdata, csize, HDR_GET_PSIZE(hdr), NULL);
	}

	/*
	 * Block pointers always store the checksum for the logical data.
	 * If the block pointer has the gang bit set, then the checksum
	 * it represents is for the reconstituted data and not for an
	 * individual gang member. The zio pipeline, however, must be able to
	 * determine the checksum of each of the gang constituents so it
	 * treats the checksum comparison differently than what we need
	 * for l2arc blocks. This prevents us from using the
	 * zio_checksum_error() interface directly. Instead we must call the
	 * zio_checksum_error_impl() so that we can ensure the checksum is
	 * generated using the correct checksum algorithm and accounts for the
	 * logical I/O size and not just a gang fragment.
	 */
	valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
	    BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
	    zio->io_offset, NULL) == 0);
	zio_pop_transforms(zio);
	return (valid_cksum);
}
/*
 * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
 * checksum and attaches it to the buf's hdr so that we can ensure that the buf
 * isn't modified later on. If buf is compressed or there is already a checksum
 * on the hdr, this is a no-op (we only checksum uncompressed bufs).
 */
static void
arc_cksum_compute(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(HDR_HAS_L1HDR(hdr));

	mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
	if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
		ASSERT(arc_hdr_has_uncompressed_buf(hdr));
		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
		return;
	} else if (ARC_BUF_COMPRESSED(buf)) {
		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
		return;
	}

	ASSERT(!ARC_BUF_COMPRESSED(buf));
	hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
	    KM_SLEEP);
	fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
	    hdr->b_l1hdr.b_freeze_cksum);
	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
	arc_buf_watch(buf);
}
#ifndef _KERNEL
typedef struct procctl {
	long cmd;
	prwatch_t prwatch;
} procctl_t;
#endif

/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = 0;
		ctl.prwatch.pr_wflags = 0;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}

/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = arc_buf_size(buf);
		ctl.prwatch.pr_wflags = WA_WRITE;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
	arc_buf_contents_t type;
	if (HDR_ISTYPE_METADATA(hdr)) {
		type = ARC_BUFC_METADATA;
	} else {
		type = ARC_BUFC_DATA;
	}
	VERIFY3U(hdr->b_type, ==, type);
	return (type);
}

boolean_t
arc_is_metadata(arc_buf_t *buf)
{
	return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}

static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
	switch (type) {
	case ARC_BUFC_DATA:
		/* metadata field is 0 if buffer contains normal data */
		return (0);
	case ARC_BUFC_METADATA:
		return (ARC_FLAG_BUFC_METADATA);
	default:
		break;
	}
	panic("undefined ARC buffer type!");
	return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	arc_cksum_verify(buf);

	/*
	 * Compressed buffers do not manipulate the b_freeze_cksum or
	 * allocate b_thawed.
	 */
	if (ARC_BUF_COMPRESSED(buf)) {
		ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
		    arc_hdr_has_uncompressed_buf(hdr));
		return;
	}

	ASSERT(HDR_HAS_L1HDR(hdr));
	arc_cksum_free(hdr);

	mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (hdr->b_l1hdr.b_thawed != NULL)
			kmem_free(hdr->b_l1hdr.b_thawed, 1);
		hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP);
	}

	mutex_exit(&hdr->b_l1hdr.b_freeze_lock);

	arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	if (ARC_BUF_COMPRESSED(buf)) {
		ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
		    arc_hdr_has_uncompressed_buf(hdr));
		return;
	}

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL ||
	    hdr->b_l1hdr.b_state == arc_anon);
	arc_cksum_compute(buf);
	mutex_exit(hash_lock);
}
/*
 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
 * the following functions should be used to ensure that the flags are
 * updated in a thread-safe way. When manipulating the flags either
 * the hash_lock must be held or the hdr must be undiscoverable. This
 * ensures that we're not racing with any other threads when updating
 * the flags.
 */
static void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
	hdr->b_flags |= flags;
}

static void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
	hdr->b_flags &= ~flags;
}
/*
 * Setting the compression bits in the arc_buf_hdr_t's b_flags is
 * done in a special way since we have to clear and set bits
 * at the same time. Consumers that wish to set the compression bits
 * must use this function to ensure that the flags are updated in a
 * thread-safe manner.
 */
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * Holes and embedded blocks will always have a psize = 0 so
	 * we ignore the compression of the blkptr and set the
	 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF.
	 * Holes and embedded blocks remain anonymous so we don't
	 * want to uncompress them. Mark them as uncompressed.
	 */
	if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
		arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
		ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
	} else {
		arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
		HDR_SET_COMPRESS(hdr, cmp);
		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
		ASSERT(HDR_COMPRESSION_ENABLED(hdr));
	}
}
/*
 * Looks for another buf on the same hdr which has the data decompressed,
 * copies from it, and returns true. If no such buf exists, returns false.
 */
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	boolean_t copied = B_FALSE;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(!ARC_BUF_COMPRESSED(buf));

	for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
	    from = from->b_next) {
		/* can't use our own data buffer */
		if (from == buf) {
			continue;
		}

		if (!ARC_BUF_COMPRESSED(from)) {
			bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
			copied = B_TRUE;
			break;
		}
	}

	/*
	 * There were no decompressed bufs, so there should not be a
	 * checksum on the hdr either.
	 */
	EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);

	return (copied);
}
/*
 * Given a buf that has a data buffer attached to it, this function will
 * efficiently fill the buf with data of the specified compression setting from
 * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
 * are already sharing a data buf, no copy is performed.
 *
 * If the buf is marked as compressed but uncompressed data was requested, this
 * will allocate a new data buffer for the buf, remove that flag, and fill the
 * buf with uncompressed data. You can't request a compressed buf on a hdr with
 * uncompressed data, and (since we haven't added support for it yet) if you
 * want compressed data your buf must already be marked as compressed and have
 * the correct-sized data buffer.
 */
static int
arc_buf_fill(arc_buf_t *buf, boolean_t compressed)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	boolean_t hdr_compressed = (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
	dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;

	ASSERT3P(buf->b_data, !=, NULL);
	IMPLY(compressed, hdr_compressed);
	IMPLY(compressed, ARC_BUF_COMPRESSED(buf));

	if (hdr_compressed == compressed) {
		if (!arc_buf_is_shared(buf)) {
			abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
			    arc_buf_size(buf));
		}
	} else {
		ASSERT(hdr_compressed);
		ASSERT(!compressed);
		ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));

		/*
		 * If the buf is sharing its data with the hdr, unlink it and
		 * allocate a new data buffer for the buf.
		 */
		if (arc_buf_is_shared(buf)) {
			ASSERT(ARC_BUF_COMPRESSED(buf));

			/* We need to give the buf its own b_data */
			buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
			buf->b_data =
			    arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
			arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);

			/* Previously overhead was 0; just add new overhead */
			ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
		} else if (ARC_BUF_COMPRESSED(buf)) {
			/* We need to reallocate the buf's b_data */
			arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
			    buf);
			buf->b_data =
			    arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);

			/* We increased the size of b_data; update overhead */
			ARCSTAT_INCR(arcstat_overhead_size,
			    HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
		}

		/*
		 * Regardless of the buf's previous compression settings, it
		 * should not be compressed at the end of this function.
		 */
		buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;

		/*
		 * Try copying the data from another buf which already has a
		 * decompressed version. If that's not possible, it's time to
		 * bite the bullet and decompress the data from the hdr.
		 */
		if (arc_buf_try_copy_decompressed_data(buf)) {
			/* Skip byteswapping and checksumming (already done) */
			ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL);
			return (0);
		} else {
			int error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
			    hdr->b_l1hdr.b_pabd, buf->b_data,
			    HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));

			/*
			 * Absent hardware errors or software bugs, this should
			 * be impossible, but log it anyway so we can debug it.
			 */
			if (error != 0) {
				zfs_dbgmsg(
				    "hdr %p, compress %d, psize %d, lsize %d",
				    hdr, HDR_GET_COMPRESS(hdr),
				    HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
				return (SET_ERROR(EIO));
			}
		}
	}

	/* Byteswap the buf's data if necessary */
	if (bswap != DMU_BSWAP_NUMFUNCS) {
		ASSERT(!HDR_SHARED_DATA(hdr));
		ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
		dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
	}

	/* Compute the hdr's checksum if necessary */
	arc_cksum_compute(buf);

	return (0);
}
int
arc_decompress(arc_buf_t *buf)
{
	return (arc_buf_fill(buf, B_FALSE));
}

/*
 * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
 */
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
	uint64_t size;

	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
	    HDR_GET_PSIZE(hdr) > 0) {
		size = HDR_GET_PSIZE(hdr);
	} else {
		ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
		size = HDR_GET_LSIZE(hdr);
	}
	return (size);
}
/*
 * Increment the amount of evictable space in the arc_state_t's refcount.
 * We account for the space used by the hdr and the arc buf individually
 * so that we can add and remove them from the refcount individually.
 */
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
	arc_buf_contents_t type = arc_buf_type(hdr);

	ASSERT(HDR_HAS_L1HDR(hdr));

	if (GHOST_STATE(state)) {
		ASSERT0(hdr->b_l1hdr.b_bufcnt);
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		(void) refcount_add_many(&state->arcs_esize[type],
		    HDR_GET_LSIZE(hdr), hdr);
		return;
	}

	ASSERT(!GHOST_STATE(state));
	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) refcount_add_many(&state->arcs_esize[type],
		    arc_hdr_size(hdr), hdr);
	}
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		if (arc_buf_is_shared(buf))
			continue;
		(void) refcount_add_many(&state->arcs_esize[type],
		    arc_buf_size(buf), buf);
	}
}
/*
 * Decrement the amount of evictable space in the arc_state_t's refcount.
 * We account for the space used by the hdr and the arc buf individually
 * so that we can add and remove them from the refcount individually.
 */
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
	arc_buf_contents_t type = arc_buf_type(hdr);

	ASSERT(HDR_HAS_L1HDR(hdr));

	if (GHOST_STATE(state)) {
		ASSERT0(hdr->b_l1hdr.b_bufcnt);
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		(void) refcount_remove_many(&state->arcs_esize[type],
		    HDR_GET_LSIZE(hdr), hdr);
		return;
	}

	ASSERT(!GHOST_STATE(state));
	if (hdr->b_l1hdr.b_pabd != NULL) {
		(void) refcount_remove_many(&state->arcs_esize[type],
		    arc_hdr_size(hdr), hdr);
	}
	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
	    buf = buf->b_next) {
		if (arc_buf_is_shared(buf))
			continue;
		(void) refcount_remove_many(&state->arcs_esize[type],
		    arc_buf_size(buf), buf);
	}
}
/*
 * Add a reference to this hdr indicating that someone is actively
 * referencing that memory. When the refcount transitions from 0 to 1,
 * we remove it from the respective arc_state_t list to indicate that
 * it is not evictable.
 */
static void
add_reference(arc_buf_hdr_t *hdr, void *tag)
{
	ASSERT(HDR_HAS_L1HDR(hdr));
	if (!MUTEX_HELD(HDR_LOCK(hdr))) {
		ASSERT(hdr->b_l1hdr.b_state == arc_anon);
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
	}

	arc_state_t *state = hdr->b_l1hdr.b_state;

	if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
	    (state != arc_anon)) {
		/* We don't use the L2-only state list. */
		if (state != arc_l2c_only) {
			multilist_remove(state->arcs_list[arc_buf_type(hdr)],
			    hdr);
			arc_evictable_space_decrement(hdr, state);
		}
		/* remove the prefetch flag if we get a reference */
		arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
	}
}
/*
 * Remove a reference from this hdr. When the reference transitions from
 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
 * list making it eligible for eviction.
 */
static int
remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = hdr->b_l1hdr.b_state;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	/*
	 * arc_l2c_only counts as a ghost state so we don't need to explicitly
	 * check to prevent usage of the arc_l2c_only list.
	 */
	if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
		arc_evictable_space_increment(hdr, state);
	}
	return (cnt);
}
/*
 * Move the supplied buffer to the indicated state. The hash lock
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
    kmutex_t *hash_lock)
{
	arc_state_t *old_state;
	int64_t refcnt;
	uint32_t bufcnt;
	boolean_t update_old, update_new;
	arc_buf_contents_t buftype = arc_buf_type(hdr);

	/*
	 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
	 * in arc_read() when bringing a buffer out of the L2ARC. However, the
	 * L1 hdr doesn't always exist when we change state to arc_anon before
	 * destroying a header, in which case reallocating to add the L1 hdr is
	 * pointless.
	 */
	if (HDR_HAS_L1HDR(hdr)) {
		old_state = hdr->b_l1hdr.b_state;
		refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
		bufcnt = hdr->b_l1hdr.b_bufcnt;
		update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
	} else {
		old_state = arc_l2c_only;
		refcnt = 0;
		bufcnt = 0;
		update_old = B_FALSE;
	}
	update_new = update_old;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT3P(new_state, !=, old_state);
	ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
	ASSERT(old_state != arc_anon || bufcnt <= 1);

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon && old_state != arc_l2c_only) {
			ASSERT(HDR_HAS_L1HDR(hdr));
			multilist_remove(old_state->arcs_list[buftype], hdr);

			if (GHOST_STATE(old_state)) {
				ASSERT0(bufcnt);
				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
				update_old = B_TRUE;
			}
			arc_evictable_space_decrement(hdr, old_state);
		}
		if (new_state != arc_anon && new_state != arc_l2c_only) {

			/*
			 * An L1 header always exists here, since if we're
			 * moving to some L1-cached state (i.e. not l2c_only or
			 * anonymous), we realloc the header to add an L1hdr
			 * beforehand.
			 */
			ASSERT(HDR_HAS_L1HDR(hdr));
			multilist_insert(new_state->arcs_list[buftype], hdr);

			if (GHOST_STATE(new_state)) {
				ASSERT0(bufcnt);
				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
				update_new = B_TRUE;
			}
			arc_evictable_space_increment(hdr, new_state);
		}
	}

	ASSERT(!HDR_EMPTY(hdr));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
		buf_hash_remove(hdr);

	/* adjust state sizes (ignore arc_l2c_only) */

	if (update_new && new_state != arc_l2c_only) {
		ASSERT(HDR_HAS_L1HDR(hdr));
		if (GHOST_STATE(new_state)) {
			ASSERT0(bufcnt);

			/*
			 * When moving a header to a ghost state, we first
			 * remove all arc buffers. Thus, we'll have a
			 * bufcnt of zero, and no arc buffer to use for
			 * the reference. As a result, we use the arc
			 * header pointer for the reference.
			 */
			(void) refcount_add_many(&new_state->arcs_size,
			    HDR_GET_LSIZE(hdr), hdr);
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		} else {
			uint32_t buffers = 0;

			/*
			 * Each individual buffer holds a unique reference,
			 * thus we must remove each of these references one
			 * at a time.
			 */
			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				ASSERT3U(bufcnt, !=, 0);
				buffers++;

				/*
				 * When the arc_buf_t is sharing the data
				 * block with the hdr, the owner of the
				 * reference belongs to the hdr. Only
				 * add to the refcount if the arc_buf_t is
				 * not shared.
				 */
				if (arc_buf_is_shared(buf))
					continue;

				(void) refcount_add_many(&new_state->arcs_size,
				    arc_buf_size(buf), buf);
			}
			ASSERT3U(bufcnt, ==, buffers);

			if (hdr->b_l1hdr.b_pabd != NULL) {
				(void) refcount_add_many(&new_state->arcs_size,
				    arc_hdr_size(hdr), hdr);
			} else {
				ASSERT(GHOST_STATE(old_state));
			}
		}
	}

	if (update_old && old_state != arc_l2c_only) {
		ASSERT(HDR_HAS_L1HDR(hdr));
		if (GHOST_STATE(old_state)) {
			ASSERT0(bufcnt);
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

			/*
			 * When moving a header off of a ghost state,
			 * the header will not contain any arc buffers.
			 * We use the arc header pointer for the reference
			 * which is exactly what we did when we put the
			 * header on the ghost state.
			 */

			(void) refcount_remove_many(&old_state->arcs_size,
			    HDR_GET_LSIZE(hdr), hdr);
		} else {
			uint32_t buffers = 0;

			/*
			 * Each individual buffer holds a unique reference,
			 * thus we must remove each of these references one
			 * at a time.
			 */
			for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				ASSERT3U(bufcnt, !=, 0);
				buffers++;

				/*
				 * When the arc_buf_t is sharing the data
				 * block with the hdr, the owner of the
				 * reference belongs to the hdr. Only
				 * add to the refcount if the arc_buf_t is
				 * not shared.
				 */
				if (arc_buf_is_shared(buf))
					continue;

				(void) refcount_remove_many(
				    &old_state->arcs_size, arc_buf_size(buf),
				    buf);
			}
			ASSERT3U(bufcnt, ==, buffers);
			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			(void) refcount_remove_many(
			    &old_state->arcs_size, arc_hdr_size(hdr), hdr);
		}
	}

	if (HDR_HAS_L1HDR(hdr))
		hdr->b_l1hdr.b_state = new_state;

	/*
	 * L2 headers should never be on the L2 state list since they don't
	 * have L1 headers allocated.
	 */
	ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
	    multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		aggsum_add(&astat_data_size, space);
		break;
	case ARC_SPACE_META:
		aggsum_add(&astat_metadata_size, space);
		break;
	case ARC_SPACE_OTHER:
		aggsum_add(&astat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		aggsum_add(&astat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		aggsum_add(&astat_l2_hdr_size, space);
		break;
	}

	if (type != ARC_SPACE_DATA)
		aggsum_add(&arc_meta_used, space);

	aggsum_add(&arc_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		aggsum_add(&astat_data_size, -space);
		break;
	case ARC_SPACE_META:
		aggsum_add(&astat_metadata_size, -space);
		break;
	case ARC_SPACE_OTHER:
		aggsum_add(&astat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		aggsum_add(&astat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		aggsum_add(&astat_l2_hdr_size, -space);
		break;
	}

	if (type != ARC_SPACE_DATA) {
		ASSERT(aggsum_compare(&arc_meta_used, space) >= 0);
		/*
		 * We use the upper bound here rather than the precise value
		 * because the arc_meta_max value doesn't need to be
		 * precise. It's only consumed by humans via arcstats.
		 */
		if (arc_meta_max < aggsum_upper_bound(&arc_meta_used))
			arc_meta_max = aggsum_upper_bound(&arc_meta_used);
		aggsum_add(&arc_meta_used, -space);
	}

	ASSERT(aggsum_compare(&arc_size, space) >= 0);
	aggsum_add(&arc_size, -space);
}
/*
 * Given a hdr and a buf, returns whether that buf can share its b_data buffer
 * with the hdr's b_pabd.
 */
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	/*
	 * The criteria for sharing a hdr's data are:
	 * 1. the hdr's compression matches the buf's compression
	 * 2. the hdr doesn't need to be byteswapped
	 * 3. the hdr isn't already being shared
	 * 4. the buf is either compressed or it is the last buf in the hdr list
	 *
	 * Criterion #4 maintains the invariant that shared uncompressed
	 * bufs must be the final buf in the hdr's b_buf list. Reading this, you
	 * might ask, "if a compressed buf is allocated first, won't that be the
	 * last thing in the list?", but in that case it's impossible to create
	 * a shared uncompressed buf anyway (because the hdr must be compressed
	 * to have the compressed buf). You might also think that #3 is
	 * sufficient to make this guarantee, however it's possible
	 * (specifically in the rare L2ARC write race mentioned in
	 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
	 * is sharable, but wasn't at the time of its allocation. Rather than
	 * allow a new shared uncompressed buf to be created and then shuffle
	 * the list around to make it the last element, this simply disallows
	 * sharing if the new buf isn't the first to be added.
	 */
	ASSERT3P(buf->b_hdr, ==, hdr);
	boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF;
	boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
	return (buf_compressed == hdr_compressed &&
	    hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
	    !HDR_SHARED_DATA(hdr) &&
	    (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
/*
 * Allocate a buf for this hdr. If you care about the data that's in the hdr,
 * or if you want a compressed buffer, pass those flags in. Returns 0 if the
 * copy was made successfully, or an error code otherwise.
 */
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
    boolean_t fill, arc_buf_t **ret)
{
	arc_buf_t *buf;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
	VERIFY(hdr->b_type == ARC_BUFC_DATA ||
	    hdr->b_type == ARC_BUFC_METADATA);
	ASSERT3P(ret, !=, NULL);
	ASSERT3P(*ret, ==, NULL);

	buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_next = hdr->b_l1hdr.b_buf;
	buf->b_flags = 0;

	add_reference(hdr, tag);

	/*
	 * We're about to change the hdr's b_flags. We must either
	 * hold the hash_lock or be undiscoverable.
	 */
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * Only honor requests for compressed bufs if the hdr is actually
	 * compressed.
	 */
	if (compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;

	/*
	 * If the hdr's data can be shared then we share the data buffer and
	 * set the appropriate bit in the hdr's b_flags to indicate the hdr is
	 * sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
	 * buffer to store the buf's data.
	 *
	 * There are two additional restrictions here because we're sharing
	 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
	 * actively involved in an L2ARC write, because if this buf is used by
	 * an arc_write() then the hdr's data buffer will be released when the
	 * write completes, even though the L2ARC write might still be using it.
	 * Second, the hdr's ABD must be linear so that the buf's user doesn't
	 * need to be ABD-aware.
	 */
	boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
	    abd_is_linear(hdr->b_l1hdr.b_pabd);

	/* Set up b_data and sharing */
	if (can_share) {
		buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
		buf->b_flags |= ARC_BUF_FLAG_SHARED;
		arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
	} else {
		buf->b_data =
		    arc_get_data_buf(hdr, arc_buf_size(buf), buf);
		ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
	}
	VERIFY3P(buf->b_data, !=, NULL);

	hdr->b_l1hdr.b_buf = buf;
	hdr->b_l1hdr.b_bufcnt += 1;

	/*
	 * If the user wants the data from the hdr, we need to either copy or
	 * decompress the data.
	 */
	if (fill) {
		return (arc_buf_fill(buf, ARC_BUF_COMPRESSED(buf) != 0));
	}

	return (0);
}
static char *arc_onloan_tag = "onloan";

static void
arc_loaned_bytes_update(int64_t delta)
{
	atomic_add_64(&arc_loaned_bytes, delta);

	/* assert that it did not wrap around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
	arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
	    is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);

	arc_loaned_bytes_update(size);

	return (buf);
}

arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type)
{
	arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
	    psize, lsize, compression_type);

	arc_loaned_bytes_update(psize);

	return (buf);
}
/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);

	arc_loaned_bytes_update(-arc_buf_size(buf));
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);

	arc_loaned_bytes_update(arc_buf_size(buf));
}
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
	l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);

	df->l2df_abd = abd;
	df->l2df_size = size;
	df->l2df_type = type;
	mutex_enter(&l2arc_free_on_write_mtx);
	list_insert_head(l2arc_free_on_write, df);
	mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;
	arc_buf_contents_t type = arc_buf_type(hdr);
	uint64_t size = arc_hdr_size(hdr);

	/* protected by hash lock, if in the hash table */
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT(state != arc_anon && state != arc_l2c_only);

		(void) refcount_remove_many(&state->arcs_esize[type],
		    size, hdr);
	}
	(void) refcount_remove_many(&state->arcs_size, size, hdr);
	if (type == ARC_BUFC_METADATA) {
		arc_space_return(size, ARC_SPACE_META);
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		arc_space_return(size, ARC_SPACE_DATA);
	}

	l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
/*
 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
 * data buffer, we transfer the refcount ownership to the hdr and update
 * the appropriate kstats.
 */
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;

	ASSERT(arc_can_share(hdr, buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * Start sharing the data buffer. We transfer the
	 * refcount ownership to the hdr since it always owns
	 * the refcount whenever an arc_buf_t is shared.
	 */
	refcount_transfer_ownership(&state->arcs_size, buf, hdr);
	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
	    HDR_ISTYPE_METADATA(hdr));
	arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
	buf->b_flags |= ARC_BUF_FLAG_SHARED;

	/*
	 * Since we've transferred ownership to the hdr we need
	 * to increment its compressed and uncompressed kstats and
	 * decrement the overhead size.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;

	ASSERT(arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * We are no longer sharing this buffer so we need
	 * to transfer its ownership to the rightful owner.
	 */
	refcount_transfer_ownership(&state->arcs_size, hdr, buf);
	arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
	abd_put(hdr->b_l1hdr.b_pabd);
	hdr->b_l1hdr.b_pabd = NULL;
	buf->b_flags &= ~ARC_BUF_FLAG_SHARED;

	/*
	 * Since the buffer is no longer shared between
	 * the arc buf and the hdr, count it as overhead.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
 * Remove an arc_buf_t from the hdr's buf list and return the last
 * arc_buf_t on the list. If no buffers remain on the list then return
 * NULL.
 */
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
	arc_buf_t *lastbuf = NULL;

	/*
	 * Remove the buf from the hdr list and locate the last
	 * remaining buffer on the list.
	 */
	while (*bufp != NULL) {
		if (*bufp == buf)
			*bufp = buf->b_next;

		/*
		 * If we've removed a buffer in the middle of
		 * the list then update the lastbuf and update
		 * bufp.
		 */
		if (*bufp != NULL) {
			lastbuf = *bufp;
			bufp = &(*bufp)->b_next;
		}
	}
	buf->b_next = NULL;
	ASSERT3P(lastbuf, !=, buf);
	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
	IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));

	return (lastbuf);
}
/*
 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
 * list and free it.
 */
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/*
	 * Free up the data associated with the buf but only if we're not
	 * sharing this with the hdr. If we are sharing it with the hdr, the
	 * hdr is responsible for doing the free.
	 */
	if (buf->b_data != NULL) {
		/*
		 * We're about to change the hdr's b_flags. We must either
		 * hold the hash_lock or be undiscoverable.
		 */
		ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		if (arc_buf_is_shared(buf)) {
			arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
		} else {
			uint64_t size = arc_buf_size(buf);
			arc_free_data_buf(hdr, buf->b_data, size, buf);
			ARCSTAT_INCR(arcstat_overhead_size, -size);
		}
		buf->b_data = NULL;

		ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
		hdr->b_l1hdr.b_bufcnt -= 1;
	}

	arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);

	if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
		/*
		 * If the current arc_buf_t is sharing its data buffer with the
		 * hdr, then reassign the hdr's b_pabd to share it with the new
		 * buffer at the end of the list. The shared buffer is always
		 * the last one on the hdr's buffer list.
		 *
		 * There is an equivalent case for compressed bufs, but since
		 * they aren't guaranteed to be the last buf in the list and
		 * that is an exceedingly rare case, we just allow that space to
		 * be wasted temporarily.
		 */
		if (lastbuf != NULL) {
			/* Only one buf can be shared at once */
			VERIFY(!arc_buf_is_shared(lastbuf));
			/* hdr is uncompressed so can't have compressed buf */
			VERIFY(!ARC_BUF_COMPRESSED(lastbuf));

			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			arc_hdr_free_pabd(hdr);

			/*
			 * We must setup a new shared block between the
			 * last buffer and the hdr. The data would have
			 * been allocated by the arc buf so we need to transfer
			 * ownership to the hdr since it's now being shared.
			 */
			arc_share_buf(hdr, lastbuf);
		}
	} else if (HDR_SHARED_DATA(hdr)) {
		/*
		 * Uncompressed shared buffers are always at the end
		 * of the list. Compressed buffers don't have the
		 * same requirements. This makes it hard to
		 * simply assert that the lastbuf is shared so
		 * we rely on the hdr's compression flags to determine
		 * if we have a compressed, shared buffer.
		 */
		ASSERT3P(lastbuf, !=, NULL);
		ASSERT(arc_buf_is_shared(lastbuf) ||
		    HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
	}

	/*
	 * Free the checksum if we're removing the last uncompressed buf from
	 * this hdr.
	 */
	if (!arc_hdr_has_uncompressed_buf(hdr)) {
		arc_cksum_free(hdr);
	}

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr)
{
	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!HDR_SHARED_DATA(hdr));

	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
static void
arc_hdr_free_pabd(arc_buf_hdr_t *hdr)
{
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);

	/*
	 * If the hdr is currently being written to the l2arc then
	 * we defer freeing the data by adding it to the l2arc_free_on_write
	 * list. The l2arc will free the data once it's finished
	 * writing it to the l2arc device.
	 */
	if (HDR_L2_WRITING(hdr)) {
		arc_hdr_free_on_write(hdr);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
		    arc_hdr_size(hdr), hdr);
	}
	hdr->b_l1hdr.b_pabd = NULL;
	hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;

	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
    enum zio_compress compression_type, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;

	VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);

	hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
	ASSERT(HDR_EMPTY(hdr));
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
	ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL);
	HDR_SET_PSIZE(hdr, psize);
	HDR_SET_LSIZE(hdr, lsize);
	hdr->b_spa = spa;
	hdr->b_type = type;
	hdr->b_flags = 0;
	arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
	arc_hdr_set_compress(hdr, compression_type);

	hdr->b_l1hdr.b_state = arc_anon;
	hdr->b_l1hdr.b_arc_access = 0;
	hdr->b_l1hdr.b_bufcnt = 0;
	hdr->b_l1hdr.b_buf = NULL;

	/*
	 * Allocate the hdr's buffer. This will contain either
	 * the compressed or uncompressed data depending on the block
	 * it references and compressed arc enablement.
	 */
	arc_hdr_alloc_pabd(hdr);
	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));

	return (hdr);
}
/*
 * Transition between the two allocation states for the arc_buf_hdr struct.
 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
 * version is used when a cache buffer is only in the L2ARC in order to reduce
 * memory usage.
 */
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
	ASSERT(HDR_HAS_L2HDR(hdr));

	arc_buf_hdr_t *nhdr;
	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;

	ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
	    (old == hdr_l2only_cache && new == hdr_full_cache));

	nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);

	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
	buf_hash_remove(hdr);

	bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);

	if (new == hdr_full_cache) {
		arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
		/*
		 * arc_access and arc_change_state need to be aware that a
		 * header has just come out of L2ARC, so we set its state to
		 * l2c_only even though it's about to change.
		 */
		nhdr->b_l1hdr.b_state = arc_l2c_only;

		/* Verify previous threads set to NULL before freeing */
		ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
	} else {
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT0(hdr->b_l1hdr.b_bufcnt);
		ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

		/*
		 * If we've reached here, we must have been called from
		 * arc_evict_hdr(), as such we should have already been
		 * removed from any ghost list we were previously on
		 * (which protects us from racing with arc_evict_state),
		 * thus no locking is needed during this check.
		 */
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));

		/*
		 * A buffer must not be moved into the arc_l2c_only
		 * state if it's not finished being written out to the
		 * l2arc device. Otherwise, the b_l1hdr.b_pabd field
		 * might try to be accessed, even though it was removed.
		 */
		VERIFY(!HDR_L2_WRITING(hdr));
		VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);

		if (hdr->b_l1hdr.b_thawed != NULL) {
			kmem_free(hdr->b_l1hdr.b_thawed, 1);
			hdr->b_l1hdr.b_thawed = NULL;
		}

		arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
	}
	/*
	 * The header has been reallocated so we need to re-insert it into any
	 * lists it was on.
	 */
	(void) buf_hash_insert(nhdr, NULL);

	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));

	mutex_enter(&dev->l2ad_mtx);

	/*
	 * We must place the realloc'ed header back into the list at
	 * the same spot. Otherwise, if it's placed earlier in the list,
	 * l2arc_write_buffers() could find it during the function's
	 * write phase, and try to write it out to the l2arc.
	 */
	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
	list_remove(&dev->l2ad_buflist, hdr);

	mutex_exit(&dev->l2ad_mtx);

	/*
	 * Since we're using the pointer address as the tag when
	 * incrementing and decrementing the l2ad_alloc refcount, we
	 * must remove the old pointer (that we're about to destroy) and
	 * add the new pointer to the refcount. Otherwise we'd remove
	 * the wrong pointer address when calling arc_hdr_destroy() later.
	 */

	(void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
	(void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);

	buf_discard_identity(hdr);
	kmem_cache_free(old, hdr);

	return (nhdr);
}
/*
 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
 * The buf is returned thawed since we expect the consumer to modify it.
 */
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
	    ZIO_COMPRESS_OFF, type);
	ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));

	arc_buf_t *buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, tag, B_FALSE, B_FALSE, &buf));
	arc_buf_thaw(buf);

	return (buf);
}
/*
 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use
 * this for bufs containing metadata.
 */
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type)
{
	ASSERT3U(lsize, >, 0);
	ASSERT3U(lsize, >=, psize);
	ASSERT(compression_type > ZIO_COMPRESS_OFF);
	ASSERT(compression_type < ZIO_COMPRESS_FUNCTIONS);

	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
	    compression_type, ARC_BUFC_DATA);
	ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));

	arc_buf_t *buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, tag, B_TRUE, B_FALSE, &buf));
	arc_buf_thaw(buf);
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

	if (!arc_buf_is_shared(buf)) {
		/*
		 * To ensure that the hdr has the correct data in it if we call
		 * arc_decompress() on this buf before it's been written to
		 * disk, it's easiest if we just set up sharing between the
		 * buf and the hdr.
		 */
		ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd));
		arc_hdr_free_pabd(hdr);
		arc_share_buf(hdr, buf);
	}

	return (buf);
}
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	uint64_t psize = arc_hdr_size(hdr);

	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
	ASSERT(HDR_HAS_L2HDR(hdr));

	list_remove(&dev->l2ad_buflist, hdr);

	ARCSTAT_INCR(arcstat_l2_psize, -psize);
	ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

	vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);

	(void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
	arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
		    hdr->b_l1hdr.b_bufcnt > 0);
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	}
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!HDR_IN_HASH_TABLE(hdr));

	if (!HDR_EMPTY(hdr))
		buf_discard_identity(hdr);

	if (HDR_HAS_L2HDR(hdr)) {
		l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
		boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);

		if (!buflist_held)
			mutex_enter(&dev->l2ad_mtx);

		/*
		 * Even though we checked this conditional above, we
		 * need to check this again now that we have the
		 * l2ad_mtx. This is because we could be racing with
		 * another thread calling l2arc_evict() which might have
		 * destroyed this header's L2 portion as we were waiting
		 * to acquire the l2ad_mtx. If that happens, we don't
		 * want to re-destroy the header's L2 portion.
		 */
		if (HDR_HAS_L2HDR(hdr))
			arc_hdr_l2hdr_destroy(hdr);

		if (!buflist_held)
			mutex_exit(&dev->l2ad_mtx);
	}

	if (HDR_HAS_L1HDR(hdr)) {
		arc_cksum_free(hdr);

		while (hdr->b_l1hdr.b_buf != NULL)
			arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);

		if (hdr->b_l1hdr.b_thawed != NULL) {
			kmem_free(hdr->b_l1hdr.b_thawed, 1);
			hdr->b_l1hdr.b_thawed = NULL;
		}

		if (hdr->b_l1hdr.b_pabd != NULL) {
			arc_hdr_free_pabd(hdr);
		}
	}

	ASSERT3P(hdr->b_hash_next, ==, NULL);
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
		kmem_cache_free(hdr_full_cache, hdr);
	} else {
		kmem_cache_free(hdr_l2only_cache, hdr);
	}
}
void
arc_buf_destroy(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		VERIFY0(remove_reference(hdr, NULL, tag));
		arc_hdr_destroy(hdr);
		return;
	}

	mutex_enter(hash_lock);
	ASSERT3P(hdr, ==, buf->b_hdr);
	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
	ASSERT3P(buf->b_data, !=, NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	arc_buf_destroy_impl(buf);
	mutex_exit(hash_lock);
}
/*
 * Evict the arc_buf_hdr that is provided as a parameter. The resultant
 * state of the header is dependent on its state prior to entering this
 * function. The following transitions are possible:
 *
 *    - arc_mru -> arc_mru_ghost
 *    - arc_mfu -> arc_mfu_ghost
 *    - arc_mru_ghost -> arc_l2c_only
 *    - arc_mru_ghost -> deleted
 *    - arc_mfu_ghost -> arc_l2c_only
 *    - arc_mfu_ghost -> deleted
 */
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
	arc_state_t *evicted_state, *state;
	int64_t bytes_evicted = 0;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(HDR_HAS_L1HDR(hdr));

	state = hdr->b_l1hdr.b_state;
	if (GHOST_STATE(state)) {
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);

		/*
		 * l2arc_write_buffers() relies on a header's L1 portion
		 * (i.e. its b_pabd field) during its write phase.
		 * Thus, we cannot push a header onto the arc_l2c_only
		 * state (removing its L1 piece) until the header is
		 * done being written to the l2arc.
		 */
		if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
			ARCSTAT_BUMP(arcstat_evict_l2_skip);
			return (bytes_evicted);
		}

		ARCSTAT_BUMP(arcstat_deleted);
		bytes_evicted += HDR_GET_LSIZE(hdr);

		DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);

		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		if (HDR_HAS_L2HDR(hdr)) {
			/*
			 * This buffer is cached on the 2nd Level ARC;
			 * don't destroy the header.
			 */
			arc_change_state(arc_l2c_only, hdr, hash_lock);
			/*
			 * dropping from L1+L2 cached to L2-only,
			 * realloc to remove the L1 header.
			 */
			hdr = arc_hdr_realloc(hdr, hdr_full_cache,
			    hdr_l2only_cache);
		} else {
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		}
		return (bytes_evicted);
	}

	ASSERT(state == arc_mru || state == arc_mfu);
	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	/* prefetch buffers have a minimum lifespan */
	if (HDR_IO_IN_PROGRESS(hdr) ||
	    ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
	    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
	    arc_min_prefetch_lifespan)) {
		ARCSTAT_BUMP(arcstat_evict_skip);
		return (bytes_evicted);
	}

	ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
	while (hdr->b_l1hdr.b_buf) {
		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
		if (!mutex_tryenter(&buf->b_evict_lock)) {
			ARCSTAT_BUMP(arcstat_mutex_miss);
			break;
		}
		if (buf->b_data != NULL)
			bytes_evicted += HDR_GET_LSIZE(hdr);
		mutex_exit(&buf->b_evict_lock);
		arc_buf_destroy_impl(buf);
	}

	if (HDR_HAS_L2HDR(hdr)) {
		ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
	} else {
		if (l2arc_write_eligible(hdr->b_spa, hdr)) {
			ARCSTAT_INCR(arcstat_evict_l2_eligible,
			    HDR_GET_LSIZE(hdr));
		} else {
			ARCSTAT_INCR(arcstat_evict_l2_ineligible,
			    HDR_GET_LSIZE(hdr));
		}
	}

	if (hdr->b_l1hdr.b_bufcnt == 0) {
		arc_cksum_free(hdr);

		bytes_evicted += arc_hdr_size(hdr);

		/*
		 * If this hdr is being evicted and has a compressed
		 * buffer then we discard it here before we change states.
		 * This ensures that the accounting is updated correctly
		 * in arc_free_data_impl().
		 */
		arc_hdr_free_pabd(hdr);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
		DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
	}

	return (bytes_evicted);
}
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
    uint64_t spa, int64_t bytes)
{
	multilist_sublist_t *mls;
	uint64_t bytes_evicted = 0;
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	int evict_count = 0;

	ASSERT3P(marker, !=, NULL);
	IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);

	mls = multilist_sublist_lock(ml, idx);

	for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
	    hdr = multilist_sublist_prev(mls, marker)) {
		if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
		    (evict_count >= zfs_arc_evict_batch_limit))
			break;

		/*
		 * To keep our iteration location, move the marker
		 * forward. Since we're not holding hdr's hash lock, we
		 * must be very careful and not remove 'hdr' from the
		 * sublist. Otherwise, other consumers might mistake the
		 * 'hdr' as not being on a sublist when they call the
		 * multilist_link_active() function (they all rely on
		 * the hash lock protecting concurrent insertions and
		 * removals). multilist_sublist_move_forward() was
		 * specifically implemented to ensure this is the case
		 * (only 'marker' will be removed and re-inserted).
		 */
		multilist_sublist_move_forward(mls, marker);

		/*
		 * The only case where the b_spa field should ever be
		 * zero, is the marker headers inserted by
		 * arc_evict_state(). It's possible for multiple threads
		 * to be calling arc_evict_state() concurrently (e.g.
		 * dsl_pool_close() and zio_inject_fault()), so we must
		 * skip any markers we see from these other threads.
		 */
		if (hdr->b_spa == 0)
			continue;

		/* we're only interested in evicting buffers of a certain spa */
		if (spa != 0 && hdr->b_spa != spa) {
			ARCSTAT_BUMP(arcstat_evict_skip);
			continue;
		}

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We aren't calling this function from any code path
		 * that would already be holding a hash lock, so we're
		 * asserting on this assumption to be defensive in case
		 * this ever changes. Without this check, it would be
		 * possible to incorrectly increment arcstat_mutex_miss
		 * below (e.g. if the code changed such that we called
		 * this function with a hash lock held).
		 */
		ASSERT(!MUTEX_HELD(hash_lock));

		if (mutex_tryenter(hash_lock)) {
			uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
			mutex_exit(hash_lock);

			bytes_evicted += evicted;

			/*
			 * If evicted is zero, arc_evict_hdr() must have
			 * decided to skip this header, don't increment
			 * evict_count in this case.
			 */
			if (evicted != 0)
				evict_count++;

			/*
			 * If arc_size isn't overflowing, signal any
			 * threads that might happen to be waiting.
			 *
			 * For each header evicted, we wake up a single
			 * thread. If we used cv_broadcast, we could
			 * wake up "too many" threads causing arc_size
			 * to significantly overflow arc_c; since
			 * arc_get_data_impl() doesn't check for overflow
			 * when it's woken up (it doesn't because it's
			 * possible for the ARC to be overflowing while
			 * full of un-evictable buffers, and the
			 * function should proceed in this case).
			 *
			 * If threads are left sleeping, due to not
			 * using cv_broadcast, they will be woken up
			 * just before arc_reclaim_thread() sleeps.
			 */
			mutex_enter(&arc_reclaim_lock);
			if (!arc_is_overflowing())
				cv_signal(&arc_reclaim_waiters_cv);
			mutex_exit(&arc_reclaim_lock);
		} else {
			ARCSTAT_BUMP(arcstat_mutex_miss);
		}
	}

	multilist_sublist_unlock(mls);

	return (bytes_evicted);
}
/*
 * Evict buffers from the given arc state, until we've removed the
 * specified number of bytes. Move the removed buffers to the
 * appropriate evict state.
 *
 * This function makes a "best effort". It skips over any buffers
 * it can't get a hash_lock on, and so, may not catch all candidates.
 * It may also return without evicting as much space as requested.
 *
 * If bytes is specified using the special value ARC_EVICT_ALL, this
 * will evict all available (i.e. unlocked and evictable) buffers from
 * the given arc state; which is used by arc_flush().
 */
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
    arc_buf_contents_t type)
{
	uint64_t total_evicted = 0;
	multilist_t *ml = state->arcs_list[type];
	int num_sublists;
	arc_buf_hdr_t **markers;

	IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);

	num_sublists = multilist_get_num_sublists(ml);

	/*
	 * If we've tried to evict from each sublist, made some
	 * progress, but still have not hit the target number of bytes
	 * to evict, we want to keep trying. The markers allow us to
	 * pick up where we left off for each individual sublist, rather
	 * than starting from the tail each time.
	 */
	markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
	for (int i = 0; i < num_sublists; i++) {
		markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);

		/*
		 * A b_spa of 0 is used to indicate that this header is
		 * a marker. This fact is used in arc_adjust_type() and
		 * arc_evict_state_impl().
		 */
		markers[i]->b_spa = 0;

		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		multilist_sublist_insert_tail(mls, markers[i]);
		multilist_sublist_unlock(mls);
	}

	/*
	 * While we haven't hit our target number of bytes to evict, or
	 * we're evicting all available buffers.
	 */
	while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
		/*
		 * Start eviction using a randomly selected sublist,
		 * this is to try and evenly balance eviction across all
		 * sublists. Always starting at the same sublist
		 * (e.g. index 0) would cause evictions to favor certain
		 * sublists over others.
		 */
		int sublist_idx = multilist_get_random_index(ml);
		uint64_t scan_evicted = 0;

		for (int i = 0; i < num_sublists; i++) {
			uint64_t bytes_remaining;
			uint64_t bytes_evicted;

			if (bytes == ARC_EVICT_ALL)
				bytes_remaining = ARC_EVICT_ALL;
			else if (total_evicted < bytes)
				bytes_remaining = bytes - total_evicted;
			else
				break;

			bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
			    markers[sublist_idx], spa, bytes_remaining);

			scan_evicted += bytes_evicted;
			total_evicted += bytes_evicted;

			/* we've reached the end, wrap to the beginning */
			if (++sublist_idx >= num_sublists)
				sublist_idx = 0;
		}

		/*
		 * If we didn't evict anything during this scan, we have
		 * no reason to believe we'll evict more during another
		 * scan, so break the loop.
		 */
		if (scan_evicted == 0) {
			/* This isn't possible, let's make that obvious */
			ASSERT3S(bytes, !=, 0);

			/*
			 * When bytes is ARC_EVICT_ALL, the only way to
			 * break the loop is when scan_evicted is zero.
			 * In that case, we actually have evicted enough,
			 * so we don't want to increment the kstat.
			 */
			if (bytes != ARC_EVICT_ALL) {
				ASSERT3S(total_evicted, <, bytes);
				ARCSTAT_BUMP(arcstat_evict_not_enough);
			}

			break;
		}
	}

	for (int i = 0; i < num_sublists; i++) {
		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		multilist_sublist_remove(mls, markers[i]);
		multilist_sublist_unlock(mls);

		kmem_cache_free(hdr_full_cache, markers[i]);
	}
	kmem_free(markers, sizeof (*markers) * num_sublists);

	return (total_evicted);
}
/*
 * Flush all "evictable" data of the given type from the arc state
 * specified. This will not evict any "active" buffers (i.e. referenced).
 *
 * When 'retry' is set to B_FALSE, the function will make a single pass
 * over the state and evict any buffers that it can. Since it doesn't
 * continually retry the eviction, it might end up leaving some buffers
 * in the ARC due to lock misses.
 *
 * When 'retry' is set to B_TRUE, the function will continually retry the
 * eviction until *all* evictable buffers have been removed from the
 * state. As a result, if concurrent insertions into the state are
 * allowed (e.g. if the ARC isn't shutting down), this function might
 * wind up in an infinite loop, continually trying to evict buffers.
 */
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
    boolean_t retry)
{
	uint64_t evicted = 0;

	while (refcount_count(&state->arcs_esize[type]) != 0) {
		evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);

		if (!retry)
			break;
	}

	return (evicted);
}
/*
 * Evict the specified number of bytes from the state specified,
 * restricting eviction to the spa and type given. This function
 * prevents us from trying to evict more from a state's list than
 * is "evictable", and to skip evicting altogether when passed a
 * negative value for "bytes". In contrast, arc_evict_state() will
 * evict everything it can, when passed a negative value for "bytes".
 */
static uint64_t
arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
    arc_buf_contents_t type)
{
	int64_t delta;

	if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
		delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
		return (arc_evict_state(state, spa, delta, type));
	}

	return (0);
}
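
/*
 * Illustrative example (hypothetical, not part of the original code): if a
 * caller asks for bytes = 512K but only 128K of this state and type is
 * evictable (arcs_esize), delta is clamped to 128K before calling
 * arc_evict_state(). If bytes is negative, e.g. because the state is
 * already under its target, nothing is evicted and 0 is returned.
 */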
/*
 * Evict metadata buffers from the cache, such that arc_meta_used is
 * capped by the arc_meta_limit tunable.
 */
static uint64_t
arc_adjust_meta(uint64_t meta_used)
{
	uint64_t total_evicted = 0;
	int64_t target;

	/*
	 * If we're over the meta limit, we want to evict enough
	 * metadata to get back under the meta limit. We don't want to
	 * evict so much that we drop the MRU below arc_p, though. If
	 * we're over the meta limit more than we're over arc_p, we
	 * evict some from the MRU here, and some from the MFU below.
	 */
	target = MIN((int64_t)(meta_used - arc_meta_limit),
	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
	    refcount_count(&arc_mru->arcs_size) - arc_p));

	total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);

	/*
	 * Similar to the above, we want to evict enough bytes to get us
	 * below the meta limit, but not so much as to drop us below the
	 * space allotted to the MFU (which is defined as arc_c - arc_p).
	 */
	target = MIN((int64_t)(meta_used - arc_meta_limit),
	    (int64_t)(refcount_count(&arc_mfu->arcs_size) -
	    (arc_c - arc_p)));

	total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);

	return (total_evicted);
}
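
/*
 * Illustrative example (hypothetical, approximate numbers, not part of the
 * original code): with arc_meta_limit = 1 GB, meta_used = 1.25 GB,
 * arc_p = 2 GB and anon + mru = 2.1 GB, the first target is roughly
 * MIN(256 MB, 100 MB) = 100 MB of metadata taken from the MRU, so the MRU
 * is not pushed below arc_p; the remaining overage is then requested from
 * the MFU, capped so the MFU keeps its allotment of arc_c - arc_p.
 */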
/*
 * Return the type of the oldest buffer in the given arc state
 *
 * This function will select a random sublist of type ARC_BUFC_DATA and
 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
 * is compared, and the type which contains the "older" buffer will be
 * returned.
 */
static arc_buf_contents_t
arc_adjust_type(arc_state_t *state)
{
	multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA];
	multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA];
	int data_idx = multilist_get_random_index(data_ml);
	int meta_idx = multilist_get_random_index(meta_ml);
	multilist_sublist_t *data_mls;
	multilist_sublist_t *meta_mls;
	arc_buf_contents_t type;
	arc_buf_hdr_t *data_hdr;
	arc_buf_hdr_t *meta_hdr;

	/*
	 * We keep the sublist lock until we're finished, to prevent
	 * the headers from being destroyed via arc_evict_state().
	 */
	data_mls = multilist_sublist_lock(data_ml, data_idx);
	meta_mls = multilist_sublist_lock(meta_ml, meta_idx);

	/*
	 * These two loops are to ensure we skip any markers that
	 * might be at the tail of the lists due to arc_evict_state().
	 */
	for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
	    data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
		if (data_hdr->b_spa != 0)
			break;
	}

	for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
	    meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
		if (meta_hdr->b_spa != 0)
			break;
	}

	if (data_hdr == NULL && meta_hdr == NULL) {
		type = ARC_BUFC_DATA;
	} else if (data_hdr == NULL) {
		ASSERT3P(meta_hdr, !=, NULL);
		type = ARC_BUFC_METADATA;
	} else if (meta_hdr == NULL) {
		ASSERT3P(data_hdr, !=, NULL);
		type = ARC_BUFC_DATA;
	} else {
		ASSERT3P(data_hdr, !=, NULL);
		ASSERT3P(meta_hdr, !=, NULL);

		/* The headers can't be on the sublist without an L1 header */
		ASSERT(HDR_HAS_L1HDR(data_hdr));
		ASSERT(HDR_HAS_L1HDR(meta_hdr));

		if (data_hdr->b_l1hdr.b_arc_access <
		    meta_hdr->b_l1hdr.b_arc_access) {
			type = ARC_BUFC_DATA;
		} else {
			type = ARC_BUFC_METADATA;
		}
	}

	multilist_sublist_unlock(meta_mls);
	multilist_sublist_unlock(data_mls);

	return (type);
}
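
/*
 * Illustrative example (hypothetical, not part of the original code): if
 * the tail of the sampled data sublist was last touched at b_arc_access =
 * 1000 ticks and the tail of the metadata sublist at 1500 ticks, the data
 * buffer is older, ARC_BUFC_DATA is returned, and arc_adjust() will evict
 * data before metadata from this state.
 */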
/*
 * Evict buffers from the cache, such that arc_size is capped by arc_c.
 */
static uint64_t
arc_adjust(void)
{
	uint64_t total_evicted = 0;
	uint64_t bytes;
	int64_t target;
	uint64_t asize = aggsum_value(&arc_size);
	uint64_t ameta = aggsum_value(&arc_meta_used);

	/*
	 * If we're over arc_meta_limit, we want to correct that before
	 * potentially evicting data buffers below.
	 */
	total_evicted += arc_adjust_meta(ameta);

	/*
	 * Adjust MRU size
	 *
	 * If we're over the target cache size, we want to evict enough
	 * from the list to get back to our target size. We don't want
	 * to evict too much from the MRU, such that it drops below
	 * arc_p. So, if we're over our target cache size more than
	 * the MRU is over arc_p, we'll evict enough to get back to
	 * arc_p here, and then evict more from the MFU below.
	 */
	target = MIN((int64_t)(asize - arc_c),
	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
	    refcount_count(&arc_mru->arcs_size) + ameta - arc_p));

	/*
	 * If we're below arc_meta_min, always prefer to evict data.
	 * Otherwise, try to satisfy the requested number of bytes to
	 * evict from the type which contains older buffers; in an
	 * effort to keep newer buffers in the cache regardless of their
	 * type. If we cannot satisfy the number of bytes from this
	 * type, spill over into the next type.
	 */
	if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
	    ameta > arc_meta_min) {
		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * metadata, we try to get the rest from data.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
	} else {
		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * data, we try to get the rest from metadata.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 *
	 * Now that we've tried to evict enough from the MRU to get its
	 * size back to arc_p, if we're still above the target cache
	 * size, we evict the rest from the MFU.
	 */
	target = asize - arc_c;

	if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
	    ameta > arc_meta_min) {
		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * metadata, we try to get the rest from data.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
	} else {
		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * data, we try to get the rest from metadata.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
	}

	/*
	 * Adjust ghost lists
	 *
	 * In addition to the above, the ARC also defines target values
	 * for the ghost lists. The sum of the mru list and mru ghost
	 * list should never exceed the target size of the cache, and
	 * the sum of the mru list, mfu list, mru ghost list, and mfu
	 * ghost list should never exceed twice the target size of the
	 * cache. The following logic enforces these limits on the ghost
	 * caches, and evicts from them as needed.
	 */
	target = refcount_count(&arc_mru->arcs_size) +
	    refcount_count(&arc_mru_ghost->arcs_size) - arc_c;

	bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
	total_evicted += bytes;

	target -= bytes;

	total_evicted +=
	    arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);

	/*
	 * We assume the sum of the mru list and mfu list is less than
	 * or equal to arc_c (we enforced this above), which means we
	 * can use the simpler of the two equations below:
	 *
	 *	mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
	 *		    mru ghost + mfu ghost <= arc_c
	 */
	target = refcount_count(&arc_mru_ghost->arcs_size) +
	    refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;

	bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
	total_evicted += bytes;

	target -= bytes;

	total_evicted +=
	    arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);

	return (total_evicted);
}
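
/*
 * Illustrative example (hypothetical, not part of the original code): the
 * ghost-list targets above enforce  mru + mru_ghost <= arc_c  and
 * mru_ghost + mfu_ghost <= arc_c.  With arc_c = 4 GB, mru = 3 GB and
 * mru_ghost = 1.5 GB, the first ghost target is 0.5 GB, so roughly 0.5 GB
 * of ghost headers are trimmed to restore the invariant before the
 * combined ghost target is checked.
 */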
void
arc_flush(spa_t *spa, boolean_t retry)
{
	uint64_t guid = 0;

	/*
	 * If retry is B_TRUE, a spa must not be specified since we have
	 * no good way to determine if all of a spa's buffers have been
	 * evicted from an arc state.
	 */
	ASSERT(!retry || spa == 0);

	if (spa != NULL)
		guid = spa_load_guid(spa);

	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}
void
arc_shrink(int64_t to_free)
{
	uint64_t asize = aggsum_value(&arc_size);
	if (arc_c > arc_c_min) {

		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (asize < arc_c)
			arc_c = MAX(asize, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (asize > arc_c)
		(void) arc_adjust();
}
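
/*
 * Illustrative example (hypothetical, not part of the original code): with
 * arc_c = 8 GB, arc_c_min = 1 GB and to_free = 512 MB, arc_c drops to
 * 7.5 GB and arc_p is reduced by arc_p >> arc_shrink_shift; arc_adjust()
 * is then run only if the current ARC size still exceeds the new arc_c.
 */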
typedef enum free_memory_reason_t {
	FMR_UNKNOWN,
	FMR_NEEDFREE,
	FMR_LOTSFREE,
	FMR_SWAPFS_MINFREE,
	FMR_PAGES_PP_MAXIMUM,
	FMR_HEAP_ARENA,
	FMR_ZIO_ARENA,
} free_memory_reason_t;

int64_t last_free_memory;
free_memory_reason_t last_free_reason;

/*
 * Additional reserve of pages for pp_reserve.
 */
int64_t arc_pages_pp_reserve = 64;

/*
 * Additional reserve of pages for swapfs.
 */
int64_t arc_swapfs_reserve = 64;
/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed. Positive if there is sufficient free memory, negative indicates
 * the amount of memory that needs to be freed up.
 */
static int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	int64_t n;
	free_memory_reason_t r = FMR_UNKNOWN;

#ifdef _KERNEL
	if (needfree > 0) {
		n = PAGESIZE * (-needfree);
		if (n < lowest) {
			lowest = n;
			r = FMR_NEEDFREE;
		}
	}

	/*
	 * check that we're out of range of the pageout scanner. It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages. We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
	if (n < lowest) {
		lowest = n;
		r = FMR_LOTSFREE;
	}

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed. anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages. We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
	    desfree - arc_swapfs_reserve);
	if (n < lowest) {
		lowest = n;
		r = FMR_SWAPFS_MINFREE;
	}

	/*
	 * Check that we have enough availrmem that memory locking (e.g., via
	 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
	 * stores the number of pages that cannot be locked; when availrmem
	 * drops below pages_pp_maximum, page locking mechanisms such as
	 * page_pp_lock() will fail.)
	 */
	n = PAGESIZE * (availrmem - pages_pp_maximum -
	    arc_pages_pp_reserve);
	if (n < lowest) {
		lowest = n;
		r = FMR_PAGES_PP_MAXIMUM;
	}

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory. Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system. However, this is generally fixed at 25 pages
	 * which is so low that it's useless. In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated. (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	n = (int64_t)vmem_size(heap_arena, VMEM_FREE) -
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
	if (n < lowest) {
		lowest = n;
		r = FMR_HEAP_ARENA;
	}
#endif

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this arena remains
	 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
	 *
	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
	 * memory (in the zio_arena) free, which can avoid memory
	 * fragmentation issues.
	 */
	if (zio_arena != NULL) {
		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
		    (vmem_size(zio_arena, VMEM_ALLOC) >>
		    arc_zio_arena_free_shift);
		if (n < lowest) {
			lowest = n;
			r = FMR_ZIO_ARENA;
		}
	}
#else
	/* Every 100 calls, free a small amount */
	if (spa_get_random(100) == 0)
		lowest = -1024;
#endif

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}
/*
 * Determine if the system is under memory pressure and is asking
 * to reclaim memory. A return value of B_TRUE indicates that the system
 * is under memory pressure and that the arc should adjust accordingly.
 */
static boolean_t
arc_reclaim_needed(void)
{
	return (arc_available_memory() < 0);
}
static void
arc_kmem_reap_now(void)
{
	size_t i;
	kmem_cache_t *prev_cache = NULL;
	kmem_cache_t *prev_data_cache = NULL;
	extern kmem_cache_t *zio_buf_cache[];
	extern kmem_cache_t *zio_data_buf_cache[];
	extern kmem_cache_t *range_seg_cache;
	extern kmem_cache_t *abd_chunk_cache;

#ifdef _KERNEL
	if (aggsum_compare(&arc_meta_used, arc_meta_limit) >= 0) {
		/*
		 * We are exceeding our meta-data cache limit.
		 * Purge some DNLC entries to release holds on meta-data.
		 */
		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
	}
#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * If a kmem reap is already active, don't schedule more. We must
	 * check for this because kmem_cache_reap_soon() won't actually
	 * block on the cache being reaped (this is to prevent callers from
	 * becoming implicitly blocked by a system-wide kmem reap -- which,
	 * on a system with many, many full magazines, can take minutes).
	 */
	if (kmem_cache_reap_active())
		return;

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_soon(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_soon(zio_data_buf_cache[i]);
		}
	}
	kmem_cache_reap_soon(abd_chunk_cache);
	kmem_cache_reap_soon(buf_cache);
	kmem_cache_reap_soon(hdr_full_cache);
	kmem_cache_reap_soon(hdr_l2only_cache);
	kmem_cache_reap_soon(range_seg_cache);

	if (zio_arena != NULL) {
		/*
		 * Ask the vmem arena to reclaim unused memory from its
		 * quantum caches.
		 */
		vmem_qcache_reap(zio_arena);
	}
}
/*
 * Threads can block in arc_get_data_impl() waiting for this thread to evict
 * enough data and signal them to proceed. When this happens, the threads in
 * arc_get_data_impl() are sleeping while holding the hash lock for their
 * particular arc header. Thus, we must be careful to never sleep on a
 * hash lock in this thread. This is to prevent the following deadlock:
 *
 *  - Thread A sleeps on CV in arc_get_data_impl() holding hash lock "L",
 *    waiting for the reclaim thread to signal it.
 *
 *  - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
 *    fails, and goes to sleep forever.
 *
 * This possible deadlock is avoided by always acquiring a hash lock
 * using mutex_tryenter() from arc_reclaim_thread().
 */
static void
arc_reclaim_thread(void *unused)
{
	hrtime_t growtime = 0;
	hrtime_t kmem_reap_time = 0;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_lock);
	while (!arc_reclaim_thread_exit) {
		uint64_t evicted = 0;

		/*
		 * This is necessary in order for the mdb ::arc dcmd to
		 * show up to date information. Since the ::arc command
		 * does not call the kstat's update function, without
		 * this call, the command may show stale stats for the
		 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
		 * with this change, the data might be up to 1 second
		 * out of date; but that should suffice. The arc_state_t
		 * structures can be queried directly if more accurate
		 * information is needed.
		 */
		if (arc_ksp != NULL)
			arc_ksp->ks_update(arc_ksp, KSTAT_READ);

		mutex_exit(&arc_reclaim_lock);

		/*
		 * We call arc_adjust() before (possibly) calling
		 * arc_kmem_reap_now(), so that we can wake up
		 * arc_get_data_impl() sooner.
		 */
		evicted = arc_adjust();

		int64_t free_memory = arc_available_memory();
		if (free_memory < 0) {
			hrtime_t curtime = gethrtime();
			arc_no_grow = B_TRUE;

			/*
			 * Wait at least zfs_grow_retry (default 60) seconds
			 * before considering growing.
			 */
			growtime = curtime + SEC2NSEC(arc_grow_retry);

			/*
			 * Wait at least arc_kmem_cache_reap_retry_ms
			 * between arc_kmem_reap_now() calls. Without
			 * this check it is possible to end up in a
			 * situation where we spend lots of time
			 * reaping caches, while we're near arc_c_min.
			 */
			if (curtime >= kmem_reap_time) {
				arc_kmem_reap_now();
				kmem_reap_time = gethrtime() +
				    MSEC2NSEC(arc_kmem_cache_reap_retry_ms);
			}

			/*
			 * If we are still low on memory, shrink the ARC
			 * so that we have arc_shrink_min free space.
			 */
			free_memory = arc_available_memory();

			int64_t to_free =
			    (arc_c >> arc_shrink_shift) - free_memory;
			if (to_free > 0) {
#ifdef _KERNEL
				to_free = MAX(to_free, ptob(needfree));
#endif
				arc_shrink(to_free);
			}
		} else if (free_memory < arc_c >> arc_no_grow_shift) {
			arc_no_grow = B_TRUE;
		} else if (gethrtime() >= growtime) {
			arc_no_grow = B_FALSE;
		}

		mutex_enter(&arc_reclaim_lock);

		/*
		 * If evicted is zero, we couldn't evict anything via
		 * arc_adjust(). This could be due to hash lock
		 * collisions, but more likely due to the majority of
		 * arc buffers being unevictable. Therefore, even if
		 * arc_size is above arc_c, another pass is unlikely to
		 * be helpful and could potentially cause us to enter an
		 * infinite loop.
		 */
		if (aggsum_compare(&arc_size, arc_c) <= 0 || evicted == 0) {
			/*
			 * We're either no longer overflowing, or we
			 * can't evict anything more, so we should wake
			 * up any threads before we go to sleep.
			 */
			cv_broadcast(&arc_reclaim_waiters_cv);

			/*
			 * Block until signaled, or after one second (we
			 * might need to perform arc_kmem_reap_now()
			 * even if we aren't being signalled)
			 */
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_hires(&arc_reclaim_thread_cv,
			    &arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
		}
	}

	arc_reclaim_thread_exit = B_FALSE;
	cv_broadcast(&arc_reclaim_thread_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_lock */
	thread_exit();
}
/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from. This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;
	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
	int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
	int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);

	if (state == arc_l2c_only)
		return;

	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc_mru_ghost) {
		mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
		mult = MIN(mult, 10); /* avoid wild arc_p adjustment */

		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		uint64_t delta;

		mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
		mult = MIN(mult, 10);

		delta = MIN(bytes * mult, arc_p);
		arc_p = MAX(arc_p_min, arc_p - delta);
	}
	ASSERT((int64_t)arc_p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thread_cv);
		return;
	}

	if (arc_c >= arc_c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (aggsum_compare(&arc_size, arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) >
	    0) {
		atomic_add_64(&arc_c, (int64_t)bytes);
		if (arc_c > arc_c_max)
			arc_c = arc_c_max;
		else if (state == arc_anon)
			atomic_add_64(&arc_p, (int64_t)bytes);
		if (arc_p > arc_c)
			arc_p = arc_c;
	}
	ASSERT((int64_t)arc_p >= 0);
}
/*
 * Check if arc_size has grown past our upper threshold, determined by
 * zfs_arc_overflow_shift.
 */
static boolean_t
arc_is_overflowing(void)
{
	/* Always allow at least one block of overflow */
	uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
	    arc_c >> zfs_arc_overflow_shift);

	/*
	 * We just compare the lower bound here for performance reasons. Our
	 * primary goals are to make sure that the arc never grows without
	 * bound, and that it can reach its maximum size. This check
	 * accomplishes both goals. The maximum amount we could run over by is
	 * 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
	 * in the ARC. In practice, that's in the tens of MB, which is low
	 * enough to be safe.
	 */
	return (aggsum_lower_bound(&arc_size) >= arc_c + overflow);
}
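
/*
 * Illustrative example (hypothetical, not part of the original code): with
 * arc_c = 1 GB and zfs_arc_overflow_shift = 8, the allowed slack is
 * MAX(SPA_MAXBLOCKSIZE, 1 GB / 256) = MAX(16 MB, 4 MB) = 16 MB, so the ARC
 * is considered overflowing only once aggsum_lower_bound(&arc_size) reaches
 * arc_c + 16 MB.
 */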
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
	arc_buf_contents_t type = arc_buf_type(hdr);

	arc_get_data_impl(hdr, size, tag);
	if (type == ARC_BUFC_METADATA) {
		return (abd_alloc(size, B_TRUE));
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		return (abd_alloc(size, B_FALSE));
	}
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
	arc_buf_contents_t type = arc_buf_type(hdr);

	arc_get_data_impl(hdr, size, tag);
	if (type == ARC_BUFC_METADATA) {
		return (zio_buf_alloc(size));
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		return (zio_data_buf_alloc(size));
	}
}
/*
 * Allocate a block and return it to the caller. If we are hitting the
 * hard limit for the cache size, we must sleep, waiting for the eviction
 * thread to catch up. If we're past the target size but below the hard
 * limit, we'll only signal the reclaim thread and continue on.
 */
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;
	arc_buf_contents_t type = arc_buf_type(hdr);

	arc_adapt(size, state);

	/*
	 * If arc_size is currently overflowing, and has grown past our
	 * upper limit, we must be adding data faster than the evict
	 * thread can evict. Thus, to ensure we don't compound the
	 * problem by adding more data and forcing arc_size to grow even
	 * further past its target size, we halt and wait for the
	 * eviction thread to catch up.
	 *
	 * It's also possible that the reclaim thread is unable to evict
	 * enough buffers to get arc_size below the overflow limit (e.g.
	 * due to buffers being un-evictable, or hash lock collisions).
	 * In this case, we want to proceed regardless of whether we're
	 * overflowing; thus we don't use a while loop here.
	 */
	if (arc_is_overflowing()) {
		mutex_enter(&arc_reclaim_lock);

		/*
		 * Now that we've acquired the lock, we may no longer be
		 * over the overflow limit, so let's check.
		 *
		 * We're ignoring the case of spurious wake ups. If that
		 * were to happen, it'd let this thread consume an ARC
		 * buffer before it should have (i.e. before we're under
		 * the overflow limit and were signalled by the reclaim
		 * thread). As long as that is a rare occurrence, it
		 * shouldn't cause any harm.
		 */
		if (arc_is_overflowing()) {
			cv_signal(&arc_reclaim_thread_cv);
			cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock);
		}

		mutex_exit(&arc_reclaim_lock);
	}

	VERIFY3U(hdr->b_type, ==, type);
	if (type == ARC_BUFC_METADATA) {
		arc_space_consume(size, ARC_SPACE_META);
	} else {
		arc_space_consume(size, ARC_SPACE_DATA);
	}

	/*
	 * Update the state size. Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(state)) {

		(void) refcount_add_many(&state->arcs_size, size, tag);

		/*
		 * If this is reached via arc_read, the link is
		 * protected by the hash lock. If reached via
		 * arc_buf_alloc, the header should not be accessed by
		 * any other thread. And, if reached via arc_read_done,
		 * the hash lock will protect it if it's found in the
		 * hash table; otherwise no other thread should be
		 * trying to [add|remove]_reference it.
		 */
		if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
			(void) refcount_add_many(&state->arcs_esize[type],
			    size, tag);
		}

		/*
		 * If we are growing the cache, and we are adding anonymous
		 * data, and we have outgrown arc_p, update arc_p
		 */
		if (aggsum_compare(&arc_size, arc_c) < 0 &&
		    hdr->b_l1hdr.b_state == arc_anon &&
		    (refcount_count(&arc_anon->arcs_size) +
		    refcount_count(&arc_mru->arcs_size) > arc_p))
			arc_p = MIN(arc_c, arc_p + size);
	}
}
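
/*
 * Illustrative note (hypothetical, not part of the original code): the
 * throttle above is intentionally a single check, not a loop. A thread that
 * finds the ARC overflowing signals the reclaim thread, sleeps once on
 * arc_reclaim_waiters_cv, and then proceeds even if the ARC is still over
 * the limit, so un-evictable buffers cannot stall allocations forever.
 */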
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
{
	arc_free_data_impl(hdr, size, tag);
	abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
{
	arc_buf_contents_t type = arc_buf_type(hdr);

	arc_free_data_impl(hdr, size, tag);
	if (type == ARC_BUFC_METADATA) {
		zio_buf_free(buf, size);
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		zio_data_buf_free(buf, size);
	}
}
/*
 * Free the arc data buffer.
 */
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;
	arc_buf_contents_t type = arc_buf_type(hdr);

	/* protected by hash lock, if in the hash table */
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT(state != arc_anon && state != arc_l2c_only);

		(void) refcount_remove_many(&state->arcs_esize[type],
		    size, tag);
	}
	(void) refcount_remove_many(&state->arcs_size, size, tag);

	VERIFY3U(hdr->b_type, ==, type);
	if (type == ARC_BUFC_METADATA) {
		arc_space_return(size, ARC_SPACE_META);
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		arc_space_return(size, ARC_SPACE_DATA);
	}
}
/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
	clock_t now;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(HDR_HAS_L1HDR(hdr));

	if (hdr->b_l1hdr.b_state == arc_anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list. Add the new buffer
		 * to the MRU state.
		 */

		ASSERT0(hdr->b_l1hdr.b_arc_access);
		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
		arc_change_state(arc_mru, hdr, hash_lock);

	} else if (hdr->b_l1hdr.b_state == arc_mru) {
		now = ddi_get_lbolt();

		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if (HDR_PREFETCH(hdr)) {
			if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
				/* link protected by hash lock */
				ASSERT(multilist_link_active(
				    &hdr->b_l1hdr.b_arc_node));
			} else {
				arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
				ARCSTAT_BUMP(arcstat_mru_hits);
			}
			hdr->b_l1hdr.b_arc_access = now;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache. Move it to the MFU
		 * state.
		 */
		if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) {
			/*
			 * More than 125ms have passed since we
			 * instantiated this buffer. Move it to the
			 * most frequently used state.
			 */
			hdr->b_l1hdr.b_arc_access = now;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
			arc_change_state(arc_mfu, hdr, hash_lock);
		}
		ARCSTAT_BUMP(arcstat_mru_hits);
	} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache. Move it to the
		 * MFU state.
		 */

		if (HDR_PREFETCH(hdr)) {
			new_state = arc_mru;
			if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
				arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
		} else {
			new_state = arc_mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
		}

		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		arc_change_state(new_state, hdr, hash_lock);

		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
	} else if (hdr->b_l1hdr.b_state == arc_mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache. Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((HDR_PREFETCH(hdr)) != 0) {
			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
			/* link protected by hash_lock */
			ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		}
		ARCSTAT_BUMP(arcstat_mfu_hits);
		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
	} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
		arc_state_t	*new_state = arc_mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache. Move it back to the
		 * MFU state.
		 */

		if (HDR_PREFETCH(hdr)) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
			new_state = arc_mru;
		}

		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
		arc_change_state(new_state, hdr, hash_lock);

		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
	} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
		/*
		 * This buffer is on the 2nd Level ARC.
		 */

		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
		arc_change_state(arc_mfu, hdr, hash_lock);
	} else {
		ASSERT(!"invalid arc state");
	}
}
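
/*
 * Illustrative example (hypothetical, not part of the original code): a
 * typical lifetime under the transitions above is anon -> MRU on the first
 * access; a second access within ARC_MINTIME (125ms) only refreshes
 * b_arc_access, while a later access promotes the buffer to the MFU; once
 * evicted, a hit on the MRU or MFU ghost list brings the block back and
 * also feeds arc_adapt(), which moves arc_p toward whichever list is
 * seeing ghost hits.
 */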
/* a generic arc_done_func_t which you can use */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	if (zio == NULL || zio->io_error == 0)
		bcopy(buf->b_data, arg, arc_buf_size(buf));
	arc_buf_destroy(buf, arg);
}
4675 arc_getbuf_func(zio_t
*zio
, arc_buf_t
*buf
, void *arg
)
4677 arc_buf_t
**bufp
= arg
;
4678 if (zio
&& zio
->io_error
) {
4679 arc_buf_destroy(buf
, arg
);
4683 ASSERT(buf
->b_data
);
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
		ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
		ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
	} else {
		if (HDR_COMPRESSION_ENABLED(hdr)) {
			ASSERT3U(HDR_GET_COMPRESS(hdr), ==,
			    BP_GET_COMPRESS(bp));
		}
		ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
		ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
	}
}
static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t	*hdr = zio->io_private;
	kmutex_t	*hash_lock = NULL;
	arc_callback_t	*callback_list;
	arc_callback_t	*acb;
	boolean_t	freeable = B_FALSE;
	boolean_t	no_zio_error = (zio->io_error == 0);

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O. We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O. The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	if (HDR_IN_HASH_TABLE(hdr)) {
		ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
		ASSERT3U(hdr->b_dva.dva_word[0], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
		ASSERT3U(hdr->b_dva.dva_word[1], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[1]);

		arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
		    &hash_lock);

		ASSERT((found == hdr &&
		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
		    (found == hdr && HDR_L2_READING(hdr)));
		ASSERT3P(hash_lock, !=, NULL);
	}

	if (no_zio_error) {
		/* byteswap if necessary */
		if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
			if (BP_GET_LEVEL(zio->io_bp) > 0) {
				hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
			} else {
				hdr->b_l1hdr.b_byteswap =
				    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
			}
		} else {
			hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
		}
	}

	arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
	if (l2arc_noprefetch && HDR_PREFETCH(hdr))
		arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);

	callback_list = hdr->b_l1hdr.b_acb;
	ASSERT3P(callback_list, !=, NULL);

	if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) {
		/*
		 * Only call arc_access on anonymous buffers. This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		arc_access(hdr, hash_lock);
	}

	/*
	 * If a read request has a callback (i.e. acb_done is not NULL), then we
	 * make a buf containing the data according to the parameters which were
	 * passed in. The implementation of arc_buf_alloc_impl() ensures that we
	 * aren't needlessly decompressing the data multiple times.
	 */
	int callback_cnt = 0;
	for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
		if (!acb->acb_done)
			continue;

		/* This is a demand read since prefetches don't use callbacks */
		callback_cnt++;

		int error = arc_buf_alloc_impl(hdr, acb->acb_private,
		    acb->acb_compressed, no_zio_error, &acb->acb_buf);
		if (no_zio_error)
			zio->io_error = error;
	}
	hdr->b_l1hdr.b_acb = NULL;
	arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
	if (callback_cnt == 0) {
		ASSERT(HDR_PREFETCH(hdr));
		ASSERT0(hdr->b_l1hdr.b_bufcnt);
		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	}

	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
	    callback_list != NULL);

	if (no_zio_error) {
		arc_hdr_verify(hdr, zio->io_bp);
	} else {
		arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
		if (hdr->b_l1hdr.b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_l1hdr.b_cv);

	if (hash_lock != NULL) {
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete. It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}
4849 * "Read" the block at the specified DVA (in bp) via the
4850 * cache. If the block is found in the cache, invoke the provided
4851 * callback immediately and return. Note that the `zio' parameter
4852 * in the callback will be NULL in this case, since no IO was
4853 * required. If the block is not in the cache pass the read request
4854 * on to the spa with a substitute callback function, so that the
4855 * requested block will be added to the cache.
4857 * If a read request arrives for a block that has a read in-progress,
4858 * either wait for the in-progress read to complete (and return the
4859 * results); or, if this is a read with a "done" func, add a record
4860 * to the read to invoke the "done" func when the read completes,
4861 * and return; or just return.
4863 * arc_read_done() will invoke all the requested "done" functions
4864 * for readers of this block.
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
    void *private, zio_priority_t priority, int zio_flags,
    arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
	arc_buf_hdr_t *hdr = NULL;
	kmutex_t *hash_lock = NULL;
	zio_t *rzio;
	uint64_t guid = spa_load_guid(spa);
	boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW) != 0;

	ASSERT(!BP_IS_EMBEDDED(bp) ||
	    BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);

top:
	if (!BP_IS_EMBEDDED(bp)) {
		/*
		 * Embedded BP's have no DVA and require no I/O to "read".
		 * Create an anonymous arc buf to back it.
		 */
		hdr = buf_hash_find(guid, bp, &hash_lock);
	}

	if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_pabd != NULL) {
		arc_buf_t *buf = NULL;
		*arc_flags |= ARC_FLAG_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
			    priority == ZIO_PRIORITY_SYNC_READ) {
				/*
				 * This sync read must wait for an
				 * in-progress async read (e.g. a predictive
				 * prefetch). Async reads are queued
				 * separately at the vdev_queue layer, so
				 * this is a form of priority inversion.
				 * Ideally, we would "inherit" the demand
				 * i/o's priority by moving the i/o from
				 * the async queue to the synchronous queue,
				 * but there is currently no mechanism to do
				 * so. Track this so that we can evaluate
				 * the magnitude of this potential performance
				 * problem.
				 *
				 * Note that if the prefetch i/o is already
				 * active (has been issued to the device),
				 * the prefetch improved performance, because
				 * we issued it sooner than we would have
				 * without the prefetch.
				 */
				DTRACE_PROBE1(arc__sync__wait__for__async,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_sync_wait_for_async);
			}
			if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
				arc_hdr_clear_flags(hdr,
				    ARC_FLAG_PREDICTIVE_PREFETCH);
			}

			if (*arc_flags & ARC_FLAG_WAIT) {
				cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_FLAG_NOWAIT);

			if (done) {
				arc_callback_t *acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_SLEEP);
				acb->acb_done = done;
				acb->acb_private = private;
				acb->acb_compressed = compressed_read;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, NULL, zio_flags);

				ASSERT3P(acb->acb_done, !=, NULL);
				acb->acb_next = hdr->b_l1hdr.b_acb;
				hdr->b_l1hdr.b_acb = acb;
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
		    hdr->b_l1hdr.b_state == arc_mfu);

		if (done) {
			if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
				/*
				 * This is a demand read which does not have to
				 * wait for i/o because we did a predictive
				 * prefetch i/o for it, which has completed.
				 */
				DTRACE_PROBE1(
				    arc__demand__hit__predictive__prefetch,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(
				    arcstat_demand_hit_predictive_prefetch);
				arc_hdr_clear_flags(hdr,
				    ARC_FLAG_PREDICTIVE_PREFETCH);
			}
			ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp));

			/* Get a buf with the desired data in it. */
			VERIFY0(arc_buf_alloc_impl(hdr, private,
			    compressed_read, B_TRUE, &buf));
		} else if (*arc_flags & ARC_FLAG_PREFETCH &&
		    refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
			arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		if (*arc_flags & ARC_FLAG_L2CACHE)
			arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
		mutex_exit(hash_lock);
		ARCSTAT_BUMP(arcstat_hits);
		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
		    data, metadata, hits);

		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t lsize = BP_GET_LSIZE(bp);
		uint64_t psize = BP_GET_PSIZE(bp);
		arc_callback_t *acb;
		vdev_t *vd = NULL;
		uint64_t addr = 0;
		boolean_t devw = B_FALSE;
		uint64_t size;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t *exists = NULL;
			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
			hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
			    BP_GET_COMPRESS(bp), type);

			if (!BP_IS_EMBEDDED(bp)) {
				hdr->b_dva = *BP_IDENTITY(bp);
				hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
				exists = buf_hash_insert(hdr, &hash_lock);
			}
			if (exists != NULL) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				buf_discard_identity(hdr);
				arc_hdr_destroy(hdr);
				goto top; /* restart the IO request */
			}
		} else {
			/*
			 * This block is in the ghost cache. If it was L2-only
			 * (and thus didn't have an L1 hdr), we realloc the
			 * header to add an L1 hdr.
			 */
			if (!HDR_HAS_L1HDR(hdr)) {
				hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
				    hdr_full_cache);
			}
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
			ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
			ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

			/*
			 * This is a delicate dance that we play here.
			 * This hdr is in the ghost list so we access it
			 * to move it out of the ghost list before we
			 * initiate the read. If it's a prefetch then
			 * it won't have a callback so we'll remove the
			 * reference that arc_buf_alloc_impl() created. We
			 * do this after we've called arc_access() to
			 * avoid hitting an assert in remove_reference().
			 */
			arc_access(hdr, hash_lock);
			arc_hdr_alloc_pabd(hdr);
		}
		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
		size = arc_hdr_size(hdr);

		/*
		 * If compression is enabled on the hdr, then will do
		 * RAW I/O and will store the compressed data in the hdr's
		 * data block. Otherwise, the hdr's data block will contain
		 * the uncompressed data.
		 */
		if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
			zio_flags |= ZIO_FLAG_RAW;
		}

		if (*arc_flags & ARC_FLAG_PREFETCH)
			arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
		if (*arc_flags & ARC_FLAG_L2CACHE)
			arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
		if (BP_GET_LEVEL(bp) > 0)
			arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
		if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
			arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
		ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
		acb->acb_done = done;
		acb->acb_private = private;
		acb->acb_compressed = compressed_read;

		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
		hdr->b_l1hdr.b_acb = acb;
		arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);

		if (HDR_HAS_L2HDR(hdr) &&
		    (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
			devw = hdr->b_l2hdr.b_dev->l2ad_writing;
			addr = hdr->b_l2hdr.b_daddr;
			/*
			 * Lock out L2ARC device removal.
			 */
			if (vdev_is_dead(vd) ||
			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
				vd = NULL;
		}

		if (priority == ZIO_PRIORITY_ASYNC_READ)
			arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
		else
			arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);

		if (hash_lock != NULL)
			mutex_exit(hash_lock);

		/*
		 * At this point, we have a level 1 cache miss. Try again in
		 * L2ARC if possible.
		 */
		ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);

		DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
		    uint64_t, lsize, zbookmark_phys_t *, zb);
		ARCSTAT_BUMP(arcstat_misses);
		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
		    data, metadata, misses);

		if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
			/*
			 * Read from the L2ARC if the following are true:
			 * 1. The L2ARC vdev was previously cached.
			 * 2. This buffer still has L2ARC metadata.
			 * 3. This buffer isn't currently writing to the L2ARC.
			 * 4. The L2ARC entry wasn't evicted, which may
			 *    also have invalidated the vdev.
			 * 5. This isn't prefetch and l2arc_noprefetch is set.
			 */
			if (HDR_HAS_L2HDR(hdr) &&
			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
			    !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
				l2arc_read_callback_t *cb;
				abd_t *abd;
				uint64_t asize;

				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_hits);

				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
				    KM_SLEEP);
				cb->l2rcb_hdr = hdr;
				cb->l2rcb_flags = zio_flags;

				asize = vdev_psize_to_asize(vd, size);
				if (asize != size) {
					abd = abd_alloc_for_io(asize,
					    HDR_ISTYPE_METADATA(hdr));
					cb->l2rcb_abd = abd;
				} else {
					abd = hdr->b_l1hdr.b_pabd;
				}

				ASSERT(addr >= VDEV_LABEL_START_SIZE &&
				    addr + asize <= vd->vdev_psize -
				    VDEV_LABEL_END_SIZE);

				/*
				 * l2arc read. The SCL_L2ARC lock will be
				 * released by l2arc_read_done().
				 * Issue a null zio if the underlying buffer
				 * was squashed to zero size by compression.
				 */
				ASSERT3U(HDR_GET_COMPRESS(hdr), !=,
				    ZIO_COMPRESS_EMPTY);
				rzio = zio_read_phys(pio, vd, addr,
				    asize, abd,
				    ZIO_CHECKSUM_OFF,
				    l2arc_read_done, cb, priority,
				    zio_flags | ZIO_FLAG_DONT_CACHE |
				    ZIO_FLAG_CANFAIL |
				    ZIO_FLAG_DONT_PROPAGATE |
				    ZIO_FLAG_DONT_RETRY, B_FALSE);
				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
				    zio_t *, rzio);
				ARCSTAT_INCR(arcstat_l2_read_bytes, size);

				if (*arc_flags & ARC_FLAG_NOWAIT) {
					zio_nowait(rzio);
					return (0);
				}

				ASSERT(*arc_flags & ARC_FLAG_WAIT);
				if (zio_wait(rzio) == 0)
					return (0);

				/* l2arc read error; goto zio_read() */
			} else {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
				if (HDR_L2_WRITING(hdr))
					ARCSTAT_BUMP(arcstat_l2_rw_clash);
				spa_config_exit(spa, SCL_L2ARC, vd);
			}
		} else {
			if (vd != NULL)
				spa_config_exit(spa, SCL_L2ARC, vd);
			if (l2arc_ndev != 0) {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
			}
		}

		rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pabd, size,
		    arc_read_done, hdr, priority, zio_flags, zb);

		if (*arc_flags & ARC_FLAG_WAIT)
			return (zio_wait(rzio));

		ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}
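
/*
 * Illustrative example (hypothetical, not part of the original code): a
 * synchronous caller that simply wants the buffer back might look like the
 * sketch below, where "abuf" and "zb" are an arc_buf_t pointer and a
 * zbookmark_phys_t supplied by that caller:
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * On a cache hit the done callback runs immediately with a NULL zio, as
 * described in the comment above arc_read().
 */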
/*
 * Notify the arc that a block was freed, and thus will never be used again.
 */
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	uint64_t guid = spa_load_guid(spa);

	ASSERT(!BP_IS_EMBEDDED(bp));

	hdr = buf_hash_find(guid, bp, &hash_lock);
	if (hdr == NULL)
		return;

	/*
	 * We might be trying to free a block that is still doing I/O
	 * (i.e. prefetch) or has a reference (i.e. a dedup-ed,
	 * dmu_sync-ed block). If this block is being prefetched, then it
	 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
	 * until the I/O completes. A block may also have a reference if it is
	 * part of a dedup-ed, dmu_synced write. The dmu_sync() function would
	 * have written the new block to its final resting place on disk but
	 * without the dedup flag set. This would have left the hdr in the MRU
	 * state and discoverable. When the txg finally syncs it detects that
	 * the block was overridden in open context and issues an override I/O.
	 * Since this is a dedup block, the override I/O will determine if the
	 * block is already in the DDT. If so, then it will replace the io_bp
	 * with the bp from the DDT and allow the I/O to finish. When the I/O
	 * reaches the done callback, dbuf_write_override_done, it will
	 * check to see if the io_bp and io_bp_override are identical.
	 * If they are not, then it indicates that the bp was replaced with
	 * the bp in the DDT and the override bp is freed. This allows
	 * us to arrive here with a reference on a block that is being
	 * freed. So if we have an I/O in progress, or a reference to
	 * this hdr, then we don't destroy the hdr.
	 */
	if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
	    refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
		arc_change_state(arc_anon, hdr, hash_lock);
		arc_hdr_destroy(hdr);
		mutex_exit(hash_lock);
	} else {
		mutex_exit(hash_lock);
	}
}
/*
 * Release this buffer from the cache, making it an anonymous buffer. This
 * must be done after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/*
	 * It would be nice to assert that if it's DMU metadata (level >
	 * 0 || it's the dnode file), then it must be syncing context.
	 * But we don't know that information at this level.
	 */

	mutex_enter(&buf->b_evict_lock);

	ASSERT(HDR_HAS_L1HDR(hdr));

	/*
	 * We don't grab the hash lock prior to this check, because if
	 * the buffer's header is in the arc_anon state, it won't be
	 * linked into the hash table.
	 */
	if (hdr->b_l1hdr.b_state == arc_anon) {
		mutex_exit(&buf->b_evict_lock);
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		ASSERT(!HDR_HAS_L2HDR(hdr));
		ASSERT(HDR_EMPTY(hdr));

		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
		ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));

		hdr->b_l1hdr.b_arc_access = 0;

		/*
		 * If the buf is being overridden then it may already
		 * have a hdr that is not empty.
		 */
		buf_discard_identity(hdr);
		arc_buf_thaw(buf);

		return;
	}

	kmutex_t *hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	/*
	 * This assignment is only valid as long as the hash_lock is
	 * held, we must be careful not to reference state or the
	 * b_state field after dropping the lock.
	 */
	arc_state_t *state = hdr->b_l1hdr.b_state;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT3P(state, !=, arc_anon);

	/* this buffer is not on any list */
	ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);

	if (HDR_HAS_L2HDR(hdr)) {
		mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);

		/*
		 * We have to recheck this conditional again now that
		 * we're holding the l2ad_mtx to prevent a race with
		 * another thread which might be concurrently calling
		 * l2arc_evict(). In that case, l2arc_evict() might have
		 * destroyed the header's L2 portion as we were waiting
		 * to acquire the l2ad_mtx.
		 */
		if (HDR_HAS_L2HDR(hdr))
			arc_hdr_l2hdr_destroy(hdr);

		mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
	}

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_l1hdr.b_bufcnt > 1) {
		arc_buf_hdr_t *nhdr;
		uint64_t spa = hdr->b_spa;
		uint64_t psize = HDR_GET_PSIZE(hdr);
		uint64_t lsize = HDR_GET_LSIZE(hdr);
		enum zio_compress compress = HDR_GET_COMPRESS(hdr);
		arc_buf_contents_t type = arc_buf_type(hdr);
		VERIFY3U(hdr->b_type, ==, type);

		ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
		(void) remove_reference(hdr, hash_lock, tag);

		if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
			ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
			ASSERT(ARC_BUF_LAST(buf));
		}

		/*
		 * Pull the data off of this hdr and attach it to
		 * a new anonymous hdr. Also find the last buffer
		 * in the hdr's buffer list.
		 */
		arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
		ASSERT3P(lastbuf, !=, NULL);

		/*
		 * If the current arc_buf_t and the hdr are sharing their data
		 * buffer, then we must stop sharing that block.
		 */
		if (arc_buf_is_shared(buf)) {
			VERIFY(!arc_buf_is_shared(lastbuf));

			/*
			 * First, sever the block sharing relationship between
			 * buf and the arc_buf_hdr_t.
			 */
			arc_unshare_buf(hdr, buf);

			/*
			 * Now we need to recreate the hdr's b_pabd. Since we
			 * have lastbuf handy, we try to share with it, but if
			 * we can't then we allocate a new b_pabd and copy the
			 * data from buf into it.
			 */
			if (arc_can_share(hdr, lastbuf)) {
				arc_share_buf(hdr, lastbuf);
			} else {
				arc_hdr_alloc_pabd(hdr);
				abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
				    buf->b_data, psize);
			}
			VERIFY3P(lastbuf->b_data, !=, NULL);
		} else if (HDR_SHARED_DATA(hdr)) {
			/*
			 * Uncompressed shared buffers are always at the end
			 * of the list. Compressed buffers don't have the
			 * same requirements. This makes it hard to
			 * simply assert that the lastbuf is shared so
			 * we rely on the hdr's compression flags to determine
			 * if we have a compressed, shared buffer.
			 */
			ASSERT(arc_buf_is_shared(lastbuf) ||
			    HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
			ASSERT(!ARC_BUF_SHARED(buf));
		}
		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
		ASSERT3P(state, !=, arc_l2c_only);

		(void) refcount_remove_many(&state->arcs_size,
		    arc_buf_size(buf), buf);

		if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
			ASSERT3P(state, !=, arc_l2c_only);
			(void) refcount_remove_many(&state->arcs_esize[type],
			    arc_buf_size(buf), buf);
		}

		hdr->b_l1hdr.b_bufcnt -= 1;
		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		mutex_exit(hash_lock);

		/*
		 * Allocate a new hdr. The new hdr will contain a b_pabd
		 * buffer which will be freed in arc_write().
		 */
		nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
		ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT0(nhdr->b_l1hdr.b_bufcnt);
		ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
		VERIFY3U(nhdr->b_type, ==, type);
		ASSERT(!HDR_SHARED_DATA(nhdr));

		nhdr->b_l1hdr.b_buf = buf;
		nhdr->b_l1hdr.b_bufcnt = 1;
		(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
		buf->b_hdr = nhdr;

		mutex_exit(&buf->b_evict_lock);
		(void) refcount_add_many(&arc_anon->arcs_size,
		    arc_buf_size(buf), buf);
	} else {
		mutex_exit(&buf->b_evict_lock);
		ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
		/* protected by hash lock, or hdr is on arc_anon */
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		arc_change_state(arc_anon, hdr, hash_lock);
		hdr->b_l1hdr.b_arc_access = 0;
		mutex_exit(hash_lock);

		buf_discard_identity(hdr);
		arc_buf_thaw(buf);
	}
}
int
arc_released(arc_buf_t *buf)
{
	int released;

	mutex_enter(&buf->b_evict_lock);
	released = (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
	mutex_exit(&buf->b_evict_lock);
	return (released);
}

int
arc_referenced(arc_buf_t *buf)
{
	int referenced;

	mutex_enter(&buf->b_evict_lock);
	referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
	mutex_exit(&buf->b_evict_lock);
	return (referenced);
}
static void
arc_write_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;
	uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp);

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);

	/*
	 * If we're reexecuting this zio because the pool suspended, then
	 * cleanup any state that was previously set the first time the
	 * callback was invoked.
	 */
	if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
		arc_cksum_free(hdr);
		arc_buf_unwatch(buf);
		if (hdr->b_l1hdr.b_pabd != NULL) {
			if (arc_buf_is_shared(buf)) {
				arc_unshare_buf(hdr, buf);
			} else {
				arc_hdr_free_pabd(hdr);
			}
		}
	}
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	ASSERT(!HDR_SHARED_DATA(hdr));
	ASSERT(!arc_buf_is_shared(buf));

	callback->awcb_ready(zio, buf, callback->awcb_private);

	if (HDR_IO_IN_PROGRESS(hdr))
		ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);

	arc_cksum_compute(buf);
	arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);

	enum zio_compress compress;
	if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
		compress = ZIO_COMPRESS_OFF;
	} else {
		ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(zio->io_bp));
		compress = BP_GET_COMPRESS(zio->io_bp);
	}
	HDR_SET_PSIZE(hdr, psize);
	arc_hdr_set_compress(hdr, compress);

	/*
	 * Fill the hdr with data. If the hdr is compressed, the data we want
	 * is available from the zio, otherwise we can take it from the buf.
	 *
	 * We might be able to share the buf's data with the hdr here. However,
	 * doing so would cause the ARC to be full of linear ABDs if we write a
	 * lot of shareable data. As a compromise, we check whether scattered
	 * ABDs are allowed, and assume that if they are then the user wants
	 * the ARC to be primarily filled with them regardless of the data being
	 * written. Therefore, if they're allowed then we allocate one and copy
	 * the data into it; otherwise, we share the data directly if we can.
	 */
	if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
		arc_hdr_alloc_pabd(hdr);

		/*
		 * Ideally, we would always copy the io_abd into b_pabd, but the
		 * user may have disabled compressed ARC, thus we must check the
		 * hdr's compression setting rather than the io_bp's.
		 */
		if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
			ASSERT3U(BP_GET_COMPRESS(zio->io_bp), !=,
			    ZIO_COMPRESS_OFF);
			ASSERT3U(psize, >, 0);

			abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
		} else {
			ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));

			abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
			    arc_buf_size(buf));
		}
	} else {
		ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
		ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);

		arc_share_buf(hdr, buf);
	}

	arc_hdr_verify(hdr, zio->io_bp);
}
static void
arc_write_children_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;

	callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
/*
 * The SPA calls this callback for each physical write that happens on behalf
 * of a logical write.  See the comment in dbuf_write_physdone() for details.
 */
static void
arc_write_physdone(zio_t *zio)
{
	arc_write_callback_t *cb = zio->io_private;
	if (cb->awcb_physdone != NULL)
		cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);

	if (zio->io_error == 0) {
		arc_hdr_verify(hdr, zio->io_bp);

		if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
			buf_discard_identity(hdr);
		} else {
			hdr->b_dva = *BP_IDENTITY(zio->io_bp);
			hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
		}
	} else {
		ASSERT(HDR_EMPTY(hdr));
	}

	/*
	 * If the block to be written was all-zero or compressed enough to be
	 * embedded in the BP, no write was performed so there will be no
	 * dva/birth/checksum.  The buffer must therefore remain anonymous
	 * (and uncached).
	 */
	if (!HDR_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		ASSERT3U(zio->io_error, ==, 0);

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists != NULL) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad overwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
				ASSERT(refcount_is_zero(
				    &exists->b_l1hdr.b_refcnt));
				arc_change_state(arc_anon, exists, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(exists);
				exists = buf_hash_insert(hdr, &hash_lock);
				ASSERT3P(exists, ==, NULL);
			} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
				/* nopwrite */
				ASSERT(zio->io_prop.zp_nopwrite);
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad nopwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
			} else {
				/* Dedup */
				ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
				ASSERT(hdr->b_l1hdr.b_state == arc_anon);
				ASSERT(BP_GET_DEDUP(zio->io_bp));
				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
			}
		}
		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
		/* if it's not anon, we are doing a scrub */
		if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
	}

	ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
	callback->awcb_done(zio, buf, callback->awcb_private);

	abd_put(zio->io_abd);

	kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
    boolean_t l2arc, const zio_prop_t *zp, arc_done_func_t *ready,
    arc_done_func_t *children_ready, arc_done_func_t *physdone,
    arc_done_func_t *done, void *private, zio_priority_t priority,
    int zio_flags, const zbookmark_phys_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_write_callback_t *callback;
	zio_t *zio;
	zio_prop_t localprop = *zp;

	ASSERT3P(ready, !=, NULL);
	ASSERT3P(done, !=, NULL);
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
	ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
	if (l2arc)
		arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
	if (ARC_BUF_COMPRESSED(buf)) {
		/*
		 * We're writing a pre-compressed buffer.  Make the
		 * compression algorithm requested by the zio_prop_t match
		 * the pre-compressed buffer's compression algorithm.
		 */
		localprop.zp_compress = HDR_GET_COMPRESS(hdr);

		ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
		zio_flags |= ZIO_FLAG_RAW;
	}
	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
	callback->awcb_ready = ready;
	callback->awcb_children_ready = children_ready;
	callback->awcb_physdone = physdone;
	callback->awcb_done = done;
	callback->awcb_private = private;
	callback->awcb_buf = buf;

	/*
	 * The hdr's b_pabd is now stale, free it now. A new data block
	 * will be allocated when the zio pipeline calls arc_write_ready().
	 */
	if (hdr->b_l1hdr.b_pabd != NULL) {
		/*
		 * If the buf is currently sharing the data block with
		 * the hdr then we need to break that relationship here.
		 * The hdr will remain with a NULL data pointer and the
		 * buf will take sole ownership of the block.
		 */
		if (arc_buf_is_shared(buf)) {
			arc_unshare_buf(hdr, buf);
		} else {
			arc_hdr_free_pabd(hdr);
		}
		VERIFY3P(buf->b_data, !=, NULL);
		arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
	}
	ASSERT(!arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	zio = zio_write(pio, spa, txg, bp,
	    abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
	    HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
	    (children_ready != NULL) ? arc_write_children_ready : NULL,
	    arc_write_physdone, arc_write_done, callback,
	    priority, zio_flags, zb);

	return (zio);
}
static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
	uint64_t available_memory = ptob(freemem);
	static uint64_t page_load = 0;
	static uint64_t last_txg = 0;

#if defined(__i386)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif

	if (freemem > physmem * arc_lotsfree_percent / 100)
		return (0);

	if (txg > last_txg) {
		last_txg = txg;
		page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (curproc == proc_pageout) {
		if (page_load > MAX(ptob(minfree), available_memory) / 4)
			return (SET_ERROR(ERESTART));
		/* Note: reserve is inflated, so we deflate */
		page_load += reserve / 8;
		return (0);
	} else if (page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (SET_ERROR(EAGAIN));
	}
	page_load = 0;
	return (0);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

	if (reserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, reserve * 4);
	if (reserve > arc_c)
		return (SET_ERROR(ENOMEM));

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */

	/* assert that it has not wrapped around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);

	anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
	    arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	error = arc_memory_throttle(reserve, txg);
	if (error != 0)
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 */

	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
	    anon_size > arc_c / 4) {
		uint64_t meta_esize =
		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
		uint64_t data_esize =
		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve >> 10, meta_esize >> 10,
		    data_esize >> 10, reserve >> 10, arc_c >> 10);
		return (SET_ERROR(ERESTART));
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}
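
/*
 * For illustration (not part of the upstream logic): with arc_c at 4 GB,
 * arc_tempreserve_space() only returns ERESTART when the anonymous
 * (not-yet-synced) data exceeds 1 GB (arc_c / 4) *and* the sum of the new
 * reserve, the outstanding tempreserve, and that anonymous data exceeds
 * 2 GB (arc_c / 2).  A single large reservation against an otherwise clean
 * cache is therefore never throttled by this check alone.
 */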
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
    kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
	size->value.ui64 = refcount_count(&state->arcs_size);
	evict_data->value.ui64 =
	    refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
	evict_metadata->value.ui64 =
	    refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
	arc_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	} else {
		arc_kstat_update_state(arc_anon,
		    &as->arcstat_anon_size,
		    &as->arcstat_anon_evictable_data,
		    &as->arcstat_anon_evictable_metadata);
		arc_kstat_update_state(arc_mru,
		    &as->arcstat_mru_size,
		    &as->arcstat_mru_evictable_data,
		    &as->arcstat_mru_evictable_metadata);
		arc_kstat_update_state(arc_mru_ghost,
		    &as->arcstat_mru_ghost_size,
		    &as->arcstat_mru_ghost_evictable_data,
		    &as->arcstat_mru_ghost_evictable_metadata);
		arc_kstat_update_state(arc_mfu,
		    &as->arcstat_mfu_size,
		    &as->arcstat_mfu_evictable_data,
		    &as->arcstat_mfu_evictable_metadata);
		arc_kstat_update_state(arc_mfu_ghost,
		    &as->arcstat_mfu_ghost_size,
		    &as->arcstat_mfu_ghost_evictable_data,
		    &as->arcstat_mfu_ghost_evictable_metadata);

		ARCSTAT(arcstat_size) = aggsum_value(&arc_size);
		ARCSTAT(arcstat_meta_used) = aggsum_value(&arc_meta_used);
		ARCSTAT(arcstat_data_size) = aggsum_value(&astat_data_size);
		ARCSTAT(arcstat_metadata_size) =
		    aggsum_value(&astat_metadata_size);
		ARCSTAT(arcstat_hdr_size) = aggsum_value(&astat_hdr_size);
		ARCSTAT(arcstat_other_size) = aggsum_value(&astat_other_size);
		ARCSTAT(arcstat_l2_hdr_size) = aggsum_value(&astat_l2_hdr_size);
	}

	return (0);
}
/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the ARC eviction
 * code is laid out; arc_evict_state() assumes ARC buffers are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
	arc_buf_hdr_t *hdr = obj;

	/*
	 * We rely on b_dva to generate evenly distributed index
	 * numbers using buf_hash below. So, as an added precaution,
	 * let's make sure we never add empty buffers to the arc lists.
	 */
	ASSERT(!HDR_EMPTY(hdr));

	/*
	 * The assumption here, is the hash value for a given
	 * arc_buf_hdr_t will remain constant throughout its lifetime
	 * (i.e. its b_spa, b_dva, and b_birth fields don't change).
	 * Thus, we don't need to store the header's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
	    multilist_get_num_sublists(ml));
}
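
/*
 * Worked example (illustrative only): if the multilist was created with
 * 64 sublists and buf_hash() returns 0x1234 (4660) for a header's
 * <spa, dva, birth> tuple, the header always maps to sublist
 * 4660 % 64 == 52, both when it is inserted and when it is later removed.
 */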
static void
arc_state_init(void)
{
	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_l2c_only = &ARC_l2c_only;

	arc_mru->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru_ghost->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_l2c_only->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_l2c_only->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);

	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

	refcount_create(&arc_anon->arcs_size);
	refcount_create(&arc_mru->arcs_size);
	refcount_create(&arc_mru_ghost->arcs_size);
	refcount_create(&arc_mfu->arcs_size);
	refcount_create(&arc_mfu_ghost->arcs_size);
	refcount_create(&arc_l2c_only->arcs_size);

	aggsum_init(&arc_meta_used, 0);
	aggsum_init(&arc_size, 0);
	aggsum_init(&astat_data_size, 0);
	aggsum_init(&astat_metadata_size, 0);
	aggsum_init(&astat_hdr_size, 0);
	aggsum_init(&astat_other_size, 0);
	aggsum_init(&astat_l2_hdr_size, 0);
}
static void
arc_state_fini(void)
{
	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

	refcount_destroy(&arc_anon->arcs_size);
	refcount_destroy(&arc_mru->arcs_size);
	refcount_destroy(&arc_mru_ghost->arcs_size);
	refcount_destroy(&arc_mfu->arcs_size);
	refcount_destroy(&arc_mfu_ghost->arcs_size);
	refcount_destroy(&arc_l2c_only->arcs_size);

	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
}
void
arc_init(void)
{
	/*
	 * allmem is "all memory that we could possibly use".
	 */
#ifdef _KERNEL
	uint64_t allmem = ptob(physmem - swapfs_minfree);
#else
	uint64_t allmem = (physmem * PAGESIZE) / 2;
#endif

	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc_c_min = MAX(allmem / 32, 64 << 20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (allmem >= 1 << 30)
		arc_c_max = allmem - (1 << 30);
	else
		arc_c_max = arc_c_min;
	arc_c_max = MAX(allmem * 3 / 4, arc_c_max);

	/*
	 * In userland, there's only the memory pressure that we artificially
	 * create (see arc_available_memory()).  Don't let arc_c get too
	 * small, because it can cause transactions to be larger than
	 * arc_c, causing arc_tempreserve_space() to fail.
	 */
#ifndef _KERNEL
	arc_c_min = arc_c_max / 2;
#endif

	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (ie. over 64MB)
	 */
	if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem) {
		arc_c_max = zfs_arc_max;
		arc_c_min = MIN(arc_c_min, arc_c_max);
	}
	if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* limit meta-data to 1/4 of the arc capacity */
	arc_meta_limit = arc_c_max / 4;

#ifdef _KERNEL
	/*
	 * Metadata is stored in the kernel's heap.  Don't let us
	 * use more than half the heap for the ARC.
	 */
	arc_meta_limit = MIN(arc_meta_limit,
	    vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2);
#endif

	/* Allow the tunable to override if it is reasonable */
	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
		arc_meta_limit = zfs_arc_meta_limit;

	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
		arc_c_min = arc_meta_limit / 2;

	if (zfs_arc_meta_min > 0) {
		arc_meta_min = zfs_arc_meta_min;
	} else {
		arc_meta_min = arc_c_min / 2;
	}

	if (zfs_arc_grow_retry > 0)
		arc_grow_retry = zfs_arc_grow_retry;

	if (zfs_arc_shrink_shift > 0)
		arc_shrink_shift = zfs_arc_shrink_shift;

	/*
	 * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
	 */
	if (arc_no_grow_shift >= arc_shrink_shift)
		arc_no_grow_shift = arc_shrink_shift - 1;

	if (zfs_arc_p_min_shift > 0)
		arc_p_min_shift = zfs_arc_p_min_shift;

	/* if kmem_flags are set, lets try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_state_init();
	buf_init();

	arc_reclaim_thread_exit = B_FALSE;

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		arc_ksp->ks_update = arc_kstat_update;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	arc_dead = B_FALSE;
	arc_warm = B_FALSE;

	/*
	 * Calculate maximum amount of dirty data per pool.
	 *
	 * If it has been set by /etc/system, take that.
	 * Otherwise, use a percentage of physical memory defined by
	 * zfs_dirty_data_max_percent (default 10%) with a cap at
	 * zfs_dirty_data_max_max (default 4GB).
	 */
	if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = physmem * PAGESIZE *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
	}
}
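
/*
 * Sizing example (illustrative, ignoring swapfs_minfree and assuming no
 * tunables are set): on a 64 GB machine, arc_c_min = MAX(64 GB / 32, 64 MB)
 * = 2 GB, arc_c_max = MAX(64 GB - 1 GB, 64 GB * 3 / 4) = 63 GB,
 * arc_meta_limit = 63 GB / 4, and zfs_dirty_data_max = MIN(10% of 64 GB,
 * the 4 GB zfs_dirty_data_max_max cap) = 4 GB.
 */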
void
arc_fini(void)
{
	mutex_enter(&arc_reclaim_lock);
	arc_reclaim_thread_exit = B_TRUE;
	/*
	 * The reclaim thread will set arc_reclaim_thread_exit back to
	 * B_FALSE when it is finished exiting; we're waiting for that.
	 */
	while (arc_reclaim_thread_exit) {
		cv_signal(&arc_reclaim_thread_cv);
		cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
	}
	mutex_exit(&arc_reclaim_lock);

	/* Use B_TRUE to ensure *all* buffers are evicted */
	arc_flush(NULL, B_TRUE);

	arc_dead = B_TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	mutex_destroy(&arc_reclaim_lock);
	cv_destroy(&arc_reclaim_thread_cv);
	cv_destroy(&arc_reclaim_waiters_cv);

	arc_state_fini();
	buf_fini();

	ASSERT0(arc_loaned_bytes);
}
/*
 * Level 2 ARC
 *
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |          ARC          |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate for this there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there. It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction. If a compressible buffer is
 * found during scanning and selected for writing to an L2ARC device, we
 * temporarily boost scanning headroom during the next scan cycle to make
 * sure we adapt to compression effects (which might significantly reduce
 * the data volume we write to L2ARC). The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	        +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	        +==============================+
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from tail of these
 * lists as pictured, the l2arc_feed_thread() will search from the list heads
 * for eligible buffers, greatly increasing its chance of finding them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
 */
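
/*
 * Rough example of how these interact (illustrative; assumes the default
 * tunable values of 8 MB for l2arc_write_max and l2arc_write_boost, 2 for
 * l2arc_headroom and 200 for l2arc_headroom_boost): before the ARC is warm,
 * l2arc_write_size() targets 8 MB + 8 MB = 16 MB per interval, and
 * l2arc_write_buffers() then scans up to 16 MB * 2 = 32 MB of buffers from
 * each list it visits (64 MB when compressed ARC is enabled and the
 * headroom boost applies) before giving up on that list.
 */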
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
	    HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
		return (B_FALSE);

	return (B_TRUE);
}
static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}
/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}
/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write()
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT3P(df->l2df_abd, !=, NULL);
		abd_free(df->l2df_abd);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}
/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	int64_t bytes_dropped = 0;

	cb = zio->io_private;
	ASSERT3P(cb, !=, NULL);
	dev = cb->l2wcb_dev;
	ASSERT3P(dev, !=, NULL);
	head = cb->l2wcb_head;
	ASSERT3P(head, !=, NULL);
	buflist = &dev->l2ad_buflist;
	ASSERT3P(buflist, !=, NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	/*
	 * All writes completed, or an error was hit.
	 */
top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock. We must retry so we
			 * don't leave the ARC_FLAG_L2_WRITING bit set.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);

			/*
			 * We don't want to rescan the headers we've
			 * already marked as having been written out, so
			 * we reinsert the head node so we can pick up
			 * where we left off.
			 */
			list_remove(buflist, head);
			list_insert_after(buflist, hdr, head);

			mutex_exit(&dev->l2ad_mtx);

			/*
			 * We wait for the hash lock to become available
			 * to try and prevent busy waiting, and increase
			 * the chance we'll be able to acquire the lock
			 * the next time around.
			 */
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * We could not have been moved into the arc_l2c_only
		 * state while in-flight due to our ARC_FLAG_L2_WRITING
		 * bit being set. Let's just ensure that's being enforced.
		 */
		ASSERT(HDR_HAS_L1HDR(hdr));

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, hdr);
			arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);

			ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr));
			ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

			bytes_dropped += arc_hdr_size(hdr);
			(void) refcount_remove_many(&dev->l2ad_alloc,
			    arc_hdr_size(hdr), hdr);
		}

		/*
		 * Allow ARC to begin reads and ghost list evictions to
		 * this L2ARC entry.
		 */
		arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	ASSERT(!HDR_HAS_L1HDR(head));
	kmem_cache_free(hdr_l2only_cache, head);
	mutex_exit(&dev->l2ad_mtx);

	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}
/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	boolean_t valid_cksum;

	ASSERT3P(zio->io_vd, !=, NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT3P(cb, !=, NULL);
	hdr = cb->l2rcb_hdr;
	ASSERT3P(hdr, !=, NULL);

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * If the data was read into a temporary buffer,
	 * move it and free the buffer.
	 */
	if (cb->l2rcb_abd != NULL) {
		ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
		if (zio->io_error == 0) {
			abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd,
			    arc_hdr_size(hdr));
		}

		/*
		 * The following must be done regardless of whether
		 * there was an error:
		 * - free the temporary buffer
		 * - point zio to the real ARC buffer
		 * - set zio size accordingly
		 * These are required because zio is either re-used for
		 * an I/O of the block in the case of the error
		 * or the zio is passed to arc_read_done() and it
		 * needs real data.
		 */
		abd_free(cb->l2rcb_abd);
		zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
		zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
	}

	ASSERT3P(zio->io_abd, !=, NULL);

	/*
	 * Check this survived the L2ARC journey.
	 */
	ASSERT3P(zio->io_abd, ==, hdr->b_l1hdr.b_pabd);
	zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
	zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/

	valid_cksum = arc_cksum_is_equal(hdr, zio);
	if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = hdr;
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = SET_ERROR(EIO);
		}
		if (!valid_cksum)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp,
			    hdr->b_l1hdr.b_pabd, zio->io_size, arc_read_done,
			    hdr, zio->io_priority, cb->l2rcb_flags,
			    &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
	multilist_t *ml = NULL;
	unsigned int idx;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		ml = arc_mfu->arcs_list[ARC_BUFC_METADATA];
		break;
	case 1:
		ml = arc_mru->arcs_list[ARC_BUFC_METADATA];
		break;
	case 2:
		ml = arc_mfu->arcs_list[ARC_BUFC_DATA];
		break;
	case 3:
		ml = arc_mru->arcs_list[ARC_BUFC_DATA];
		break;
	}

	/*
	 * Return a randomly-selected sublist. This is acceptable
	 * because the caller feeds only a little bit of data for each
	 * call (8MB). Subsequent calls will result in different
	 * sublists being selected.
	 */
	idx = multilist_get_random_index(ml);
	return (multilist_sublist_lock(ml, idx));
}
/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	arc_buf_hdr_t *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = &dev->l2ad_buflist;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device. There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}
	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * A header can't be on this list if it doesn't have L2 header.
		 */
		ASSERT(HDR_HAS_L2HDR(hdr));

		/* Ensure this header has finished being written. */
		ASSERT(!HDR_L2_WRITING(hdr));
		ASSERT(!HDR_L2_WRITE_HEAD(hdr));

		if (!all && (hdr->b_l2hdr.b_daddr >= taddr ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC. Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_lsize.
			 */
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
			}

			arc_hdr_l2hdr_destroy(hdr);
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);
}
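
/*
 * Example of the target address calculation (illustrative): with a 1024 MB
 * device, l2ad_hand at 1000 MB and a 16 MB distance, the hand is within
 * 2 * distance of l2ad_end, so eviction runs all the way to the end of the
 * device before the hand wraps; otherwise it would simply clear up to
 * taddr = 1000 MB + 16 MB = 1016 MB.
 */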
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The headroom_boost is an in-out parameter used to maintain headroom boost
 * state between calls to this function.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *hdr, *hdr_prev, *head;
	uint64_t write_asize, write_psize, write_lsize, headroom;
	boolean_t full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);

	ASSERT3P(dev->l2ad_vdev, !=, NULL);

	pio = NULL;
	write_lsize = write_asize = write_psize = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
	arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);

	/*
	 * Copy buffers for L2ARC writing.
	 */
	for (int try = 0; try <= 3; try++) {
		multilist_sublist_t *mls = l2arc_sublist_lock(try);
		uint64_t passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			hdr = multilist_sublist_head(mls);
		else
			hdr = multilist_sublist_tail(mls);

		headroom = target_sz * l2arc_headroom;
		if (zfs_compressed_arc_enabled)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; hdr; hdr = hdr_prev) {
			kmutex_t *hash_lock;

			if (arc_warm == B_FALSE)
				hdr_prev = multilist_sublist_next(mls, hdr);
			else
				hdr_prev = multilist_sublist_prev(mls, hdr);

			hash_lock = HDR_LOCK(hdr);
			if (!mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += HDR_GET_LSIZE(hdr);
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, hdr)) {
				mutex_exit(hash_lock);
				continue;
			}

			/*
			 * We rely on the L1 portion of the header below, so
			 * it's invalid for this header to have been evicted out
			 * of the ghost cache, prior to being written out. The
			 * ARC_FLAG_L2_WRITING bit ensures this won't happen.
			 */
			ASSERT(HDR_HAS_L1HDR(hdr));

			ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			ASSERT3U(arc_hdr_size(hdr), >, 0);
			uint64_t psize = arc_hdr_size(hdr);
			uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
			    psize);

			if ((write_asize + asize) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				mutex_enter(&dev->l2ad_mtx);
				list_insert_head(&dev->l2ad_buflist, head);
				mutex_exit(&dev->l2ad_mtx);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			hdr->b_l2hdr.b_dev = dev;
			hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
			arc_hdr_set_flags(hdr,
			    ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR);

			mutex_enter(&dev->l2ad_mtx);
			list_insert_head(&dev->l2ad_buflist, hdr);
			mutex_exit(&dev->l2ad_mtx);

			(void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);

			/*
			 * Normally the L2ARC can use the hdr's data, but if
			 * we're sharing data between the hdr and one of its
			 * bufs, L2ARC needs its own copy of the data so that
			 * the ZIO below can't race with the buf consumer.
			 * Another case where we need to create a copy of the
			 * data is when the buffer size is not device-aligned
			 * and we need to pad the block to make it such.
			 * That also keeps the clock hand suitably aligned.
			 *
			 * To ensure that the copy will be available for the
			 * lifetime of the ZIO and be cleaned up afterwards, we
			 * add it to the l2arc_free_on_write queue.
			 */
			abd_t *to_write;
			if (!HDR_SHARED_DATA(hdr) && psize == asize) {
				to_write = hdr->b_l1hdr.b_pabd;
			} else {
				to_write = abd_alloc_for_io(asize,
				    HDR_ISTYPE_METADATA(hdr));
				abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
				if (asize != psize) {
					abd_zero_off(to_write, psize,
					    asize - psize);
				}
				l2arc_free_abd_on_write(to_write, asize,
				    arc_buf_type(hdr));
			}
			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    hdr->b_l2hdr.b_daddr, asize, to_write,
			    ZIO_CHECKSUM_OFF, NULL, hdr,
			    ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			write_lsize += HDR_GET_LSIZE(hdr);
			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);

			write_psize += psize;
			write_asize += asize;
			dev->l2ad_hand += asize;

			mutex_exit(hash_lock);

			(void) zio_nowait(wzio);
		}

		multilist_sublist_unlock(mls);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_lsize);
		ASSERT(!HDR_HAS_L1HDR(head));
		kmem_cache_free(hdr_l2only_cache, head);
		return (0);
	}

	ASSERT3U(write_asize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
	ARCSTAT_INCR(arcstat_l2_lsize, write_lsize);
	ARCSTAT_INCR(arcstat_l2_psize, write_psize);
	vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}
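
/*
 * Padding example (illustrative): a 5 KB compressed header written to a
 * vdev whose allocation size rounds up to 8 KB cannot reuse b_pabd
 * directly.  l2arc_write_buffers() allocates a separate 8 KB ABD, copies
 * the 5 KB payload, zeroes the trailing 3 KB with abd_zero_off(), and
 * queues the copy on l2arc_free_on_write so it is released once the zio
 * completes.
 */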
/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void *unused)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.   This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT3P(spa, !=, NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size();

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}
/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;

	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
	refcount_create(&adddev->l2ad_alloc);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT3P(remdev, !=, NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(&remdev->l2ad_buflist);
	mutex_destroy(&remdev->l2ad_mtx);
	refcount_destroy(&remdev->l2ad_alloc);
	kmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}