/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */
/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * low.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
/*
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_clear_callback()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
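/*
 * A minimal sketch of the hash-lock protocol described above, using the
 * buf_hash_find() interface declared later in this file: the mutex is
 * returned held, and the caller is responsible for dropping it.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... examine or update the header under the hash lock ...
 *		mutex_exit(hash_lock);
 *	}
 */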
#include <sys/zio_compress.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/vmsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/*
 * The number of iterations through arc_evict_*() before we
 * drop & reacquire the lock.
 */
int arc_evict_iterations = 100;

/* number of seconds before growing cache again */
static int arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
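/*
 * For example, with the arc_state_t layout below, the evictable metadata
 * currently in the MRU state is arc_mru->arcs_lsize[ARC_BUFC_METADATA],
 * and the total bytes held by that state are arc_mru->arcs_size.
 */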
typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread. The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_asize;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_l2_compress_successes;
	kstat_named_t arcstat_l2_compress_zeros;
	kstat_named_t arcstat_l2_compress_failures;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_duplicate_buffers;
	kstat_named_t arcstat_duplicate_buffers_size;
	kstat_named_t arcstat_duplicate_reads;
	kstat_named_t arcstat_meta_used;
	kstat_named_t arcstat_meta_limit;
	kstat_named_t arcstat_meta_max;
} arc_stats_t;
static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_asize",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "l2_compress_successes",	KSTAT_DATA_UINT64 },
	{ "l2_compress_zeros",		KSTAT_DATA_UINT64 },
	{ "l2_compress_failures",	KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "duplicate_buffers",		KSTAT_DATA_UINT64 },
	{ "duplicate_buffers_size",	KSTAT_DATA_UINT64 },
	{ "duplicate_reads",		KSTAT_DATA_UINT64 },
	{ "arc_meta_used",		KSTAT_DATA_UINT64 },
	{ "arc_meta_limit",		KSTAT_DATA_UINT64 },
	{ "arc_meta_max",		KSTAT_DATA_UINT64 }
};
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val))

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
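/*
 * For example, the hit path later in this file records a demand hit on a
 * metadata buffer by expanding to ARCSTAT_BUMP(arcstat_demand_metadata_hits):
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 */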
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
#define	arc_meta_limit	ARCSTAT(arcstat_meta_limit) /* max size for metadata */
#define	arc_meta_used	ARCSTAT(arcstat_meta_used) /* size of metadata */
#define	arc_meta_max	ARCSTAT(arcstat_meta_max) /* max size of metadata */

#define	L2ARC_IS_VALID_COMPRESS(_c_) \
	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	arc_done_func_t		*awcb_ready;
	arc_done_func_t		*awcb_physdone;
	arc_done_func_t		*awcb_done;
};
	/* protected by hash lock */
	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_callback_t		*b_acb;
	arc_buf_contents_t	b_type;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
static void arc_buf_watch(arc_buf_t *buf);

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags. These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read. However, these flags
 * should never be passed and should only be set by ARC code. When adding new
 * public flags, make sure not to smash the private ones.
 */
#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
	(hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)
#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];
#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2			/* num of writes */

/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define	L2ARC_HEADROOM_BOOST	200
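/*
 * Worked example (illustrative, using the defaults above): a headroom of
 * L2ARC_HEADROOM (2) write sizes is 2 * 8 MB = 16 MB of scan depth per
 * device; boosting by L2ARC_HEADROOM_BOOST (200%) when compressed buffers
 * are seen doubles that to 32 MB for the next scanning cycle.
 */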
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
typedef struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	list_t		*l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */
typedef struct l2arc_read_callback {
	arc_buf_t		*l2rcb_buf;		/* read buffer */
	spa_t			*l2rcb_spa;		/* spa */
	blkptr_t		l2rcb_bp;		/* original blkptr */
	zbookmark_phys_t	l2rcb_zb;		/* original bookmark */
	int			l2rcb_flags;		/* original flags */
	enum zio_compress	l2rcb_compress;		/* applied compress */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t		*b_dev;		/* L2ARC device */
	uint64_t		b_daddr;	/* disk address, offset byte */
	/* compression applied to buffer data */
	enum zio_compress	b_compress;
	/* real alloc'd buffer size depending on b_compress applied */
	uint32_t		b_asize;
	/* temporary buffer holder for in-flight compressed data */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
    enum zio_compress c);
static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_cksum0 == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
}
747 static arc_buf_hdr_t
*
748 buf_hash_find(uint64_t spa
, const blkptr_t
*bp
, kmutex_t
**lockp
)
750 const dva_t
*dva
= BP_IDENTITY(bp
);
751 uint64_t birth
= BP_PHYSICAL_BIRTH(bp
);
752 uint64_t idx
= BUF_HASH_INDEX(spa
, dva
, birth
);
753 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
756 mutex_enter(hash_lock
);
757 for (buf
= buf_hash_table
.ht_table
[idx
]; buf
!= NULL
;
758 buf
= buf
->b_hash_next
) {
759 if (BUF_EQUAL(spa
, dva
, birth
, buf
)) {
764 mutex_exit(hash_lock
);
770 * Insert an entry into the hash table. If there is already an element
771 * equal to elem in the hash table, then the already existing element
772 * will be returned and the new element will not be inserted.
773 * Otherwise returns NULL.
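/*
 * A minimal usage sketch (illustrative): a caller treats a non-NULL
 * return as losing the insertion race and continues with the header
 * that is already hashed:
 *
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... an equal header was already present; use 'exists' ...
 *	}
 */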
775 static arc_buf_hdr_t
*
776 buf_hash_insert(arc_buf_hdr_t
*buf
, kmutex_t
**lockp
)
778 uint64_t idx
= BUF_HASH_INDEX(buf
->b_spa
, &buf
->b_dva
, buf
->b_birth
);
779 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
783 ASSERT(!DVA_IS_EMPTY(&buf
->b_dva
));
784 ASSERT(buf
->b_birth
!= 0);
785 ASSERT(!HDR_IN_HASH_TABLE(buf
));
787 mutex_enter(hash_lock
);
788 for (fbuf
= buf_hash_table
.ht_table
[idx
], i
= 0; fbuf
!= NULL
;
789 fbuf
= fbuf
->b_hash_next
, i
++) {
790 if (BUF_EQUAL(buf
->b_spa
, &buf
->b_dva
, buf
->b_birth
, fbuf
))
794 buf
->b_hash_next
= buf_hash_table
.ht_table
[idx
];
795 buf_hash_table
.ht_table
[idx
] = buf
;
796 buf
->b_flags
|= ARC_IN_HASH_TABLE
;
798 /* collect some hash table performance data */
800 ARCSTAT_BUMP(arcstat_hash_collisions
);
802 ARCSTAT_BUMP(arcstat_hash_chains
);
804 ARCSTAT_MAX(arcstat_hash_chain_max
, i
);
807 ARCSTAT_BUMP(arcstat_hash_elements
);
808 ARCSTAT_MAXSTAT(arcstat_hash_elements
);
814 buf_hash_remove(arc_buf_hdr_t
*buf
)
816 arc_buf_hdr_t
*fbuf
, **bufp
;
817 uint64_t idx
= BUF_HASH_INDEX(buf
->b_spa
, &buf
->b_dva
, buf
->b_birth
);
819 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx
)));
820 ASSERT(HDR_IN_HASH_TABLE(buf
));
822 bufp
= &buf_hash_table
.ht_table
[idx
];
823 while ((fbuf
= *bufp
) != buf
) {
824 ASSERT(fbuf
!= NULL
);
825 bufp
= &fbuf
->b_hash_next
;
827 *bufp
= buf
->b_hash_next
;
828 buf
->b_hash_next
= NULL
;
829 buf
->b_flags
&= ~ARC_IN_HASH_TABLE
;
831 /* collect some hash table performance data */
832 ARCSTAT_BUMPDOWN(arcstat_hash_elements
);
834 if (buf_hash_table
.ht_table
[idx
] &&
835 buf_hash_table
.ht_table
[idx
]->b_hash_next
== NULL
)
836 ARCSTAT_BUMPDOWN(arcstat_hash_chains
);
840 * Global data structures and functions for the buf kmem cache.
842 static kmem_cache_t
*hdr_cache
;
843 static kmem_cache_t
*buf_cache
;
850 kmem_free(buf_hash_table
.ht_table
,
851 (buf_hash_table
.ht_mask
+ 1) * sizeof (void *));
852 for (i
= 0; i
< BUF_LOCKS
; i
++)
853 mutex_destroy(&buf_hash_table
.ht_locks
[i
].ht_lock
);
854 kmem_cache_destroy(hdr_cache
);
855 kmem_cache_destroy(buf_cache
);
859 * Constructor callback - called when the cache is empty
860 * and a new buf is requested.
864 hdr_cons(void *vbuf
, void *unused
, int kmflag
)
866 arc_buf_hdr_t
*buf
= vbuf
;
868 bzero(buf
, sizeof (arc_buf_hdr_t
));
869 refcount_create(&buf
->b_refcnt
);
870 cv_init(&buf
->b_cv
, NULL
, CV_DEFAULT
, NULL
);
871 mutex_init(&buf
->b_freeze_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
872 arc_space_consume(sizeof (arc_buf_hdr_t
), ARC_SPACE_HDRS
);
879 buf_cons(void *vbuf
, void *unused
, int kmflag
)
881 arc_buf_t
*buf
= vbuf
;
883 bzero(buf
, sizeof (arc_buf_t
));
884 mutex_init(&buf
->b_evict_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
885 arc_space_consume(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
891 * Destructor callback - called when a cached buf is
892 * no longer required.
896 hdr_dest(void *vbuf
, void *unused
)
898 arc_buf_hdr_t
*buf
= vbuf
;
900 ASSERT(BUF_EMPTY(buf
));
901 refcount_destroy(&buf
->b_refcnt
);
902 cv_destroy(&buf
->b_cv
);
903 mutex_destroy(&buf
->b_freeze_lock
);
904 arc_space_return(sizeof (arc_buf_hdr_t
), ARC_SPACE_HDRS
);
909 buf_dest(void *vbuf
, void *unused
)
911 arc_buf_t
*buf
= vbuf
;
913 mutex_destroy(&buf
->b_evict_lock
);
914 arc_space_return(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
918 * Reclaim callback -- invoked when memory is low.
922 hdr_recl(void *unused
)
924 dprintf("hdr_recl called\n");
926 * umem calls the reclaim func when we destroy the buf cache,
927 * which is after we do arc_fini().
930 cv_signal(&arc_reclaim_thr_cv
);
937 uint64_t hsize
= 1ULL << 12;
941 * The hash table is big enough to fill all of physical memory
942 * with an average block size of zfs_arc_average_blocksize (default 8K).
943 * By default, the table will take up
944 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
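	/*
	 * Worked example (illustrative): on a machine with 16 GB of
	 * physical memory and the default 8K average block size, the
	 * loop below stops at hsize = 16 GB / 8 KB = 2M buckets, i.e.
	 * a 16 MB table of 8-byte pointers.
	 */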
946 while (hsize
* zfs_arc_average_blocksize
< physmem
* PAGESIZE
)
949 buf_hash_table
.ht_mask
= hsize
- 1;
950 buf_hash_table
.ht_table
=
951 kmem_zalloc(hsize
* sizeof (void*), KM_NOSLEEP
);
952 if (buf_hash_table
.ht_table
== NULL
) {
953 ASSERT(hsize
> (1ULL << 8));
958 hdr_cache
= kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t
),
959 0, hdr_cons
, hdr_dest
, hdr_recl
, NULL
, NULL
, 0);
960 buf_cache
= kmem_cache_create("arc_buf_t", sizeof (arc_buf_t
),
961 0, buf_cons
, buf_dest
, NULL
, NULL
, NULL
, 0);
963 for (i
= 0; i
< 256; i
++)
964 for (ct
= zfs_crc64_table
+ i
, *ct
= i
, j
= 8; j
> 0; j
--)
965 *ct
= (*ct
>> 1) ^ (-(*ct
& 1) & ZFS_CRC64_POLY
);
967 for (i
= 0; i
< BUF_LOCKS
; i
++) {
968 mutex_init(&buf_hash_table
.ht_locks
[i
].ht_lock
,
969 NULL
, MUTEX_DEFAULT
, NULL
);
973 #define ARC_MINTIME (hz>>4) /* 62 ms */
976 arc_cksum_verify(arc_buf_t
*buf
)
980 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
983 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
984 if (buf
->b_hdr
->b_freeze_cksum
== NULL
||
985 (buf
->b_hdr
->b_flags
& ARC_IO_ERROR
)) {
986 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
989 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
, &zc
);
990 if (!ZIO_CHECKSUM_EQUAL(*buf
->b_hdr
->b_freeze_cksum
, zc
))
991 panic("buffer modified while frozen!");
992 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
996 arc_cksum_equal(arc_buf_t
*buf
)
1001 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
1002 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
, &zc
);
1003 equal
= ZIO_CHECKSUM_EQUAL(*buf
->b_hdr
->b_freeze_cksum
, zc
);
1004 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
1010 arc_cksum_compute(arc_buf_t
*buf
, boolean_t force
)
1012 if (!force
&& !(zfs_flags
& ZFS_DEBUG_MODIFY
))
1015 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
1016 if (buf
->b_hdr
->b_freeze_cksum
!= NULL
) {
1017 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
1020 buf
->b_hdr
->b_freeze_cksum
= kmem_alloc(sizeof (zio_cksum_t
), KM_SLEEP
);
1021 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
,
1022 buf
->b_hdr
->b_freeze_cksum
);
1023 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
1028 typedef struct procctl
{
1036 arc_buf_unwatch(arc_buf_t
*buf
)
1043 ctl
.prwatch
.pr_vaddr
= (uintptr_t)buf
->b_data
;
1044 ctl
.prwatch
.pr_size
= 0;
1045 ctl
.prwatch
.pr_wflags
= 0;
1046 result
= write(arc_procfd
, &ctl
, sizeof (ctl
));
1047 ASSERT3U(result
, ==, sizeof (ctl
));
1054 arc_buf_watch(arc_buf_t
*buf
)
1061 ctl
.prwatch
.pr_vaddr
= (uintptr_t)buf
->b_data
;
1062 ctl
.prwatch
.pr_size
= buf
->b_hdr
->b_size
;
1063 ctl
.prwatch
.pr_wflags
= WA_WRITE
;
1064 result
= write(arc_procfd
, &ctl
, sizeof (ctl
));
1065 ASSERT3U(result
, ==, sizeof (ctl
));
1071 arc_buf_thaw(arc_buf_t
*buf
)
1073 if (zfs_flags
& ZFS_DEBUG_MODIFY
) {
1074 if (buf
->b_hdr
->b_state
!= arc_anon
)
1075 panic("modifying non-anon buffer!");
1076 if (buf
->b_hdr
->b_flags
& ARC_IO_IN_PROGRESS
)
1077 panic("modifying buffer while i/o in progress!");
1078 arc_cksum_verify(buf
);
1081 mutex_enter(&buf
->b_hdr
->b_freeze_lock
);
1082 if (buf
->b_hdr
->b_freeze_cksum
!= NULL
) {
1083 kmem_free(buf
->b_hdr
->b_freeze_cksum
, sizeof (zio_cksum_t
));
1084 buf
->b_hdr
->b_freeze_cksum
= NULL
;
1087 if (zfs_flags
& ZFS_DEBUG_MODIFY
) {
1088 if (buf
->b_hdr
->b_thawed
)
1089 kmem_free(buf
->b_hdr
->b_thawed
, 1);
1090 buf
->b_hdr
->b_thawed
= kmem_alloc(1, KM_SLEEP
);
1093 mutex_exit(&buf
->b_hdr
->b_freeze_lock
);
1095 arc_buf_unwatch(buf
);
1099 arc_buf_freeze(arc_buf_t
*buf
)
1101 kmutex_t
*hash_lock
;
1103 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1106 hash_lock
= HDR_LOCK(buf
->b_hdr
);
1107 mutex_enter(hash_lock
);
1109 ASSERT(buf
->b_hdr
->b_freeze_cksum
!= NULL
||
1110 buf
->b_hdr
->b_state
== arc_anon
);
1111 arc_cksum_compute(buf
, B_FALSE
);
1112 mutex_exit(hash_lock
);
1117 add_reference(arc_buf_hdr_t
*ab
, kmutex_t
*hash_lock
, void *tag
)
1119 ASSERT(MUTEX_HELD(hash_lock
));
1121 if ((refcount_add(&ab
->b_refcnt
, tag
) == 1) &&
1122 (ab
->b_state
!= arc_anon
)) {
1123 uint64_t delta
= ab
->b_size
* ab
->b_datacnt
;
1124 list_t
*list
= &ab
->b_state
->arcs_list
[ab
->b_type
];
1125 uint64_t *size
= &ab
->b_state
->arcs_lsize
[ab
->b_type
];
1127 ASSERT(!MUTEX_HELD(&ab
->b_state
->arcs_mtx
));
1128 mutex_enter(&ab
->b_state
->arcs_mtx
);
1129 ASSERT(list_link_active(&ab
->b_arc_node
));
1130 list_remove(list
, ab
);
1131 if (GHOST_STATE(ab
->b_state
)) {
1132 ASSERT0(ab
->b_datacnt
);
1133 ASSERT3P(ab
->b_buf
, ==, NULL
);
1137 ASSERT3U(*size
, >=, delta
);
1138 atomic_add_64(size
, -delta
);
1139 mutex_exit(&ab
->b_state
->arcs_mtx
);
1140 /* remove the prefetch flag if we get a reference */
1141 if (ab
->b_flags
& ARC_PREFETCH
)
1142 ab
->b_flags
&= ~ARC_PREFETCH
;
1147 remove_reference(arc_buf_hdr_t
*ab
, kmutex_t
*hash_lock
, void *tag
)
1150 arc_state_t
*state
= ab
->b_state
;
1152 ASSERT(state
== arc_anon
|| MUTEX_HELD(hash_lock
));
1153 ASSERT(!GHOST_STATE(state
));
1155 if (((cnt
= refcount_remove(&ab
->b_refcnt
, tag
)) == 0) &&
1156 (state
!= arc_anon
)) {
1157 uint64_t *size
= &state
->arcs_lsize
[ab
->b_type
];
1159 ASSERT(!MUTEX_HELD(&state
->arcs_mtx
));
1160 mutex_enter(&state
->arcs_mtx
);
1161 ASSERT(!list_link_active(&ab
->b_arc_node
));
1162 list_insert_head(&state
->arcs_list
[ab
->b_type
], ab
);
1163 ASSERT(ab
->b_datacnt
> 0);
1164 atomic_add_64(size
, ab
->b_size
* ab
->b_datacnt
);
1165 mutex_exit(&state
->arcs_mtx
);
1171 * Move the supplied buffer to the indicated state. The mutex
1172 * for the buffer must be held by the caller.
1175 arc_change_state(arc_state_t
*new_state
, arc_buf_hdr_t
*ab
, kmutex_t
*hash_lock
)
1177 arc_state_t
*old_state
= ab
->b_state
;
1178 int64_t refcnt
= refcount_count(&ab
->b_refcnt
);
1179 uint64_t from_delta
, to_delta
;
1181 ASSERT(MUTEX_HELD(hash_lock
));
1182 ASSERT3P(new_state
, !=, old_state
);
1183 ASSERT(refcnt
== 0 || ab
->b_datacnt
> 0);
1184 ASSERT(ab
->b_datacnt
== 0 || !GHOST_STATE(new_state
));
1185 ASSERT(ab
->b_datacnt
<= 1 || old_state
!= arc_anon
);
1187 from_delta
= to_delta
= ab
->b_datacnt
* ab
->b_size
;
1190 * If this buffer is evictable, transfer it from the
1191 * old state list to the new state list.
1194 if (old_state
!= arc_anon
) {
1195 int use_mutex
= !MUTEX_HELD(&old_state
->arcs_mtx
);
1196 uint64_t *size
= &old_state
->arcs_lsize
[ab
->b_type
];
1199 mutex_enter(&old_state
->arcs_mtx
);
1201 ASSERT(list_link_active(&ab
->b_arc_node
));
1202 list_remove(&old_state
->arcs_list
[ab
->b_type
], ab
);
1205 * If prefetching out of the ghost cache,
1206 * we will have a non-zero datacnt.
1208 if (GHOST_STATE(old_state
) && ab
->b_datacnt
== 0) {
1209 /* ghost elements have a ghost size */
1210 ASSERT(ab
->b_buf
== NULL
);
1211 from_delta
= ab
->b_size
;
1213 ASSERT3U(*size
, >=, from_delta
);
1214 atomic_add_64(size
, -from_delta
);
1217 mutex_exit(&old_state
->arcs_mtx
);
1219 if (new_state
!= arc_anon
) {
1220 int use_mutex
= !MUTEX_HELD(&new_state
->arcs_mtx
);
1221 uint64_t *size
= &new_state
->arcs_lsize
[ab
->b_type
];
1224 mutex_enter(&new_state
->arcs_mtx
);
1226 list_insert_head(&new_state
->arcs_list
[ab
->b_type
], ab
);
1228 /* ghost elements have a ghost size */
1229 if (GHOST_STATE(new_state
)) {
1230 ASSERT(ab
->b_datacnt
== 0);
1231 ASSERT(ab
->b_buf
== NULL
);
1232 to_delta
= ab
->b_size
;
1234 atomic_add_64(size
, to_delta
);
1237 mutex_exit(&new_state
->arcs_mtx
);
1241 ASSERT(!BUF_EMPTY(ab
));
1242 if (new_state
== arc_anon
&& HDR_IN_HASH_TABLE(ab
))
1243 buf_hash_remove(ab
);
1245 /* adjust state sizes */
1247 atomic_add_64(&new_state
->arcs_size
, to_delta
);
1249 ASSERT3U(old_state
->arcs_size
, >=, from_delta
);
1250 atomic_add_64(&old_state
->arcs_size
, -from_delta
);
1252 ab
->b_state
= new_state
;
1254 /* adjust l2arc hdr stats */
1255 if (new_state
== arc_l2c_only
)
1256 l2arc_hdr_stat_add();
1257 else if (old_state
== arc_l2c_only
)
1258 l2arc_hdr_stat_remove();
1262 arc_space_consume(uint64_t space
, arc_space_type_t type
)
1264 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
1267 case ARC_SPACE_DATA
:
1268 ARCSTAT_INCR(arcstat_data_size
, space
);
1270 case ARC_SPACE_OTHER
:
1271 ARCSTAT_INCR(arcstat_other_size
, space
);
1273 case ARC_SPACE_HDRS
:
1274 ARCSTAT_INCR(arcstat_hdr_size
, space
);
1276 case ARC_SPACE_L2HDRS
:
1277 ARCSTAT_INCR(arcstat_l2_hdr_size
, space
);
1281 ARCSTAT_INCR(arcstat_meta_used
, space
);
1282 atomic_add_64(&arc_size
, space
);
1286 arc_space_return(uint64_t space
, arc_space_type_t type
)
1288 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
1291 case ARC_SPACE_DATA
:
1292 ARCSTAT_INCR(arcstat_data_size
, -space
);
1294 case ARC_SPACE_OTHER
:
1295 ARCSTAT_INCR(arcstat_other_size
, -space
);
1297 case ARC_SPACE_HDRS
:
1298 ARCSTAT_INCR(arcstat_hdr_size
, -space
);
1300 case ARC_SPACE_L2HDRS
:
1301 ARCSTAT_INCR(arcstat_l2_hdr_size
, -space
);
1305 ASSERT(arc_meta_used
>= space
);
1306 if (arc_meta_max
< arc_meta_used
)
1307 arc_meta_max
= arc_meta_used
;
1308 ARCSTAT_INCR(arcstat_meta_used
, -space
);
1309 ASSERT(arc_size
>= space
);
1310 atomic_add_64(&arc_size
, -space
);
1314 arc_data_buf_alloc(uint64_t size
)
1316 if (arc_evict_needed(ARC_BUFC_DATA
))
1317 cv_signal(&arc_reclaim_thr_cv
);
1318 atomic_add_64(&arc_size
, size
);
1319 return (zio_data_buf_alloc(size
));
1323 arc_data_buf_free(void *buf
, uint64_t size
)
1325 zio_data_buf_free(buf
, size
);
1326 ASSERT(arc_size
>= size
);
1327 atomic_add_64(&arc_size
, -size
);
1331 arc_buf_alloc(spa_t
*spa
, int size
, void *tag
, arc_buf_contents_t type
)
1336 ASSERT3U(size
, >, 0);
1337 hdr
= kmem_cache_alloc(hdr_cache
, KM_PUSHPAGE
);
1338 ASSERT(BUF_EMPTY(hdr
));
1341 hdr
->b_spa
= spa_load_guid(spa
);
1342 hdr
->b_state
= arc_anon
;
1343 hdr
->b_arc_access
= 0;
1344 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
1347 buf
->b_efunc
= NULL
;
1348 buf
->b_private
= NULL
;
1351 arc_get_data_buf(buf
);
1354 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
1355 (void) refcount_add(&hdr
->b_refcnt
, tag
);
1360 static char *arc_onloan_tag
= "onloan";
/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
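/*
 * A minimal sketch of the loan/return cycle (illustrative):
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, size);
 *	... fill abuf->b_data with dirty data ...
 *	arc_return_buf(abuf, tag);
 */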
1369 arc_loan_buf(spa_t
*spa
, int size
)
1373 buf
= arc_buf_alloc(spa
, size
, arc_onloan_tag
, ARC_BUFC_DATA
);
1375 atomic_add_64(&arc_loaned_bytes
, size
);
1380 * Return a loaned arc buffer to the arc.
1383 arc_return_buf(arc_buf_t
*buf
, void *tag
)
1385 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1387 ASSERT(buf
->b_data
!= NULL
);
1388 (void) refcount_add(&hdr
->b_refcnt
, tag
);
1389 (void) refcount_remove(&hdr
->b_refcnt
, arc_onloan_tag
);
1391 atomic_add_64(&arc_loaned_bytes
, -hdr
->b_size
);
1394 /* Detach an arc_buf from a dbuf (tag) */
1396 arc_loan_inuse_buf(arc_buf_t
*buf
, void *tag
)
1400 ASSERT(buf
->b_data
!= NULL
);
1402 (void) refcount_add(&hdr
->b_refcnt
, arc_onloan_tag
);
1403 (void) refcount_remove(&hdr
->b_refcnt
, tag
);
1404 buf
->b_efunc
= NULL
;
1405 buf
->b_private
= NULL
;
1407 atomic_add_64(&arc_loaned_bytes
, hdr
->b_size
);
1411 arc_buf_clone(arc_buf_t
*from
)
1414 arc_buf_hdr_t
*hdr
= from
->b_hdr
;
1415 uint64_t size
= hdr
->b_size
;
1417 ASSERT(hdr
->b_state
!= arc_anon
);
1419 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
1422 buf
->b_efunc
= NULL
;
1423 buf
->b_private
= NULL
;
1424 buf
->b_next
= hdr
->b_buf
;
1426 arc_get_data_buf(buf
);
1427 bcopy(from
->b_data
, buf
->b_data
, size
);
1430 * This buffer already exists in the arc so create a duplicate
1431 * copy for the caller. If the buffer is associated with user data
1432 * then track the size and number of duplicates. These stats will be
1433 * updated as duplicate buffers are created and destroyed.
1435 if (hdr
->b_type
== ARC_BUFC_DATA
) {
1436 ARCSTAT_BUMP(arcstat_duplicate_buffers
);
1437 ARCSTAT_INCR(arcstat_duplicate_buffers_size
, size
);
1439 hdr
->b_datacnt
+= 1;
1444 arc_buf_add_ref(arc_buf_t
*buf
, void* tag
)
1447 kmutex_t
*hash_lock
;
 * Check to see if this buffer is evicted. Callers
 * must verify b_data != NULL to know if the add_ref
 * was successful.
1454 mutex_enter(&buf
->b_evict_lock
);
1455 if (buf
->b_data
== NULL
) {
1456 mutex_exit(&buf
->b_evict_lock
);
1459 hash_lock
= HDR_LOCK(buf
->b_hdr
);
1460 mutex_enter(hash_lock
);
1462 ASSERT3P(hash_lock
, ==, HDR_LOCK(hdr
));
1463 mutex_exit(&buf
->b_evict_lock
);
1465 ASSERT(hdr
->b_state
== arc_mru
|| hdr
->b_state
== arc_mfu
);
1466 add_reference(hdr
, hash_lock
, tag
);
1467 DTRACE_PROBE1(arc__hit
, arc_buf_hdr_t
*, hdr
);
1468 arc_access(hdr
, hash_lock
);
1469 mutex_exit(hash_lock
);
1470 ARCSTAT_BUMP(arcstat_hits
);
1471 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
1472 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
1473 data
, metadata
, hits
);
1477 * Free the arc data buffer. If it is an l2arc write in progress,
1478 * the buffer is placed on l2arc_free_on_write to be freed later.
1481 arc_buf_data_free(arc_buf_t
*buf
, void (*free_func
)(void *, size_t))
1483 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1485 if (HDR_L2_WRITING(hdr
)) {
1486 l2arc_data_free_t
*df
;
1487 df
= kmem_alloc(sizeof (l2arc_data_free_t
), KM_SLEEP
);
1488 df
->l2df_data
= buf
->b_data
;
1489 df
->l2df_size
= hdr
->b_size
;
1490 df
->l2df_func
= free_func
;
1491 mutex_enter(&l2arc_free_on_write_mtx
);
1492 list_insert_head(l2arc_free_on_write
, df
);
1493 mutex_exit(&l2arc_free_on_write_mtx
);
1494 ARCSTAT_BUMP(arcstat_l2_free_on_write
);
1496 free_func(buf
->b_data
, hdr
->b_size
);
 * Free up buf->b_data and if 'remove' is set, then pull the
 * arc_buf_t off of the arc_buf_hdr_t's list and free it.
1505 arc_buf_destroy(arc_buf_t
*buf
, boolean_t recycle
, boolean_t remove
)
1509 /* free up data associated with the buf */
1511 arc_state_t
*state
= buf
->b_hdr
->b_state
;
1512 uint64_t size
= buf
->b_hdr
->b_size
;
1513 arc_buf_contents_t type
= buf
->b_hdr
->b_type
;
1515 arc_cksum_verify(buf
);
1516 arc_buf_unwatch(buf
);
1519 if (type
== ARC_BUFC_METADATA
) {
1520 arc_buf_data_free(buf
, zio_buf_free
);
1521 arc_space_return(size
, ARC_SPACE_DATA
);
1523 ASSERT(type
== ARC_BUFC_DATA
);
1524 arc_buf_data_free(buf
, zio_data_buf_free
);
1525 ARCSTAT_INCR(arcstat_data_size
, -size
);
1526 atomic_add_64(&arc_size
, -size
);
1529 if (list_link_active(&buf
->b_hdr
->b_arc_node
)) {
1530 uint64_t *cnt
= &state
->arcs_lsize
[type
];
1532 ASSERT(refcount_is_zero(&buf
->b_hdr
->b_refcnt
));
1533 ASSERT(state
!= arc_anon
);
1535 ASSERT3U(*cnt
, >=, size
);
1536 atomic_add_64(cnt
, -size
);
1538 ASSERT3U(state
->arcs_size
, >=, size
);
1539 atomic_add_64(&state
->arcs_size
, -size
);
1543 * If we're destroying a duplicate buffer make sure
1544 * that the appropriate statistics are updated.
1546 if (buf
->b_hdr
->b_datacnt
> 1 &&
1547 buf
->b_hdr
->b_type
== ARC_BUFC_DATA
) {
1548 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers
);
1549 ARCSTAT_INCR(arcstat_duplicate_buffers_size
, -size
);
1551 ASSERT(buf
->b_hdr
->b_datacnt
> 0);
1552 buf
->b_hdr
->b_datacnt
-= 1;
1555 /* only remove the buf if requested */
1559 /* remove the buf from the hdr list */
1560 for (bufp
= &buf
->b_hdr
->b_buf
; *bufp
!= buf
; bufp
= &(*bufp
)->b_next
)
1562 *bufp
= buf
->b_next
;
1565 ASSERT(buf
->b_efunc
== NULL
);
1567 /* clean up the buf */
1569 kmem_cache_free(buf_cache
, buf
);
1573 arc_hdr_destroy(arc_buf_hdr_t
*hdr
)
1575 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
1576 ASSERT3P(hdr
->b_state
, ==, arc_anon
);
1577 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
1578 l2arc_buf_hdr_t
*l2hdr
= hdr
->b_l2hdr
;
1580 if (l2hdr
!= NULL
) {
1581 boolean_t buflist_held
= MUTEX_HELD(&l2arc_buflist_mtx
);
1583 * To prevent arc_free() and l2arc_evict() from
1584 * attempting to free the same buffer at the same time,
1585 * a FREE_IN_PROGRESS flag is given to arc_free() to
1586 * give it priority. l2arc_evict() can't destroy this
1587 * header while we are waiting on l2arc_buflist_mtx.
1589 * The hdr may be removed from l2ad_buflist before we
1590 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1592 if (!buflist_held
) {
1593 mutex_enter(&l2arc_buflist_mtx
);
1594 l2hdr
= hdr
->b_l2hdr
;
1597 if (l2hdr
!= NULL
) {
1598 list_remove(l2hdr
->b_dev
->l2ad_buflist
, hdr
);
1599 ARCSTAT_INCR(arcstat_l2_size
, -hdr
->b_size
);
1600 ARCSTAT_INCR(arcstat_l2_asize
, -l2hdr
->b_asize
);
1601 vdev_space_update(l2hdr
->b_dev
->l2ad_vdev
,
1602 -l2hdr
->b_asize
, 0, 0);
1603 kmem_free(l2hdr
, sizeof (l2arc_buf_hdr_t
));
1604 if (hdr
->b_state
== arc_l2c_only
)
1605 l2arc_hdr_stat_remove();
1606 hdr
->b_l2hdr
= NULL
;
1610 mutex_exit(&l2arc_buflist_mtx
);
1613 if (!BUF_EMPTY(hdr
)) {
1614 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
1615 buf_discard_identity(hdr
);
1617 while (hdr
->b_buf
) {
1618 arc_buf_t
*buf
= hdr
->b_buf
;
1621 mutex_enter(&arc_eviction_mtx
);
1622 mutex_enter(&buf
->b_evict_lock
);
1623 ASSERT(buf
->b_hdr
!= NULL
);
1624 arc_buf_destroy(hdr
->b_buf
, FALSE
, FALSE
);
1625 hdr
->b_buf
= buf
->b_next
;
1626 buf
->b_hdr
= &arc_eviction_hdr
;
1627 buf
->b_next
= arc_eviction_list
;
1628 arc_eviction_list
= buf
;
1629 mutex_exit(&buf
->b_evict_lock
);
1630 mutex_exit(&arc_eviction_mtx
);
1632 arc_buf_destroy(hdr
->b_buf
, FALSE
, TRUE
);
1635 if (hdr
->b_freeze_cksum
!= NULL
) {
1636 kmem_free(hdr
->b_freeze_cksum
, sizeof (zio_cksum_t
));
1637 hdr
->b_freeze_cksum
= NULL
;
1639 if (hdr
->b_thawed
) {
1640 kmem_free(hdr
->b_thawed
, 1);
1641 hdr
->b_thawed
= NULL
;
1644 ASSERT(!list_link_active(&hdr
->b_arc_node
));
1645 ASSERT3P(hdr
->b_hash_next
, ==, NULL
);
1646 ASSERT3P(hdr
->b_acb
, ==, NULL
);
1647 kmem_cache_free(hdr_cache
, hdr
);
1651 arc_buf_free(arc_buf_t
*buf
, void *tag
)
1653 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1654 int hashed
= hdr
->b_state
!= arc_anon
;
1656 ASSERT(buf
->b_efunc
== NULL
);
1657 ASSERT(buf
->b_data
!= NULL
);
1660 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
1662 mutex_enter(hash_lock
);
1664 ASSERT3P(hash_lock
, ==, HDR_LOCK(hdr
));
1666 (void) remove_reference(hdr
, hash_lock
, tag
);
1667 if (hdr
->b_datacnt
> 1) {
1668 arc_buf_destroy(buf
, FALSE
, TRUE
);
1670 ASSERT(buf
== hdr
->b_buf
);
1671 ASSERT(buf
->b_efunc
== NULL
);
1672 hdr
->b_flags
|= ARC_BUF_AVAILABLE
;
1674 mutex_exit(hash_lock
);
1675 } else if (HDR_IO_IN_PROGRESS(hdr
)) {
1678 * We are in the middle of an async write. Don't destroy
1679 * this buffer unless the write completes before we finish
1680 * decrementing the reference count.
1682 mutex_enter(&arc_eviction_mtx
);
1683 (void) remove_reference(hdr
, NULL
, tag
);
1684 ASSERT(refcount_is_zero(&hdr
->b_refcnt
));
1685 destroy_hdr
= !HDR_IO_IN_PROGRESS(hdr
);
1686 mutex_exit(&arc_eviction_mtx
);
1688 arc_hdr_destroy(hdr
);
1690 if (remove_reference(hdr
, NULL
, tag
) > 0)
1691 arc_buf_destroy(buf
, FALSE
, TRUE
);
1693 arc_hdr_destroy(hdr
);
1698 arc_buf_remove_ref(arc_buf_t
*buf
, void* tag
)
1700 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1701 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
1702 boolean_t no_callback
= (buf
->b_efunc
== NULL
);
1704 if (hdr
->b_state
== arc_anon
) {
1705 ASSERT(hdr
->b_datacnt
== 1);
1706 arc_buf_free(buf
, tag
);
1707 return (no_callback
);
1710 mutex_enter(hash_lock
);
1712 ASSERT3P(hash_lock
, ==, HDR_LOCK(hdr
));
1713 ASSERT(hdr
->b_state
!= arc_anon
);
1714 ASSERT(buf
->b_data
!= NULL
);
1716 (void) remove_reference(hdr
, hash_lock
, tag
);
1717 if (hdr
->b_datacnt
> 1) {
1719 arc_buf_destroy(buf
, FALSE
, TRUE
);
1720 } else if (no_callback
) {
1721 ASSERT(hdr
->b_buf
== buf
&& buf
->b_next
== NULL
);
1722 ASSERT(buf
->b_efunc
== NULL
);
1723 hdr
->b_flags
|= ARC_BUF_AVAILABLE
;
1725 ASSERT(no_callback
|| hdr
->b_datacnt
> 1 ||
1726 refcount_is_zero(&hdr
->b_refcnt
));
1727 mutex_exit(hash_lock
);
1728 return (no_callback
);
1732 arc_buf_size(arc_buf_t
*buf
)
1734 return (buf
->b_hdr
->b_size
);
1738 * Called from the DMU to determine if the current buffer should be
1739 * evicted. In order to ensure proper locking, the eviction must be initiated
1740 * from the DMU. Return true if the buffer is associated with user data and
1741 * duplicate buffers still exist.
1744 arc_buf_eviction_needed(arc_buf_t
*buf
)
1747 boolean_t evict_needed
= B_FALSE
;
1749 if (zfs_disable_dup_eviction
)
1752 mutex_enter(&buf
->b_evict_lock
);
1756 * We are in arc_do_user_evicts(); let that function
1757 * perform the eviction.
1759 ASSERT(buf
->b_data
== NULL
);
1760 mutex_exit(&buf
->b_evict_lock
);
1762 } else if (buf
->b_data
== NULL
) {
1764 * We have already been added to the arc eviction list;
1765 * recommend eviction.
1767 ASSERT3P(hdr
, ==, &arc_eviction_hdr
);
1768 mutex_exit(&buf
->b_evict_lock
);
1772 if (hdr
->b_datacnt
> 1 && hdr
->b_type
== ARC_BUFC_DATA
)
1773 evict_needed
= B_TRUE
;
1775 mutex_exit(&buf
->b_evict_lock
);
1776 return (evict_needed
);
1780 * Evict buffers from list until we've removed the specified number of
1781 * bytes. Move the removed buffers to the appropriate evict state.
1782 * If the recycle flag is set, then attempt to "recycle" a buffer:
1783 * - look for a buffer to evict that is `bytes' long.
1784 * - return the data block from this buffer rather than freeing it.
1785 * This flag is used by callers that are trying to make space for a
1786 * new buffer in a full arc cache.
1788 * This function makes a "best effort". It skips over any buffers
1789 * it can't get a hash_lock on, and so may not catch all candidates.
1790 * It may also return without evicting as much space as requested.
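/*
 * Illustrative use of the recycle path described above: a caller that
 * needs a block of exactly 'size' bytes can ask for the evicted data
 * block back instead of freeing and reallocating it, e.g.
 *
 *	data = arc_evict(state, NULL, size, TRUE, type);
 *
 * falling back to a fresh allocation when NULL is returned.
 */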
1793 arc_evict(arc_state_t
*state
, uint64_t spa
, int64_t bytes
, boolean_t recycle
,
1794 arc_buf_contents_t type
)
1796 arc_state_t
*evicted_state
;
1797 uint64_t bytes_evicted
= 0, skipped
= 0, missed
= 0;
1798 arc_buf_hdr_t
*ab
, *ab_prev
= NULL
;
1799 list_t
*list
= &state
->arcs_list
[type
];
1800 kmutex_t
*hash_lock
;
1801 boolean_t have_lock
;
1802 void *stolen
= NULL
;
1803 arc_buf_hdr_t marker
= { 0 };
1806 ASSERT(state
== arc_mru
|| state
== arc_mfu
);
1808 evicted_state
= (state
== arc_mru
) ? arc_mru_ghost
: arc_mfu_ghost
;
1810 mutex_enter(&state
->arcs_mtx
);
1811 mutex_enter(&evicted_state
->arcs_mtx
);
1813 for (ab
= list_tail(list
); ab
; ab
= ab_prev
) {
1814 ab_prev
= list_prev(list
, ab
);
1815 /* prefetch buffers have a minimum lifespan */
1816 if (HDR_IO_IN_PROGRESS(ab
) ||
1817 (spa
&& ab
->b_spa
!= spa
) ||
1818 (ab
->b_flags
& (ARC_PREFETCH
|ARC_INDIRECT
) &&
1819 ddi_get_lbolt() - ab
->b_arc_access
<
1820 arc_min_prefetch_lifespan
)) {
1824 /* "lookahead" for better eviction candidate */
1825 if (recycle
&& ab
->b_size
!= bytes
&&
1826 ab_prev
&& ab_prev
->b_size
== bytes
)
1829 /* ignore markers */
1834 * It may take a long time to evict all the bufs requested.
1835 * To avoid blocking all arc activity, periodically drop
1836 * the arcs_mtx and give other threads a chance to run
1837 * before reacquiring the lock.
1839 * If we are looking for a buffer to recycle, we are in
1840 * the hot code path, so don't sleep.
1842 if (!recycle
&& count
++ > arc_evict_iterations
) {
1843 list_insert_after(list
, ab
, &marker
);
1844 mutex_exit(&evicted_state
->arcs_mtx
);
1845 mutex_exit(&state
->arcs_mtx
);
1846 kpreempt(KPREEMPT_SYNC
);
1847 mutex_enter(&state
->arcs_mtx
);
1848 mutex_enter(&evicted_state
->arcs_mtx
);
1849 ab_prev
= list_prev(list
, &marker
);
1850 list_remove(list
, &marker
);
1855 hash_lock
= HDR_LOCK(ab
);
1856 have_lock
= MUTEX_HELD(hash_lock
);
1857 if (have_lock
|| mutex_tryenter(hash_lock
)) {
1858 ASSERT0(refcount_count(&ab
->b_refcnt
));
1859 ASSERT(ab
->b_datacnt
> 0);
1861 arc_buf_t
*buf
= ab
->b_buf
;
1862 if (!mutex_tryenter(&buf
->b_evict_lock
)) {
1867 bytes_evicted
+= ab
->b_size
;
1868 if (recycle
&& ab
->b_type
== type
&&
1869 ab
->b_size
== bytes
&&
1870 !HDR_L2_WRITING(ab
)) {
1871 stolen
= buf
->b_data
;
1876 mutex_enter(&arc_eviction_mtx
);
1877 arc_buf_destroy(buf
,
1878 buf
->b_data
== stolen
, FALSE
);
1879 ab
->b_buf
= buf
->b_next
;
1880 buf
->b_hdr
= &arc_eviction_hdr
;
1881 buf
->b_next
= arc_eviction_list
;
1882 arc_eviction_list
= buf
;
1883 mutex_exit(&arc_eviction_mtx
);
1884 mutex_exit(&buf
->b_evict_lock
);
1886 mutex_exit(&buf
->b_evict_lock
);
1887 arc_buf_destroy(buf
,
1888 buf
->b_data
== stolen
, TRUE
);
1893 ARCSTAT_INCR(arcstat_evict_l2_cached
,
1896 if (l2arc_write_eligible(ab
->b_spa
, ab
)) {
1897 ARCSTAT_INCR(arcstat_evict_l2_eligible
,
1901 arcstat_evict_l2_ineligible
,
1906 if (ab
->b_datacnt
== 0) {
1907 arc_change_state(evicted_state
, ab
, hash_lock
);
1908 ASSERT(HDR_IN_HASH_TABLE(ab
));
1909 ab
->b_flags
|= ARC_IN_HASH_TABLE
;
1910 ab
->b_flags
&= ~ARC_BUF_AVAILABLE
;
1911 DTRACE_PROBE1(arc__evict
, arc_buf_hdr_t
*, ab
);
1914 mutex_exit(hash_lock
);
1915 if (bytes
>= 0 && bytes_evicted
>= bytes
)
1922 mutex_exit(&evicted_state
->arcs_mtx
);
1923 mutex_exit(&state
->arcs_mtx
);
1925 if (bytes_evicted
< bytes
)
1926 dprintf("only evicted %lld bytes from %x",
1927 (longlong_t
)bytes_evicted
, state
);
1930 ARCSTAT_INCR(arcstat_evict_skip
, skipped
);
1933 ARCSTAT_INCR(arcstat_mutex_miss
, missed
);
 * Note: we have just evicted some data into the ghost state,
 * potentially putting the ghost size over the desired size. Rather
 * than evicting from the ghost list in this hot code path, leave
 * this chore to the arc_reclaim_thread().
/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	arc_buf_hdr_t marker = { 0 };
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;
	int count = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		if (ab->b_type > ARC_BUFC_NUMTYPES)
			panic("invalid ab=%p", (void *)ab);
		if (spa && ab->b_spa != spa)
			continue;

		/* ignore markers */
		if (ab->b_spa == 0)
			continue;

		hash_lock = HDR_LOCK(ab);
		/* caller may be trying to modify this buffer, skip it */
		if (MUTEX_HELD(hash_lock))
			continue;

		/*
		 * It may take a long time to evict all the bufs requested.
		 * To avoid blocking all arc activity, periodically drop
		 * the arcs_mtx and give other threads a chance to run
		 * before reacquiring the lock.
		 */
		if (count++ > arc_evict_iterations) {
			list_insert_after(list, ab, &marker);
			mutex_exit(&state->arcs_mtx);
			kpreempt(KPREEMPT_SYNC);
			mutex_enter(&state->arcs_mtx);
			ab_prev = list_prev(list, &marker);
			list_remove(list, &marker);
			count = 0;
			continue;
		}
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;

			if (ab->b_l2hdr != NULL) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
				 */
				arc_change_state(arc_l2c_only, ab, hash_lock);
				mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, ab, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(ab);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else if (bytes < 0) {
			/*
			 * Insert a list marker and then wait for the
			 * hash lock to become available.  Once it's
			 * available, restart from where we left off.
			 */
			list_insert_after(list, ab, &marker);
			mutex_exit(&state->arcs_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			mutex_enter(&state->arcs_mtx);
			ab_prev = list_prev(list, &marker);
			list_remove(list, &marker);
		} else {
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped)
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}
static void
arc_adjust(void)
{
	int64_t adjustment, delta;

	/*
	 * Adjust MRU size
	 */

	adjustment = MIN((int64_t)(arc_size - arc_c),
	    (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
	    arc_p));

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
		(void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
		(void) arc_evict(arc_mru, NULL, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 */

	adjustment = arc_size - arc_c;

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
		(void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t delta = MIN(adjustment,
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
		(void) arc_evict(arc_mfu, NULL, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust ghost lists
	 */

	adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
		delta = MIN(arc_mru_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mru_ghost, NULL, delta);
	}

	adjustment =
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
		delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mfu_ghost, NULL, delta);
	}
}
static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		mutex_enter(&buf->b_evict_lock);
		buf->b_hdr = NULL;
		mutex_exit(&buf->b_evict_lock);
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY0(buf->b_efunc(buf->b_private));

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}
/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
	uint64_t guid = 0;

	if (spa)
		guid = spa_load_guid(spa);

	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}

	arc_evict_ghost(arc_mru_ghost, guid, -1);
	arc_evict_ghost(arc_mfu_ghost, guid, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(spa || arc_eviction_list == NULL);
}
void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}
/*
 * Determine if the system is under memory pressure and is asking
 * to reclaim memory.  A return value of 1 indicates that the system
 * is under memory pressure and that the arc should adjust accordingly.
 */
static int
arc_reclaim_needed(void)
{
	uint64_t extra;

#ifdef _KERNEL

	if (needfree)
		return (1);

	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

	/*
	 * Check that we have enough availrmem that memory locking (e.g., via
	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
	 * stores the number of pages that cannot be locked; when availrmem
	 * drops below pages_pp_maximum, page locking mechanisms such as
	 * page_pp_lock() will fail.)
	 */
	if (availrmem <= pages_pp_maximum)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (vmem_size(heap_arena, VMEM_FREE) <
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
		return (1);
#endif

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this arena remains
	 * above about 1/16th free.
	 *
	 * Note: The 1/16th arena free requirement was put in place
	 * to aggressively evict memory from the arc in order to avoid
	 * memory fragmentation issues.
	 */
	if (zio_arena != NULL &&
	    vmem_size(zio_arena, VMEM_FREE) <
	    (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
		return (1);
#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}
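/*
 * Illustrative sketch (not part of the ARC implementation): each kernel test
 * above has the shape "some measure of free memory fell below a watermark
 * plus a safety margin".  A minimal userland sketch of that shape, with
 * hypothetical values standing in for freemem/lotsfree/needfree/desfree:
 *
 *	#include <stdint.h>
 *
 *	static int
 *	reclaim_needed(uint64_t freemem, uint64_t lotsfree,
 *	    uint64_t needfree, uint64_t desfree)
 *	{
 *		uint64_t extra = desfree;	// reclaim sooner, not later
 *
 *		if (needfree > 0)
 *			return (1);
 *		if (freemem < lotsfree + needfree + extra)
 *			return (1);
 *		return (0);
 *	}
 *
 * The kernel version layers on the swapfs, page-locking, i386 heap, and
 * zio_arena fragmentation checks shown above.
 */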
static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
	size_t			i;
	kmem_cache_t		*prev_cache = NULL;
	kmem_cache_t		*prev_data_cache = NULL;
	extern kmem_cache_t	*zio_buf_cache[];
	extern kmem_cache_t	*zio_data_buf_cache[];
	extern kmem_cache_t	*range_seg_cache;

#ifdef _KERNEL
	if (arc_meta_used >= arc_meta_limit) {
		/*
		 * We are exceeding our meta-data cache limit.
		 * Purge some DNLC entries to release holds on meta-data.
		 */
		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
	}
#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_shrink();

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
	kmem_cache_reap_now(range_seg_cache);

	/*
	 * Ask the vmem arena to reclaim unused memory from its
	 * quantum caches.
	 */
	if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
		vmem_qcache_reap(zio_arena);
}
static void
arc_reclaim_thread(void)
{
	clock_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc_no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = ddi_get_lbolt() + (arc_grow_retry * hz);

			arc_kmem_reap_now(last_reclaim);
			arc_warm = B_TRUE;

		} else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
			arc_no_grow = FALSE;
		}

		arc_adjust();

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}
/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;
	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);

	if (state == arc_l2c_only)
		return;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc_mru_ghost) {
		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
		mult = MIN(mult, 10); /* avoid wild arc_p adjustment */

		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		uint64_t delta;

		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
		mult = MIN(mult, 10);

		delta = MIN(bytes * mult, arc_p);
		arc_p = MAX(arc_p_min, arc_p - delta);
	}
	ASSERT((int64_t)arc_p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);
		return;
	}

	if (arc_no_grow)
		return;

	if (arc_c >= arc_c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc_c, (int64_t)bytes);
		if (arc_c > arc_c_max)
			arc_c = arc_c_max;
		else if (state == arc_anon)
			atomic_add_64(&arc_p, (int64_t)bytes);
		if (arc_p > arc_c)
			arc_p = arc_c;
	}
	ASSERT((int64_t)arc_p >= 0);
}
/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed(arc_buf_contents_t type)
{
	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
		return (1);

	if (arc_reclaim_needed())
		return (1);

	return (arc_size > arc_c);
}
/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c-p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c-p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
	arc_state_t		*state = buf->b_hdr->b_state;
	uint64_t		size = buf->b_hdr->b_size;
	arc_buf_contents_t	type = buf->b_hdr->b_type;

	arc_adapt(size, state);

	/*
	 * We have not yet reached cache maximum size,
	 * just allocate a new buffer.
	 */
	if (!arc_evict_needed(type)) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_DATA);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			ARCSTAT_INCR(arcstat_data_size, size);
			atomic_add_64(&arc_size, size);
		}
		goto out;
	}

	/*
	 * If we are prefetching from the mfu ghost list, this buffer
	 * will end up on the mru list; so steal space from there.
	 */
	if (state == arc_mfu_ghost)
		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
	else if (state == arc_mru_ghost)
		state = arc_mru;

	if (state == arc_mru || state == arc_anon) {
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
		state = (arc_mfu->arcs_lsize[type] >= size &&
		    arc_p > mru_used) ? arc_mfu : arc_mru;
	} else {
		/* MFU cases */
		uint64_t mfu_space = arc_c - arc_p;
		state = (arc_mru->arcs_lsize[type] >= size &&
		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
	}
	if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_DATA);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			ARCSTAT_INCR(arcstat_data_size, size);
			atomic_add_64(&arc_size, size);
		}
		ARCSTAT_BUMP(arcstat_recycle_miss);
	}
	ASSERT(buf->b_data != NULL);
out:
	/*
	 * Update the state size.  Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(buf->b_hdr->b_state)) {
		arc_buf_hdr_t *hdr = buf->b_hdr;

		atomic_add_64(&hdr->b_state->arcs_size, size);
		if (list_link_active(&hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_refcnt));
			atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
		}
		/*
		 * If we are growing the cache, and we are adding anonymous
		 * data, and we have outgrown arc_p, update arc_p
		 */
		if (arc_size < arc_c && hdr->b_state == arc_anon &&
		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
			arc_p = MIN(arc_c, arc_p + size);
	}
}
/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
	clock_t now;

	ASSERT(MUTEX_HELD(hash_lock));

	if (buf->b_state == arc_anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		ASSERT(buf->b_arc_access == 0);
		buf->b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mru, buf, hash_lock);

	} else if (buf->b_state == arc_mru) {
		now = ddi_get_lbolt();

		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			if (refcount_count(&buf->b_refcnt) == 0) {
				ASSERT(list_link_active(&buf->b_arc_node));
			} else {
				buf->b_flags &= ~ARC_PREFETCH;
				ARCSTAT_BUMP(arcstat_mru_hits);
			}
			buf->b_arc_access = now;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache. Move it to the MFU
		 * state.
		 */
		if (now > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than 125ms have passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			buf->b_arc_access = now;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
			arc_change_state(arc_mfu, buf, hash_lock);
		}
		ARCSTAT_BUMP(arcstat_mru_hits);
	} else if (buf->b_state == arc_mru_ghost) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc_mru;
			if (refcount_count(&buf->b_refcnt) > 0)
				buf->b_flags &= ~ARC_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		} else {
			new_state = arc_mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		}

		buf->b_arc_access = ddi_get_lbolt();
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
	} else if (buf->b_state == arc_mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			ASSERT(refcount_count(&buf->b_refcnt) == 0);
			ASSERT(list_link_active(&buf->b_arc_node));
		}
		ARCSTAT_BUMP(arcstat_mfu_hits);
		buf->b_arc_access = ddi_get_lbolt();
	} else if (buf->b_state == arc_mfu_ghost) {
		arc_state_t	*new_state = arc_mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT0(refcount_count(&buf->b_refcnt));
			new_state = arc_mru;
		}

		buf->b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
	} else if (buf->b_state == arc_l2c_only) {
		/*
		 * This buffer is on the 2nd Level ARC.
		 */

		buf->b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mfu, buf, hash_lock);
	} else {
		ASSERT(!"invalid arc state");
	}
}
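/*
 * Illustrative sketch (not part of the ARC implementation): the state
 * machine above promotes an MRU-resident buffer to MFU only on a second
 * access that comes at least ARC_MINTIME ticks after the first, so a burst
 * of back-to-back reads of a new block does not immediately count as
 * frequent reuse.  A sketch of the next-state rule, ignoring prefetch flags
 * and using a hypothetical MINTIME stand-in:
 *
 *	#include <stdint.h>
 *
 *	#define	MINTIME	8	// stand-in for ARC_MINTIME (~125ms of ticks)
 *
 *	enum st { ANON, MRU, MRU_GHOST, MFU, MFU_GHOST };
 *
 *	// anon -> mru; mru -> mfu only after MINTIME; ghosts and mfu -> mfu.
 *	static enum st
 *	next_state(enum st cur, uint64_t now, uint64_t last)
 *	{
 *		switch (cur) {
 *		case ANON:	return (MRU);
 *		case MRU:	return (now > last + MINTIME ? MFU : MRU);
 *		case MRU_GHOST:
 *		case MFU_GHOST:
 *		case MFU:	return (MFU);
 *		}
 *		return (cur);
 *	}
 */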
/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	if (zio == NULL || zio->io_error == 0)
		bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg));
}

/* a generic arc_done_func_t */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg));
		*bufp = NULL;
	} else {
		*bufp = buf;
		ASSERT(buf->b_data);
	}
}
static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t	*hdr;
	arc_buf_t	*buf;
	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
	kmutex_t	*hash_lock = NULL;
	arc_callback_t	*callback_list, *acb;
	int		freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	if (HDR_IN_HASH_TABLE(hdr)) {
		ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
		ASSERT3U(hdr->b_dva.dva_word[0], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
		ASSERT3U(hdr->b_dva.dva_word[1], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[1]);

		arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
		    &hash_lock);

		ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
		    hash_lock == NULL) ||
		    (found == hdr &&
		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
		    (found == hdr && HDR_L2_READING(hdr)));
	}

	hdr->b_flags &= ~ARC_L2_EVICTED;
	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
		hdr->b_flags &= ~ARC_L2CACHE;

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
		dmu_object_byteswap_t bswap =
		    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
		    byteswap_uint64_array :
		    dmu_ot_byteswap[bswap].ob_func;
		func(buf->b_data, hdr->b_size);
	}

	arc_cksum_compute(buf, B_FALSE);

	if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		arc_access(hdr, hash_lock);
	}

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL) {
				ARCSTAT_BUMP(arcstat_duplicate_reads);
				abuf = arc_buf_clone(buf);
			}
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf) {
		ASSERT(buf->b_efunc == NULL);
		ASSERT(hdr->b_datacnt == 1);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}
2890 * "Read" the block at the specified DVA (in bp) via the
2891 * cache. If the block is found in the cache, invoke the provided
2892 * callback immediately and return. Note that the `zio' parameter
2893 * in the callback will be NULL in this case, since no IO was
2894 * required. If the block is not in the cache pass the read request
2895 * on to the spa with a substitute callback function, so that the
2896 * requested block will be added to the cache.
2898 * If a read request arrives for a block that has a read in-progress,
2899 * either wait for the in-progress read to complete (and return the
2900 * results); or, if this is a read with a "done" func, add a record
2901 * to the read to invoke the "done" func when the read completes,
2902 * and return; or just return.
2904 * arc_read_done() will invoke all the requested "done" functions
2905 * for readers of this block.
2908 arc_read(zio_t
*pio
, spa_t
*spa
, const blkptr_t
*bp
, arc_done_func_t
*done
,
2909 void *private, zio_priority_t priority
, int zio_flags
, uint32_t *arc_flags
,
2910 const zbookmark_phys_t
*zb
)
2912 arc_buf_hdr_t
*hdr
= NULL
;
2913 arc_buf_t
*buf
= NULL
;
2914 kmutex_t
*hash_lock
= NULL
;
2916 uint64_t guid
= spa_load_guid(spa
);
2918 ASSERT(!BP_IS_EMBEDDED(bp
) ||
2919 BPE_GET_ETYPE(bp
) == BP_EMBEDDED_TYPE_DATA
);
2922 if (!BP_IS_EMBEDDED(bp
)) {
2924 * Embedded BP's have no DVA and require no I/O to "read".
2925 * Create an anonymous arc buf to back it.
2927 hdr
= buf_hash_find(guid
, bp
, &hash_lock
);
2930 if (hdr
!= NULL
&& hdr
->b_datacnt
> 0) {
2932 *arc_flags
|= ARC_CACHED
;
2934 if (HDR_IO_IN_PROGRESS(hdr
)) {
2936 if (*arc_flags
& ARC_WAIT
) {
2937 cv_wait(&hdr
->b_cv
, hash_lock
);
2938 mutex_exit(hash_lock
);
2941 ASSERT(*arc_flags
& ARC_NOWAIT
);
2944 arc_callback_t
*acb
= NULL
;
2946 acb
= kmem_zalloc(sizeof (arc_callback_t
),
2948 acb
->acb_done
= done
;
2949 acb
->acb_private
= private;
2951 acb
->acb_zio_dummy
= zio_null(pio
,
2952 spa
, NULL
, NULL
, NULL
, zio_flags
);
2954 ASSERT(acb
->acb_done
!= NULL
);
2955 acb
->acb_next
= hdr
->b_acb
;
2957 add_reference(hdr
, hash_lock
, private);
2958 mutex_exit(hash_lock
);
2961 mutex_exit(hash_lock
);
2965 ASSERT(hdr
->b_state
== arc_mru
|| hdr
->b_state
== arc_mfu
);
2968 add_reference(hdr
, hash_lock
, private);
2970 * If this block is already in use, create a new
2971 * copy of the data so that we will be guaranteed
2972 * that arc_release() will always succeed.
2976 ASSERT(buf
->b_data
);
2977 if (HDR_BUF_AVAILABLE(hdr
)) {
2978 ASSERT(buf
->b_efunc
== NULL
);
2979 hdr
->b_flags
&= ~ARC_BUF_AVAILABLE
;
2981 buf
= arc_buf_clone(buf
);
2984 } else if (*arc_flags
& ARC_PREFETCH
&&
2985 refcount_count(&hdr
->b_refcnt
) == 0) {
2986 hdr
->b_flags
|= ARC_PREFETCH
;
2988 DTRACE_PROBE1(arc__hit
, arc_buf_hdr_t
*, hdr
);
2989 arc_access(hdr
, hash_lock
);
2990 if (*arc_flags
& ARC_L2CACHE
)
2991 hdr
->b_flags
|= ARC_L2CACHE
;
2992 if (*arc_flags
& ARC_L2COMPRESS
)
2993 hdr
->b_flags
|= ARC_L2COMPRESS
;
2994 mutex_exit(hash_lock
);
2995 ARCSTAT_BUMP(arcstat_hits
);
2996 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
2997 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
2998 data
, metadata
, hits
);
3001 done(NULL
, buf
, private);
3003 uint64_t size
= BP_GET_LSIZE(bp
);
3004 arc_callback_t
*acb
;
3007 boolean_t devw
= B_FALSE
;
3008 enum zio_compress b_compress
= ZIO_COMPRESS_OFF
;
3009 uint64_t b_asize
= 0;
3012 /* this block is not in the cache */
3013 arc_buf_hdr_t
*exists
= NULL
;
3014 arc_buf_contents_t type
= BP_GET_BUFC_TYPE(bp
);
3015 buf
= arc_buf_alloc(spa
, size
, private, type
);
3017 if (!BP_IS_EMBEDDED(bp
)) {
3018 hdr
->b_dva
= *BP_IDENTITY(bp
);
3019 hdr
->b_birth
= BP_PHYSICAL_BIRTH(bp
);
3020 hdr
->b_cksum0
= bp
->blk_cksum
.zc_word
[0];
3021 exists
= buf_hash_insert(hdr
, &hash_lock
);
3023 if (exists
!= NULL
) {
3024 /* somebody beat us to the hash insert */
3025 mutex_exit(hash_lock
);
3026 buf_discard_identity(hdr
);
3027 (void) arc_buf_remove_ref(buf
, private);
3028 goto top
; /* restart the IO request */
3030 /* if this is a prefetch, we don't have a reference */
3031 if (*arc_flags
& ARC_PREFETCH
) {
3032 (void) remove_reference(hdr
, hash_lock
,
3034 hdr
->b_flags
|= ARC_PREFETCH
;
3036 if (*arc_flags
& ARC_L2CACHE
)
3037 hdr
->b_flags
|= ARC_L2CACHE
;
3038 if (*arc_flags
& ARC_L2COMPRESS
)
3039 hdr
->b_flags
|= ARC_L2COMPRESS
;
3040 if (BP_GET_LEVEL(bp
) > 0)
3041 hdr
->b_flags
|= ARC_INDIRECT
;
3043 /* this block is in the ghost cache */
3044 ASSERT(GHOST_STATE(hdr
->b_state
));
3045 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
3046 ASSERT0(refcount_count(&hdr
->b_refcnt
));
3047 ASSERT(hdr
->b_buf
== NULL
);
3049 /* if this is a prefetch, we don't have a reference */
3050 if (*arc_flags
& ARC_PREFETCH
)
3051 hdr
->b_flags
|= ARC_PREFETCH
;
3053 add_reference(hdr
, hash_lock
, private);
3054 if (*arc_flags
& ARC_L2CACHE
)
3055 hdr
->b_flags
|= ARC_L2CACHE
;
3056 if (*arc_flags
& ARC_L2COMPRESS
)
3057 hdr
->b_flags
|= ARC_L2COMPRESS
;
3058 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
3061 buf
->b_efunc
= NULL
;
3062 buf
->b_private
= NULL
;
3065 ASSERT(hdr
->b_datacnt
== 0);
3067 arc_get_data_buf(buf
);
3068 arc_access(hdr
, hash_lock
);
3071 ASSERT(!GHOST_STATE(hdr
->b_state
));
3073 acb
= kmem_zalloc(sizeof (arc_callback_t
), KM_SLEEP
);
3074 acb
->acb_done
= done
;
3075 acb
->acb_private
= private;
3077 ASSERT(hdr
->b_acb
== NULL
);
3079 hdr
->b_flags
|= ARC_IO_IN_PROGRESS
;
3081 if (hdr
->b_l2hdr
!= NULL
&&
3082 (vd
= hdr
->b_l2hdr
->b_dev
->l2ad_vdev
) != NULL
) {
3083 devw
= hdr
->b_l2hdr
->b_dev
->l2ad_writing
;
3084 addr
= hdr
->b_l2hdr
->b_daddr
;
3085 b_compress
= hdr
->b_l2hdr
->b_compress
;
3086 b_asize
= hdr
->b_l2hdr
->b_asize
;
3088 * Lock out device removal.
3090 if (vdev_is_dead(vd
) ||
3091 !spa_config_tryenter(spa
, SCL_L2ARC
, vd
, RW_READER
))
3095 if (hash_lock
!= NULL
)
3096 mutex_exit(hash_lock
);
3099 * At this point, we have a level 1 cache miss. Try again in
3100 * L2ARC if possible.
3102 ASSERT3U(hdr
->b_size
, ==, size
);
3103 DTRACE_PROBE4(arc__miss
, arc_buf_hdr_t
*, hdr
, blkptr_t
*, bp
,
3104 uint64_t, size
, zbookmark_phys_t
*, zb
);
3105 ARCSTAT_BUMP(arcstat_misses
);
3106 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
3107 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
3108 data
, metadata
, misses
);
3110 if (vd
!= NULL
&& l2arc_ndev
!= 0 && !(l2arc_norw
&& devw
)) {
3112 * Read from the L2ARC if the following are true:
3113 * 1. The L2ARC vdev was previously cached.
3114 * 2. This buffer still has L2ARC metadata.
3115 * 3. This buffer isn't currently writing to the L2ARC.
3116 * 4. The L2ARC entry wasn't evicted, which may
3117 * also have invalidated the vdev.
3118 * 5. This isn't prefetch and l2arc_noprefetch is set.
3120 if (hdr
->b_l2hdr
!= NULL
&&
3121 !HDR_L2_WRITING(hdr
) && !HDR_L2_EVICTED(hdr
) &&
3122 !(l2arc_noprefetch
&& HDR_PREFETCH(hdr
))) {
3123 l2arc_read_callback_t
*cb
;
3125 DTRACE_PROBE1(l2arc__hit
, arc_buf_hdr_t
*, hdr
);
3126 ARCSTAT_BUMP(arcstat_l2_hits
);
3128 cb
= kmem_zalloc(sizeof (l2arc_read_callback_t
),
3130 cb
->l2rcb_buf
= buf
;
3131 cb
->l2rcb_spa
= spa
;
3134 cb
->l2rcb_flags
= zio_flags
;
3135 cb
->l2rcb_compress
= b_compress
;
3137 ASSERT(addr
>= VDEV_LABEL_START_SIZE
&&
3138 addr
+ size
< vd
->vdev_psize
-
3139 VDEV_LABEL_END_SIZE
);
3142 * l2arc read. The SCL_L2ARC lock will be
3143 * released by l2arc_read_done().
3144 * Issue a null zio if the underlying buffer
3145 * was squashed to zero size by compression.
3147 if (b_compress
== ZIO_COMPRESS_EMPTY
) {
3148 rzio
= zio_null(pio
, spa
, vd
,
3149 l2arc_read_done
, cb
,
3150 zio_flags
| ZIO_FLAG_DONT_CACHE
|
3152 ZIO_FLAG_DONT_PROPAGATE
|
3153 ZIO_FLAG_DONT_RETRY
);
3155 rzio
= zio_read_phys(pio
, vd
, addr
,
3156 b_asize
, buf
->b_data
,
3158 l2arc_read_done
, cb
, priority
,
3159 zio_flags
| ZIO_FLAG_DONT_CACHE
|
3161 ZIO_FLAG_DONT_PROPAGATE
|
3162 ZIO_FLAG_DONT_RETRY
, B_FALSE
);
3164 DTRACE_PROBE2(l2arc__read
, vdev_t
*, vd
,
3166 ARCSTAT_INCR(arcstat_l2_read_bytes
, b_asize
);
3168 if (*arc_flags
& ARC_NOWAIT
) {
3173 ASSERT(*arc_flags
& ARC_WAIT
);
3174 if (zio_wait(rzio
) == 0)
3177 /* l2arc read error; goto zio_read() */
3179 DTRACE_PROBE1(l2arc__miss
,
3180 arc_buf_hdr_t
*, hdr
);
3181 ARCSTAT_BUMP(arcstat_l2_misses
);
3182 if (HDR_L2_WRITING(hdr
))
3183 ARCSTAT_BUMP(arcstat_l2_rw_clash
);
3184 spa_config_exit(spa
, SCL_L2ARC
, vd
);
3188 spa_config_exit(spa
, SCL_L2ARC
, vd
);
3189 if (l2arc_ndev
!= 0) {
3190 DTRACE_PROBE1(l2arc__miss
,
3191 arc_buf_hdr_t
*, hdr
);
3192 ARCSTAT_BUMP(arcstat_l2_misses
);
3196 rzio
= zio_read(pio
, spa
, bp
, buf
->b_data
, size
,
3197 arc_read_done
, buf
, priority
, zio_flags
, zb
);
3199 if (*arc_flags
& ARC_WAIT
)
3200 return (zio_wait(rzio
));
3202 ASSERT(*arc_flags
& ARC_NOWAIT
);
3209 arc_set_callback(arc_buf_t
*buf
, arc_evict_func_t
*func
, void *private)
3211 ASSERT(buf
->b_hdr
!= NULL
);
3212 ASSERT(buf
->b_hdr
->b_state
!= arc_anon
);
3213 ASSERT(!refcount_is_zero(&buf
->b_hdr
->b_refcnt
) || func
== NULL
);
3214 ASSERT(buf
->b_efunc
== NULL
);
3215 ASSERT(!HDR_BUF_AVAILABLE(buf
->b_hdr
));
3217 buf
->b_efunc
= func
;
3218 buf
->b_private
= private;
/*
 * Notify the arc that a block was freed, and thus will never be used again.
 */
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	uint64_t guid = spa_load_guid(spa);

	ASSERT(!BP_IS_EMBEDDED(bp));

	hdr = buf_hash_find(guid, bp, &hash_lock);
	if (hdr == NULL)
		return;
	if (HDR_BUF_AVAILABLE(hdr)) {
		arc_buf_t *buf = hdr->b_buf;
		add_reference(hdr, hash_lock, FTAG);
		hdr->b_flags &= ~ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);

		arc_release(buf, FTAG);
		(void) arc_buf_remove_ref(buf, FTAG);
	} else {
		mutex_exit(hash_lock);
	}
}
/*
 * Clear the user eviction callback set by arc_set_callback(), first calling
 * it if it exists.  Because the presence of a callback keeps an arc_buf
 * cached, clearing the callback may result in the arc_buf being destroyed.
 * However, it will not result in the *last* arc_buf being destroyed, hence
 * the data will remain cached in the ARC.  We make a copy of the arc buffer
 * here so that we can process the callback without holding any locks.
 *
 * It's possible that the callback is already in the process of being cleared
 * by another thread.  In this case we can not clear the callback.
 *
 * Returns B_TRUE if the callback was successfully called and cleared.
 */
boolean_t
arc_clear_callback(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_evict_func_t *efunc = buf->b_efunc;
	void *private = buf->b_private;

	mutex_enter(&buf->b_evict_lock);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&buf->b_evict_lock);
		return (B_FALSE);
	} else if (buf->b_data == NULL) {
		/*
		 * We are on the eviction list; process this buffer now
		 * but let arc_do_user_evicts() do the reaping.
		 */
		buf->b_efunc = NULL;
		mutex_exit(&buf->b_evict_lock);
		VERIFY0(efunc(private));
		return (B_TRUE);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

	buf->b_efunc = NULL;
	buf->b_private = NULL;

	if (hdr->b_datacnt > 1) {
		mutex_exit(&buf->b_evict_lock);
		arc_buf_destroy(buf, FALSE, TRUE);
	} else {
		ASSERT(buf == hdr->b_buf);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(&buf->b_evict_lock);
	}

	mutex_exit(hash_lock);
	VERIFY0(efunc(private));
	return (B_TRUE);
}
/*
 * Release this buffer from the cache, making it an anonymous buffer.  This
 * must be done after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock = NULL;
	l2arc_buf_hdr_t *l2hdr;
	uint64_t buf_size;

	/*
	 * It would be nice to assert that if it's DMU metadata (level >
	 * 0 || it's the dnode file), then it must be syncing context.
	 * But we don't know that information at this level.
	 */

	mutex_enter(&buf->b_evict_lock);
	hdr = buf->b_hdr;

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);

	if (hdr->b_state == arc_anon) {
		/* this buffer is already released */
		ASSERT(buf->b_efunc == NULL);
	} else {
		hash_lock = HDR_LOCK(hdr);
		mutex_enter(hash_lock);
		hdr = buf->b_hdr;
		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	}

	l2hdr = hdr->b_l2hdr;
	if (l2hdr) {
		mutex_enter(&l2arc_buflist_mtx);
		hdr->b_l2hdr = NULL;
		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
	}
	buf_size = hdr->b_size;

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_datacnt > 1) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		uint64_t spa = hdr->b_spa;
		arc_buf_contents_t type = hdr->b_type;
		uint32_t flags = hdr->b_flags;

		ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
		/*
		 * Pull the data off of this hdr and attach it to
		 * a new anonymous hdr.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = buf->b_next;
		buf->b_next = NULL;

		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_refcnt)) {
			uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
			ASSERT3U(*size, >=, hdr->b_size);
			atomic_add_64(size, -hdr->b_size);
		}

		/*
		 * We're releasing a duplicate user data buffer, update
		 * our statistics accordingly.
		 */
		if (hdr->b_type == ARC_BUFC_DATA) {
			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
			ARCSTAT_INCR(arcstat_duplicate_buffers_size,
			    -hdr->b_size);
		}
		hdr->b_datacnt -= 1;
		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;
		nhdr->b_type = type;
		nhdr->b_buf = buf;
		nhdr->b_state = arc_anon;
		nhdr->b_arc_access = 0;
		nhdr->b_flags = flags & ARC_L2_WRITING;
		nhdr->b_l2hdr = NULL;
		nhdr->b_datacnt = 1;
		nhdr->b_freeze_cksum = NULL;
		(void) refcount_add(&nhdr->b_refcnt, tag);
		buf->b_hdr = nhdr;
		mutex_exit(&buf->b_evict_lock);
		atomic_add_64(&arc_anon->arcs_size, blksz);
	} else {
		mutex_exit(&buf->b_evict_lock);
		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
		ASSERT(!list_link_active(&hdr->b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		hdr->b_arc_access = 0;
		if (hash_lock)
			mutex_exit(hash_lock);

		buf_discard_identity(hdr);
		arc_buf_thaw(buf);
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	if (l2hdr) {
		ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
		vdev_space_update(l2hdr->b_dev->l2ad_vdev,
		    -l2hdr->b_asize, 0, 0);
		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
		mutex_exit(&l2arc_buflist_mtx);
	}
}
int
arc_released(arc_buf_t *buf)
{
	int released;

	mutex_enter(&buf->b_evict_lock);
	released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
	mutex_exit(&buf->b_evict_lock);
	return (released);
}

int
arc_referenced(arc_buf_t *buf)
{
	int referenced;

	mutex_enter(&buf->b_evict_lock);
	referenced = (refcount_count(&buf->b_hdr->b_refcnt));
	mutex_exit(&buf->b_evict_lock);
	return (referenced);
}
static void
arc_write_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
	callback->awcb_ready(zio, buf, callback->awcb_private);

	/*
	 * If the IO is already in progress, then this is a re-write
	 * attempt, so we need to thaw and re-compute the cksum.
	 * It is the responsibility of the callback to handle the
	 * accounting for any re-write attempt.
	 */
	if (HDR_IO_IN_PROGRESS(hdr)) {
		mutex_enter(&hdr->b_freeze_lock);
		if (hdr->b_freeze_cksum != NULL) {
			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
			hdr->b_freeze_cksum = NULL;
		}
		mutex_exit(&hdr->b_freeze_lock);
	}
	arc_cksum_compute(buf, B_FALSE);
	hdr->b_flags |= ARC_IO_IN_PROGRESS;
}
/*
 * The SPA calls this callback for each physical write that happens on behalf
 * of a logical write.  See the comment in dbuf_write_physdone() for details.
 */
static void
arc_write_physdone(zio_t *zio)
{
	arc_write_callback_t *cb = zio->io_private;
	if (cb->awcb_physdone != NULL)
		cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(hdr->b_acb == NULL);

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
			buf_discard_identity(hdr);
		} else {
			hdr->b_dva = *BP_IDENTITY(zio->io_bp);
			hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
			hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
		}
	} else {
		ASSERT(BUF_EMPTY(hdr));
	}

	/*
	 * If the block to be written was all-zero or compressed enough to be
	 * embedded in the BP, no write was performed so there will be no
	 * dva/birth/checksum.  The buffer must therefore remain anonymous
	 * (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		ASSERT(zio->io_error == 0);

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad overwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
				ASSERT(refcount_is_zero(&exists->b_refcnt));
				arc_change_state(arc_anon, exists, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(exists);
				exists = buf_hash_insert(hdr, &hash_lock);
				ASSERT3P(exists, ==, NULL);
			} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
				/* nopwrite */
				ASSERT(zio->io_prop.zp_nopwrite);
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad nopwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
			} else {
				/* Dedup */
				ASSERT(hdr->b_datacnt == 1);
				ASSERT(hdr->b_state == arc_anon);
				ASSERT(BP_GET_DEDUP(zio->io_bp));
				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
			}
		}
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		/* if it's not anon, we are doing a scrub */
		if (!exists && hdr->b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	}

	ASSERT(!refcount_is_zero(&hdr->b_refcnt));
	callback->awcb_done(zio, buf, callback->awcb_private);

	kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
    const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
    arc_done_func_t *done, void *private, zio_priority_t priority,
    int zio_flags, const zbookmark_phys_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_write_callback_t *callback;
	zio_t *zio;

	ASSERT(ready != NULL);
	ASSERT(done != NULL);
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
	ASSERT(hdr->b_acb == NULL);
	if (l2arc)
		hdr->b_flags |= ARC_L2CACHE;
	if (l2arc_compress)
		hdr->b_flags |= ARC_L2COMPRESS;
	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
	callback->awcb_ready = ready;
	callback->awcb_physdone = physdone;
	callback->awcb_done = done;
	callback->awcb_private = private;
	callback->awcb_buf = buf;

	zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
	    arc_write_ready, arc_write_physdone, arc_write_done, callback,
	    priority, zio_flags, zb);

	return (zio);
}
static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
#ifdef _KERNEL
	uint64_t available_memory = ptob(freemem);
	static uint64_t page_load = 0;
	static uint64_t last_txg = 0;

#if defined(__i386)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif

	if (freemem > physmem * arc_lotsfree_percent / 100)
		return (0);

	if (txg > last_txg) {
		last_txg = txg;
		page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (curproc == proc_pageout) {
		if (page_load > MAX(ptob(minfree), available_memory) / 4)
			return (SET_ERROR(ERESTART));
		/* Note: reserve is inflated, so we deflate */
		page_load += reserve / 8;
		return (0);
	} else if (page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (SET_ERROR(EAGAIN));
	}
	page_load = 0;
#endif
	return (0);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

	if (reserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, reserve * 4);
	if (reserve > arc_c)
		return (SET_ERROR(ENOMEM));

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */
	anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	error = arc_memory_throttle(reserve, txg);
	if (error != 0)
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 */

	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
	    anon_size > arc_c / 4) {
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve>>10,
		    arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
		    arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
		    reserve>>10, arc_c>>10);
		return (SET_ERROR(ERESTART));
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}
void
arc_init(void)
{
	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* Start out with 1/8 of all memory */
	arc_c = physmem * PAGESIZE / 8;

#ifdef _KERNEL
	/*
	 * On architectures where the physical memory can be larger
	 * than the addressable space (intel in 32-bit mode), we may
	 * need to limit the cache to 1/8 of VM size.
	 */
	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc_c_min = MAX(arc_c / 4, 64<<20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (arc_c * 8 >= 1<<30)
		arc_c_max = (arc_c * 8) - (1<<30);
	else
		arc_c_max = arc_c_min;
	arc_c_max = MAX(arc_c * 6, arc_c_max);

	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (ie. over 64MB)
	 */
	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
		arc_c_max = zfs_arc_max;
	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* limit meta-data to 1/4 of the arc capacity */
	arc_meta_limit = arc_c_max / 4;

	/* Allow the tunable to override if it is reasonable */
	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
		arc_meta_limit = zfs_arc_meta_limit;

	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
		arc_c_min = arc_meta_limit / 2;

	if (zfs_arc_grow_retry > 0)
		arc_grow_retry = zfs_arc_grow_retry;

	if (zfs_arc_shrink_shift > 0)
		arc_shrink_shift = zfs_arc_shrink_shift;

	if (zfs_arc_p_min_shift > 0)
		arc_p_min_shift = zfs_arc_p_min_shift;

	/* if kmem_flags are set, lets try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_l2c_only = &ARC_l2c_only;
	arc_size = 0;

	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);

	list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));

	buf_init();

	arc_thread_exit = 0;
	arc_eviction_list = NULL;
	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	arc_dead = FALSE;
	arc_warm = B_FALSE;

	/*
	 * Calculate maximum amount of dirty data per pool.
	 *
	 * If it has been set by /etc/system, take that.
	 * Otherwise, use a percentage of physical memory defined by
	 * zfs_dirty_data_max_percent (default 10%) with a cap at
	 * zfs_dirty_data_max_max (default 4GB).
	 */
	if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = physmem * PAGESIZE *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
	}
}
void
arc_fini(void)
{
	mutex_enter(&arc_reclaim_thr_lock);
	arc_thread_exit = 1;
	while (arc_thread_exit != 0)
		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
	mutex_exit(&arc_reclaim_thr_lock);

	arc_flush(NULL);

	arc_dead = TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	mutex_destroy(&arc_eviction_mtx);
	mutex_destroy(&arc_reclaim_thr_lock);
	cv_destroy(&arc_reclaim_thr_cv);

	list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);

	mutex_destroy(&arc_anon->arcs_mtx);
	mutex_destroy(&arc_mru->arcs_mtx);
	mutex_destroy(&arc_mru_ghost->arcs_mtx);
	mutex_destroy(&arc_mfu->arcs_mtx);
	mutex_destroy(&arc_mfu_ghost->arcs_mtx);
	mutex_destroy(&arc_l2c_only->arcs_mtx);

	buf_fini();

	ASSERT(arc_loaned_bytes == 0);
}
3909 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3910 * It uses dedicated storage devices to hold cached data, which are populated
3911 * using large infrequent writes. The main role of this cache is to boost
3912 * the performance of random read workloads. The intended L2ARC devices
3913 * include short-stroked disks, solid state disks, and other media with
3914 * substantially faster read latency than disk.
3916 * +-----------------------+
3918 * +-----------------------+
3921 * l2arc_feed_thread() arc_read()
3925 * +---------------+ |
3927 * +---------------+ |
3932 * +-------+ +-------+
3934 * | cache | | cache |
3935 * +-------+ +-------+
3936 * +=========+ .-----.
3937 * : L2ARC : |-_____-|
3938 * : devices : | Disks |
3939 * +=========+ `-_____-'
3941 * Read requests are satisfied from the following sources, in order:
3944 * 2) vdev cache of L2ARC devices
3946 * 4) vdev cache of disks
3949 * Some L2ARC device types exhibit extremely slow write performance.
3950 * To accommodate for this there are some significant differences between
3951 * the L2ARC and traditional cache design:
3953 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3954 * the ARC behave as usual, freeing buffers and placing headers on ghost
3955 * lists. The ARC does not send buffers to the L2ARC during eviction as
3956 * this would add inflated write latencies for all ARC memory pressure.
3958 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3959 * It does this by periodically scanning buffers from the eviction-end of
3960 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3961 * not already there. It scans until a headroom of buffers is satisfied,
3962 * which itself is a buffer for ARC eviction. If a compressible buffer is
3963 * found during scanning and selected for writing to an L2ARC device, we
3964 * temporarily boost scanning headroom during the next scan cycle to make
3965 * sure we adapt to compression effects (which might significantly reduce
3966 * the data volume we write to L2ARC). The thread that does this is
3967 * l2arc_feed_thread(), illustrated below; example sizes are included to
3968 * provide a better sense of ratio than this diagram:
3971 * +---------------------+----------+
3972 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3973 * +---------------------+----------+ | o L2ARC eligible
3974 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3975 * +---------------------+----------+ |
3976 * 15.9 Gbytes ^ 32 Mbytes |
3978 * l2arc_feed_thread()
3980 * l2arc write hand <--[oooo]--'
3984 * +==============================+
3985 * L2ARC dev |####|#|###|###| |####| ... |
3986 * +==============================+
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from the tail of
 * these lists as pictured, the l2arc_feed_thread() will search from the
 * list heads for eligible buffers, greatly increasing its chance of finding
 * them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk-based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present in the L2ARC
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to write.
 */
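
/*
 * Illustrative sketch (not compiled): one pass of the feed cycle as driven
 * by l2arc_feed_thread() below, showing how the three functions above fit
 * together.  Identifiers match the code that follows; control flow is
 * simplified and all locking and error handling is omitted.
 *
 *	size = l2arc_write_size();		 how much to try to write
 *	l2arc_evict(dev, size, B_FALSE);	 clear that much device space
 *	wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
 *	next = l2arc_write_interval(begin, size, wrote);   when to wake again
 *
 * l2arc_write_eligible() is consulted per buffer inside
 * l2arc_write_buffers().
 */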

static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
	    HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
		return (B_FALSE);

	return (B_TRUE);
}

static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}
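
/*
 * Worked example (assuming hz = 1000 and illustrative tunable settings of
 * l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200): if the previous pass
 * wrote more than half of what it wanted and l2arc_feed_again is set, the
 * interval is (1000 * 200) / 1000 = 200 ticks (0.2s); otherwise it is
 * 1000 * 1 = 1000 ticks (1s).  The MAX/MIN clamp then schedules the next
 * wakeup at most one interval after 'began' but never in the past.
 */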

static void
l2arc_hdr_stat_add(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

static void
l2arc_hdr_stat_remove(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
}

/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}
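
/*
 * Note on the rotor (illustrative): with, say, three cache devices A, B and
 * C on l2arc_dev_list, successive calls return A, B, C, A, ... starting
 * from whichever device follows l2arc_dev_last, skipping any device whose
 * vdev is dead.  The caller inherits the spa config lock (SCL_L2ARC, as
 * reader) for the returned device and must drop it with spa_config_exit(),
 * as l2arc_feed_thread() does below.
 */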

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write()
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT(df->l2df_data != NULL);
		ASSERT(df->l2df_func != NULL);
		df->l2df_func(df->l2df_data, df->l2df_size);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}

/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *ab, *ab_prev;
	l2arc_buf_hdr_t *abl2;
	kmutex_t *hash_lock;
	int64_t bytes_dropped = 0;

	cb = zio->io_private;
	dev = cb->l2wcb_dev;
	ASSERT(dev != NULL);
	head = cb->l2wcb_head;
	ASSERT(head != NULL);
	buflist = dev->l2ad_buflist;
	ASSERT(buflist != NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	mutex_enter(&l2arc_buflist_mtx);

	/*
	 * All writes completed, or an error was hit.
	 */
	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);
		abl2 = ab->b_l2hdr;

		/*
		 * Release the temporary compressed buffer as soon as possible.
		 */
		if (abl2->b_compress != ZIO_COMPRESS_OFF)
			l2arc_release_cdata_buf(ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * This buffer misses out.  It may be in a stage
			 * of eviction.  Its ARC_L2_WRITING flag will be
			 * left set, denying reads to this buffer.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
			continue;
		}

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, ab);
			ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
			bytes_dropped += abl2->b_asize;
			ab->b_l2hdr = NULL;
			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
		}

		/*
		 * Allow ARC to begin reads to this L2ARC entry.
		 */
		ab->b_flags &= ~ARC_L2_WRITING;

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	kmem_cache_free(hdr_cache, head);
	mutex_exit(&l2arc_buflist_mtx);

	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}

/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	int equal;

	ASSERT(zio->io_vd != NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT(cb != NULL);
	buf = cb->l2rcb_buf;
	ASSERT(buf != NULL);

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * If the buffer was compressed, decompress it first.
	 */
	if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
		l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
	ASSERT(zio->io_data != NULL);

	/*
	 * Check this survived the L2ARC journey.
	 */
	equal = arc_cksum_equal(buf);
	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = buf;
		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = SET_ERROR(EIO);
		}
		if (!equal)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
			    buf->b_data, zio->io_size, arc_read_done, buf,
			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}

/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
	list_t *list = NULL;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 1:
		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	case 2:
		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 3:
		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	}

	ASSERT(!(MUTEX_HELD(*lock)));
	mutex_enter(*lock);

	return (list);
}

/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	l2arc_buf_hdr_t *abl2;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;
	int64_t bytes_evicted = 0;

	buflist = dev->l2ad_buflist;

	if (buflist == NULL)
		return;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device.  There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}

	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&l2arc_buflist_mtx);
	for (ab = list_tail(buflist); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&l2arc_buflist_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		if (HDR_L2_WRITE_HEAD(ab)) {
			/*
			 * We hit a write head node.  Leave it for
			 * l2arc_write_done().
			 */
			list_remove(buflist, ab);
			mutex_exit(hash_lock);
			continue;
		}

		if (!all && ab->b_l2hdr != NULL &&
		    (ab->b_l2hdr->b_daddr > taddr ||
		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (HDR_FREE_IN_PROGRESS(ab)) {
			/*
			 * Already on the path to destruction.
			 */
			mutex_exit(hash_lock);
			continue;
		}

		if (ab->b_state == arc_l2c_only) {
			ASSERT(!HDR_L2_READING(ab));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_size.
			 */
			arc_change_state(arc_anon, ab, hash_lock);
			arc_hdr_destroy(ab);
		} else {
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(ab)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				ab->b_flags |= ARC_L2_EVICTED;
			}

			/*
			 * Tell ARC this no longer exists in L2ARC.
			 */
			if (ab->b_l2hdr != NULL) {
				abl2 = ab->b_l2hdr;
				ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
				bytes_evicted += abl2->b_asize;
				ab->b_l2hdr = NULL;
				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
			}
			list_remove(buflist, ab);

			/*
			 * This may have been leftover after a
			 * failed write.
			 */
			ab->b_flags &= ~ARC_L2_WRITING;
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&l2arc_buflist_mtx);

	vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
	dev->l2ad_evict = taddr;
}
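
/*
 * Worked example (illustrative numbers): with l2ad_hand = 60 GB,
 * l2ad_end = 64 GB and distance = 3 GB, the hand is within 2 * distance of
 * the end, so taddr = l2ad_end and the region from the hand to the end of
 * the device (4 GB) is cleared before the hand wraps to l2ad_start.  With
 * l2ad_hand = 10 GB the normal case applies and only [10 GB, 13 GB) is
 * cleared ahead of the write hand.
 */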

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The headroom_boost is an in-out parameter used to maintain headroom boost
 * state between calls to this function.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
    boolean_t *headroom_boost)
{
	arc_buf_hdr_t *ab, *ab_prev, *head;
	list_t *list;
	uint64_t write_asize, write_psize, write_sz, headroom,
	    buf_compress_minsz;
	void *buf_data;
	kmutex_t *list_lock;
	boolean_t full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);
	const boolean_t do_headroom_boost = *headroom_boost;

	ASSERT(dev->l2ad_vdev != NULL);

	/* Lower the flag now, we might want to raise it again later. */
	*headroom_boost = B_FALSE;

	pio = NULL;
	write_sz = write_asize = write_psize = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_L2_WRITE_HEAD;

	/*
	 * We will want to try to compress buffers that are at least 2x the
	 * device sector size.
	 */
	buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&l2arc_buflist_mtx);
	for (int try = 0; try <= 3; try++) {
		uint64_t passed_sz = 0;

		list = l2arc_list_locked(try, &list_lock);

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			ab = list_head(list);
		else
			ab = list_tail(list);

		headroom = target_sz * l2arc_headroom;
		if (do_headroom_boost)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; ab; ab = ab_prev) {
			l2arc_buf_hdr_t *l2hdr;
			kmutex_t *hash_lock;
			uint64_t buf_sz;

			if (arc_warm == B_FALSE)
				ab_prev = list_next(list, ab);
			else
				ab_prev = list_prev(list, ab);

			hash_lock = HDR_LOCK(ab);
			if (!mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += ab->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, ab)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + ab->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
			l2hdr->b_dev = dev;
			ab->b_flags |= ARC_L2_WRITING;

			/*
			 * Temporarily stash the data buffer in b_tmp_cdata.
			 * The subsequent write step will pick it up from
			 * there. This is because can't access ab->b_buf
			 * without holding the hash_lock, which we in turn
			 * can't access without holding the ARC list locks
			 * (which we want to avoid during compression/writing).
			 */
			l2hdr->b_compress = ZIO_COMPRESS_OFF;
			l2hdr->b_asize = ab->b_size;
			l2hdr->b_tmp_cdata = ab->b_buf->b_data;

			buf_sz = ab->b_size;
			ab->b_l2hdr = l2hdr;

			list_insert_head(dev->l2ad_buflist, ab);

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(ab->b_buf);
			arc_cksum_compute(ab->b_buf, B_TRUE);

			mutex_exit(hash_lock);

			write_sz += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_sz);
		mutex_exit(&l2arc_buflist_mtx);
		kmem_cache_free(hdr_cache, head);
		return (0);
	}

	/*
	 * Now start writing the buffers. We're starting at the write head
	 * and work backwards, retracing the course of the buffer selector
	 * loop above.
	 */
	for (ab = list_prev(dev->l2ad_buflist, head); ab;
	    ab = list_prev(dev->l2ad_buflist, ab)) {
		l2arc_buf_hdr_t *l2hdr;
		uint64_t buf_sz;

		/*
		 * We shouldn't need to lock the buffer here, since we flagged
		 * it as ARC_L2_WRITING in the previous step, but we must take
		 * care to only access its L2 cache parameters. In particular,
		 * ab->b_buf may be invalid by now due to ARC eviction.
		 */
		l2hdr = ab->b_l2hdr;
		l2hdr->b_daddr = dev->l2ad_hand;

		if ((ab->b_flags & ARC_L2COMPRESS) &&
		    l2hdr->b_asize >= buf_compress_minsz) {
			if (l2arc_compress_buf(l2hdr)) {
				/*
				 * If compression succeeded, enable headroom
				 * boost on the next scan cycle.
				 */
				*headroom_boost = B_TRUE;
			}
		}

		/*
		 * Pick up the buffer data we had previously stashed away
		 * (and now potentially also compressed).
		 */
		buf_data = l2hdr->b_tmp_cdata;
		buf_sz = l2hdr->b_asize;

		/* Compression may have squashed the buffer to zero length. */
		if (buf_sz != 0) {
			uint64_t buf_p_sz;

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			write_asize += buf_sz;
			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
			write_psize += buf_p_sz;
			dev->l2ad_hand += buf_p_sz;
		}
	}

	mutex_exit(&l2arc_buflist_mtx);

	ASSERT3U(write_asize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	ARCSTAT_INCR(arcstat_l2_asize, write_asize);
	vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}

/*
 * Compresses an L2ARC buffer.
 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
 * size in l2hdr->b_asize. This routine tries to compress the data and
 * depending on the compression result there are three possible outcomes:
 * *) The buffer was incompressible. The original l2hdr contents were left
 *    untouched and are ready for writing to an L2 device.
 * *) The buffer was all-zeros, so there is no need to write it to an L2
 *    device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
 *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
 *    data buffer which holds the compressed data to be written, and b_asize
 *    tells us how much data there is. b_compress is set to the appropriate
 *    compression algorithm. Once writing is done, invoke
 *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
 *
 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
 * buffer was incompressible).
 */
static boolean_t
l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
{
	void *cdata;
	size_t csize, len, rounded;

	ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
	ASSERT(l2hdr->b_tmp_cdata != NULL);

	len = l2hdr->b_asize;
	cdata = zio_data_buf_alloc(len);
	csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
	    cdata, l2hdr->b_asize);

	rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
	if (rounded > csize) {
		bzero((char *)cdata + csize, rounded - csize);
		csize = rounded;
	}

	if (csize == 0) {
		/* zero block, indicate that there's nothing to write */
		zio_data_buf_free(cdata, len);
		l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
		l2hdr->b_asize = 0;
		l2hdr->b_tmp_cdata = NULL;
		ARCSTAT_BUMP(arcstat_l2_compress_zeros);
		return (B_TRUE);
	} else if (csize > 0 && csize < len) {
		/*
		 * Compression succeeded, we'll keep the cdata around for
		 * writing and release it afterwards.
		 */
		l2hdr->b_compress = ZIO_COMPRESS_LZ4;
		l2hdr->b_asize = csize;
		l2hdr->b_tmp_cdata = cdata;
		ARCSTAT_BUMP(arcstat_l2_compress_successes);
		return (B_TRUE);
	} else {
		/*
		 * Compression failed, release the compressed buffer.
		 * l2hdr will be left unmodified.
		 */
		zio_data_buf_free(cdata, len);
		ARCSTAT_BUMP(arcstat_l2_compress_failures);
		return (B_FALSE);
	}
}
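
/*
 * Worked example (illustrative sizes): compressing a 4096-byte buffer down
 * to 3000 bytes gives rounded = P2ROUNDUP(3000, 512) = 3072, so the last
 * 72 bytes of cdata are zeroed and b_asize becomes 3072.  Had LZ4 produced
 * 4000 bytes, rounding up to 4096 would make csize equal to len, so the
 * buffer would be treated as incompressible and written uncompressed.
 */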

/*
 * Decompresses a zio read back from an l2arc device. On success, the
 * underlying zio's io_data buffer is overwritten by the uncompressed
 * version. On decompression error (corrupt compressed stream), the
 * zio->io_error value is set to signal an I/O error.
 *
 * Please note that the compressed data stream is not checksummed, so
 * if the underlying device is experiencing data corruption, we may feed
 * corrupt data to the decompressor, so the decompressor needs to be
 * able to handle this situation (LZ4 does).
 */
static void
l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
{
	ASSERT(L2ARC_IS_VALID_COMPRESS(c));

	if (zio->io_error != 0) {
		/*
		 * An io error has occurred, just restore the original io
		 * size in preparation for a main pool read.
		 */
		zio->io_orig_size = zio->io_size = hdr->b_size;
		return;
	}

	if (c == ZIO_COMPRESS_EMPTY) {
		/*
		 * An empty buffer results in a null zio, which means we
		 * need to fill its io_data after we're done restoring the
		 * buffer's contents.
		 */
		ASSERT(hdr->b_buf != NULL);
		bzero(hdr->b_buf->b_data, hdr->b_size);
		zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
	} else {
		ASSERT(zio->io_data != NULL);
		/*
		 * We copy the compressed data from the start of the arc buffer
		 * (the zio_read will have pulled in only what we need, the
		 * rest is garbage which we will overwrite at decompression)
		 * and then decompress back to the ARC data buffer. This way we
		 * can minimize copying by simply decompressing back over the
		 * original compressed data (rather than decompressing to an
		 * aux buffer and then copying back the uncompressed buffer,
		 * which is likely to be much larger).
		 */
		uint64_t csize;
		void *cdata;

		csize = zio->io_size;
		cdata = zio_data_buf_alloc(csize);
		bcopy(zio->io_data, cdata, csize);
		if (zio_decompress_data(c, cdata, zio->io_data, csize,
		    hdr->b_size) != 0)
			zio->io_error = EIO;
		zio_data_buf_free(cdata, csize);
	}

	/* Restore the expected uncompressed IO size. */
	zio->io_orig_size = zio->io_size = hdr->b_size;
}

/*
 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
 * This buffer serves as a temporary holder of compressed data while
 * the buffer entry is being written to an l2arc device. Once that is
 * done, we can dispose of it.
 */
static void
l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
{
	l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;

	if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
		/*
		 * If the data was compressed, then we've allocated a
		 * temporary buffer for it, so now we need to release it.
		 */
		ASSERT(l2hdr->b_tmp_cdata != NULL);
		zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
	}
	l2hdr->b_tmp_cdata = NULL;
}

/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();
	boolean_t headroom_boost = B_FALSE;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.  This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size();

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
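
/*
 * Illustrative example: for a cache vdev, the usable window is
 * [VDEV_LABEL_START_SIZE, VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd)),
 * i.e. everything past the front vdev labels.  l2ad_hand and l2ad_evict
 * both start at l2ad_start, and l2ad_first is set so that the first sweep
 * skips eviction entirely (see l2arc_evict()).
 */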

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}