 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */
/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_clear_callback()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2ad_mtx on each vdev for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
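/*
 * Illustrative sketch (not part of the original source) of the lock-ordering
 * rule above.  A thread that already holds a state list lock may only *try*
 * to take a hash lock; arcs_mtx, arcs_list, HDR_LOCK() and
 * ARCSTAT_BUMP(arcstat_mutex_miss) are the real symbols declared later in
 * this file, the loop itself is hypothetical:
 *
 *	mutex_enter(&state->arcs_mtx);		// arc list lock held
 *	for (hdr = list_head(&state->arcs_list[type]); hdr != NULL;
 *	    hdr = list_next(&state->arcs_list[type], hdr)) {
 *		kmutex_t *hash_lock = HDR_LOCK(hdr);
 *		if (!mutex_tryenter(hash_lock)) {
 *			// blocking here could deadlock; skip this buffer
 *			ARCSTAT_BUMP(arcstat_mutex_miss);
 *			continue;
 *		}
 *		// ... evict or otherwise manipulate the buffer ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */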
#include <sys/zio_compress.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/vmsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;

static kmutex_t arc_reclaim_thr_lock;
static kcondvar_t arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t arc_thread_exit;

uint_t arc_reduce_dnlc_percent = 3;

/*
 * The number of iterations through arc_evict_*() before we
 * drop & reacquire the lock.
 */
int arc_evict_iterations = 100;

/* number of seconds before growing cache again */
static int arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 7;

/*
 * log2(fraction of ARC which must be free to allow growing).
 * I.e. if there is less than arc_c >> arc_no_grow_shift free memory,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * from the ARC.
 *
 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 * we will still not allow it to grow.
 */
int arc_no_grow_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;
/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
uint64_t zfs_arc_meta_min = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
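/*
 * Example (illustrative, not from the original source): on illumos these
 * tunables are normally set from /etc/system before the zfs module loads,
 * e.g. to cap the ARC at 4 GiB:
 *
 *	set zfs:zfs_arc_max = 0x100000000
 *
 * A value left at 0 means "use the default computed in arc_init()".
 */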
/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
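/*
 * Rough sketch (added for illustration, not in the original source) of the
 * usual movement of a buffer through these states:
 *
 *	anonymous (dirty, ref'd) --written, gets DVA-->  arc_mru
 *	arc_mru    --accessed again-->                   arc_mfu
 *	arc_mru    --evicted-->                          arc_mru_ghost (hdr only)
 *	arc_mfu    --evicted-->                          arc_mfu_ghost (hdr only)
 *	ghost hit  --data read back in-->                arc_mru / arc_mfu
 *	cached in L2ARC, L1 header dropped  -->          ARC_l2c_only
 */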
typedef struct arc_state {
	list_t arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;	/* lock protecting the lists above */
} arc_state_t;
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread. The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	/*
	 * Number of bytes consumed by internal ARC structures necessary
	 * for tracking purposes; these structures are not actually
	 * backed by ARC buffers. This includes arc_buf_hdr_t structures
	 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
	 * caches), and arc_buf_t structures (allocated via arc_buf_t
	 * cache).
	 */
	kstat_named_t arcstat_hdr_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_DATA. This is generally consumed by buffers backing
	 * on disk user data (e.g. plain file contents).
	 */
	kstat_named_t arcstat_data_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_METADATA. This is generally consumed by buffers
	 * backing on disk data that is used for internal ZFS
	 * structures (e.g. ZAP, dnode, indirect blocks, etc).
	 */
	kstat_named_t arcstat_metadata_size;
	/*
	 * Number of bytes consumed by various buffers and structures
	 * not actually backed with ARC buffers. This includes bonus
	 * buffers (allocated directly via zio_buf_* functions),
	 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
	 * cache), and dnode_t structures (allocated via dnode_t cache).
	 */
	kstat_named_t arcstat_other_size;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_anon state. This includes *all* buffers in the arc_anon
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_anon_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mru state. This includes *all* buffers in the arc_mru
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mru_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mru_ghost state. The key thing to note
	 * here, is the fact that this size doesn't actually indicate
	 * RAM consumption. The ghost lists only consist of headers and
	 * don't actually have ARC buffers linked off of these headers.
	 * Thus, *if* the headers had associated ARC buffers, these
	 * buffers *would have* consumed this number of bytes.
	 */
	kstat_named_t arcstat_mru_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mfu state. This includes *all* buffers in the arc_mfu
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mfu_size;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
	 * state.
	 */
	kstat_named_t arcstat_mfu_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_METADATA, and reside in the
	 * arc_mfu state.
	 */
	kstat_named_t arcstat_mfu_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mfu_ghost state. See the comment above
	 * arcstat_mru_ghost_size for more details.
	 */
	kstat_named_t arcstat_mfu_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_metadata;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_evict_l1cached;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_asize;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_l2_compress_successes;
	kstat_named_t arcstat_l2_compress_zeros;
	kstat_named_t arcstat_l2_compress_failures;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_duplicate_buffers;
	kstat_named_t arcstat_duplicate_buffers_size;
	kstat_named_t arcstat_duplicate_reads;
	kstat_named_t arcstat_meta_used;
	kstat_named_t arcstat_meta_limit;
	kstat_named_t arcstat_meta_max;
	kstat_named_t arcstat_meta_min;
} arc_stats_t;
static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "metadata_size",		KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "anon_size",			KSTAT_DATA_UINT64 },
	{ "anon_evictable_data",	KSTAT_DATA_UINT64 },
	{ "anon_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_size",			KSTAT_DATA_UINT64 },
	{ "mru_evictable_data",		KSTAT_DATA_UINT64 },
	{ "mru_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_size",		KSTAT_DATA_UINT64 },
	{ "mru_ghost_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
	{ "mfu_size",			KSTAT_DATA_UINT64 },
	{ "mfu_evictable_data",		KSTAT_DATA_UINT64 },
	{ "mfu_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_size",		KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_evict_l1cached",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_asize",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "l2_compress_successes",	KSTAT_DATA_UINT64 },
	{ "l2_compress_zeros",		KSTAT_DATA_UINT64 },
	{ "l2_compress_failures",	KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "duplicate_buffers",		KSTAT_DATA_UINT64 },
	{ "duplicate_buffers_size",	KSTAT_DATA_UINT64 },
	{ "duplicate_reads",		KSTAT_DATA_UINT64 },
	{ "arc_meta_used",		KSTAT_DATA_UINT64 },
	{ "arc_meta_limit",		KSTAT_DATA_UINT64 },
	{ "arc_meta_max",		KSTAT_DATA_UINT64 },
	{ "arc_meta_min",		KSTAT_DATA_UINT64 }
};
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val))

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
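/*
 * Usage sketch (illustrative): buf_hash_insert() below records the longest
 * hash chain and the peak element count seen so far with these lock-free
 * compare-and-swap loops, e.g.
 *
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);		// i = chain depth
 *	ARCSTAT_MAXSTAT(arcstat_hash_elements);		// track the peak
 */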
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {						\
		if (cond2) {					\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {					\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}						\
	} else {						\
		if (cond2) {					\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {					\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}						\
	}
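/*
 * Usage sketch (illustrative): arc_buf_add_ref() later in this file
 * classifies a hit as demand vs. prefetch and data vs. metadata in a single
 * statement:
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
 *	    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
 *	    data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */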
static arc_state_t *arc_anon;
static arc_state_t *arc_mru;
static arc_state_t *arc_mru_ghost;
static arc_state_t *arc_mfu;
static arc_state_t *arc_mfu_ghost;
static arc_state_t *arc_l2c_only;
/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
#define	arc_meta_limit	ARCSTAT(arcstat_meta_limit) /* max size for metadata */
#define	arc_meta_min	ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define	arc_meta_used	ARCSTAT(arcstat_meta_used) /* size of metadata */
#define	arc_meta_max	ARCSTAT(arcstat_meta_max) /* max size of metadata */
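/*
 * Illustrative example (not in the original source): because of the aliases
 * above, ordinary-looking expressions manipulate the exported kstats
 * directly; arc_space_consume() and arc_space_return() later in this file
 * do exactly this, e.g.
 *
 *	atomic_add_64(&arc_size, space);	// updates arcstat_size
 *	if (arc_meta_max < arc_meta_used)	// reads/writes arcstat_meta_*
 *		arc_meta_max = arc_meta_used;
 */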
#define	L2ARC_IS_VALID_COMPRESS(_c_) \
	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)

static int arc_no_grow;		/* Don't try to grow cache size */
static uint64_t arc_tempreserve;
static uint64_t arc_loaned_bytes;
typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t	*acb_done;
	zio_t		*acb_zio_dummy;
	arc_callback_t	*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_physdone;
	arc_done_func_t	*awcb_done;
};
/*
 * ARC buffers are separated into multiple structs as a memory saving measure:
 *   - Common fields struct, always defined, and embedded within it:
 *       - L2-only fields, always allocated but undefined when not in L2ARC
 *       - L1-only fields, only allocated when in L1ARC
 *
 *           Buffer in L1                     Buffer only in L2
 *    +------------------------+          +------------------------+
 *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
 *    +------------------------+          +------------------------+
 *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
 *    | (undefined if L1-only) |          |                        |
 *    +------------------------+          +------------------------+
 *    | l1arc_buf_hdr_t        |
 *    +------------------------+
 *
 * Because it's possible for the L2ARC to become extremely large, we can wind
 * up eating a lot of memory in L2ARC buffer headers, so the size of a header
 * is minimized by only allocating the fields necessary for an L1-cached buffer
 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr
 * and l2arc_buf_hdr) are embedded rather than allocated separately to save a
 * couple words in pointers. arc_hdr_realloc() is used to switch a header
 * between these two allocation states.
 */
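/*
 * Sketch (illustrative, not from the original source) of how the two
 * allocation states are switched with arc_hdr_realloc(), defined later in
 * this file.  hdr_full_cache and hdr_l2only_cache are the kmem caches
 * created in buf_init():
 *
 *	// demote: buffer now lives only in the L2ARC, drop the L1 fields
 *	hdr = arc_hdr_realloc(hdr, hdr_full_cache, hdr_l2only_cache);
 *
 *	// promote: buffer is being read back into the L1 cache
 *	hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, hdr_full_cache);
 */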
typedef struct l1arc_buf_hdr {
	kmutex_t b_freeze_lock;

	/*
	 * used for debugging with kmem_flags - by allocating and freeing
	 * b_thawed when the buffer is thawed, we get a record of the stack
	 * trace that thawed it.
	 */

	/* for waiting on writes to complete */

	/* protected by arc state mutex */
	arc_state_t	*b_state;
	list_node_t	b_arc_node;

	/* updated atomically */
	clock_t		b_arc_access;

	/* self protecting */

	arc_callback_t	*b_acb;
	/* temporary buffer holder for in-flight compressed data */
} l1arc_buf_hdr_t;
typedef struct l2arc_dev l2arc_dev_t;

typedef struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;		/* L2ARC device */
	uint64_t	b_daddr;	/* disk address, offset byte */
	/* real alloc'd buffer size depending on b_compress applied */

	list_node_t	b_l2node;
} l2arc_buf_hdr_t;

struct arc_buf_hdr {
	/* protected by hash lock */

	/*
	 * Even though this checksum is only set/verified when a buffer is in
	 * the L1 cache, it needs to be in the set of common fields because it
	 * must be preserved from the time before a buffer is written out to
	 * L2ARC until after it is read back in.
	 */
	zio_cksum_t	*b_freeze_cksum;

	arc_buf_hdr_t	*b_hash_next;

	/* L2ARC fields. Undefined when not in L2ARC. */
	l2arc_buf_hdr_t	b_l2hdr;
	/* L1ARC fields. Undefined when in l2arc_only state */
	l1arc_buf_hdr_t	b_l1hdr;
};
static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FLAG_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define	HDR_L2COMPRESS(hdr)	((hdr)->b_flags & ARC_FLAG_L2COMPRESS)
#define	HDR_L2_READING(hdr)	\
	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)

#define	HDR_ISTYPE_METADATA(hdr)	\
	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define	HDR_ISTYPE_DATA(hdr)	(!HDR_ISTYPE_METADATA(hdr))

#define	HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define	HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)

/* For storing compression mode in b_flags */
#define	HDR_COMPRESS_OFFSET	24
#define	HDR_COMPRESS_NBITS	7

#define	HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET(hdr->b_flags, \
	HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS))
#define	HDR_SET_COMPRESS(hdr, cmp)	BF32_SET(hdr->b_flags, \
	HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS, (cmp))
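/*
 * Illustrative example (not in the original source): the compression mode
 * for a header is packed into 7 bits of b_flags, so setting and querying it
 * looks like
 *
 *	HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_LZ4);
 *	ASSERT(L2ARC_IS_VALID_COMPRESS(HDR_GET_COMPRESS(hdr)));
 */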
#define	HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;
#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
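/*
 * Illustrative example of the lookup pattern these macros are built for
 * (buf_hash_find() below is the real code):
 *
 *	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 *	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 *	mutex_enter(hash_lock);
 *	// ... walk buf_hash_table.ht_table[idx] via b_hash_next ...
 *	mutex_exit(hash_lock);
 */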
uint64_t zfs_crc64_table[256];
#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2			/* num of writes */

/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define	L2ARC_HEADROOM_BOOST	200
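/*
 * Worked example (illustrative): with l2arc_headroom = 2 and
 * l2arc_headroom_boost = 200, a scan that found compressible buffers will
 * look ahead roughly 2 * (200 / 100) = 4 write-sized regions on the next
 * feed cycle instead of 2.
 */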
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	kmutex_t	l2ad_mtx;	/* lock for buffer list */
	list_t		l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
};
static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */
typedef struct l2arc_read_callback {
	arc_buf_t		*l2rcb_buf;		/* read buffer */
	spa_t			*l2rcb_spa;		/* spa */
	blkptr_t		l2rcb_bp;		/* original blkptr */
	zbookmark_phys_t	l2rcb_zb;		/* original bookmark */
	int			l2rcb_flags;		/* original flags */
	enum zio_compress	l2rcb_compress;		/* applied compress */
} l2arc_read_callback_t;
typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;
typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static void arc_get_data_buf(arc_buf_t *);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static int arc_evict_needed(arc_buf_contents_t);
static void arc_evict_ghost(arc_state_t *, uint64_t, int64_t);
static void arc_buf_watch(arc_buf_t *);

static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);

static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);

static boolean_t l2arc_compress_buf(arc_buf_hdr_t *);
static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress);
static void l2arc_release_cdata_buf(arc_buf_hdr_t *);
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}
931 #define BUF_EMPTY(buf) \
932 ((buf)->b_dva.dva_word[0] == 0 && \
933 (buf)->b_dva.dva_word[1] == 0)
935 #define BUF_EQUAL(spa, dva, birth, buf) \
936 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
937 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
938 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
941 buf_discard_identity(arc_buf_hdr_t
*hdr
)
943 hdr
->b_dva
.dva_word
[0] = 0;
944 hdr
->b_dva
.dva_word
[1] = 0;
948 static arc_buf_hdr_t
*
949 buf_hash_find(uint64_t spa
, const blkptr_t
*bp
, kmutex_t
**lockp
)
951 const dva_t
*dva
= BP_IDENTITY(bp
);
952 uint64_t birth
= BP_PHYSICAL_BIRTH(bp
);
953 uint64_t idx
= BUF_HASH_INDEX(spa
, dva
, birth
);
954 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
957 mutex_enter(hash_lock
);
958 for (hdr
= buf_hash_table
.ht_table
[idx
]; hdr
!= NULL
;
959 hdr
= hdr
->b_hash_next
) {
960 if (BUF_EQUAL(spa
, dva
, birth
, hdr
)) {
965 mutex_exit(hash_lock
);
971 * Insert an entry into the hash table. If there is already an element
972 * equal to elem in the hash table, then the already existing element
973 * will be returned and the new element will not be inserted.
974 * Otherwise returns NULL.
975 * If lockp == NULL, the caller is assumed to already hold the hash lock.
977 static arc_buf_hdr_t
*
978 buf_hash_insert(arc_buf_hdr_t
*hdr
, kmutex_t
**lockp
)
980 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
981 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
985 ASSERT(!DVA_IS_EMPTY(&hdr
->b_dva
));
986 ASSERT(hdr
->b_birth
!= 0);
987 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
991 mutex_enter(hash_lock
);
993 ASSERT(MUTEX_HELD(hash_lock
));
996 for (fhdr
= buf_hash_table
.ht_table
[idx
], i
= 0; fhdr
!= NULL
;
997 fhdr
= fhdr
->b_hash_next
, i
++) {
998 if (BUF_EQUAL(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
, fhdr
))
1002 hdr
->b_hash_next
= buf_hash_table
.ht_table
[idx
];
1003 buf_hash_table
.ht_table
[idx
] = hdr
;
1004 hdr
->b_flags
|= ARC_FLAG_IN_HASH_TABLE
;
1006 /* collect some hash table performance data */
1008 ARCSTAT_BUMP(arcstat_hash_collisions
);
1010 ARCSTAT_BUMP(arcstat_hash_chains
);
1012 ARCSTAT_MAX(arcstat_hash_chain_max
, i
);
1015 ARCSTAT_BUMP(arcstat_hash_elements
);
1016 ARCSTAT_MAXSTAT(arcstat_hash_elements
);
1022 buf_hash_remove(arc_buf_hdr_t
*hdr
)
1024 arc_buf_hdr_t
*fhdr
, **hdrp
;
1025 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
1027 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx
)));
1028 ASSERT(HDR_IN_HASH_TABLE(hdr
));
1030 hdrp
= &buf_hash_table
.ht_table
[idx
];
1031 while ((fhdr
= *hdrp
) != hdr
) {
1032 ASSERT(fhdr
!= NULL
);
1033 hdrp
= &fhdr
->b_hash_next
;
1035 *hdrp
= hdr
->b_hash_next
;
1036 hdr
->b_hash_next
= NULL
;
1037 hdr
->b_flags
&= ~ARC_FLAG_IN_HASH_TABLE
;
1039 /* collect some hash table performance data */
1040 ARCSTAT_BUMPDOWN(arcstat_hash_elements
);
1042 if (buf_hash_table
.ht_table
[idx
] &&
1043 buf_hash_table
.ht_table
[idx
]->b_hash_next
== NULL
)
1044 ARCSTAT_BUMPDOWN(arcstat_hash_chains
);
1048 * Global data structures and functions for the buf kmem cache.
1050 static kmem_cache_t
*hdr_full_cache
;
1051 static kmem_cache_t
*hdr_l2only_cache
;
1052 static kmem_cache_t
*buf_cache
;
1059 kmem_free(buf_hash_table
.ht_table
,
1060 (buf_hash_table
.ht_mask
+ 1) * sizeof (void *));
1061 for (i
= 0; i
< BUF_LOCKS
; i
++)
1062 mutex_destroy(&buf_hash_table
.ht_locks
[i
].ht_lock
);
1063 kmem_cache_destroy(hdr_full_cache
);
1064 kmem_cache_destroy(hdr_l2only_cache
);
1065 kmem_cache_destroy(buf_cache
);
1069 * Constructor callback - called when the cache is empty
1070 * and a new buf is requested.
1074 hdr_full_cons(void *vbuf
, void *unused
, int kmflag
)
1076 arc_buf_hdr_t
*hdr
= vbuf
;
1078 bzero(hdr
, HDR_FULL_SIZE
);
1079 cv_init(&hdr
->b_l1hdr
.b_cv
, NULL
, CV_DEFAULT
, NULL
);
1080 refcount_create(&hdr
->b_l1hdr
.b_refcnt
);
1081 mutex_init(&hdr
->b_l1hdr
.b_freeze_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1082 arc_space_consume(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1089 hdr_l2only_cons(void *vbuf
, void *unused
, int kmflag
)
1091 arc_buf_hdr_t
*hdr
= vbuf
;
1093 bzero(hdr
, HDR_L2ONLY_SIZE
);
1094 arc_space_consume(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1101 buf_cons(void *vbuf
, void *unused
, int kmflag
)
1103 arc_buf_t
*buf
= vbuf
;
1105 bzero(buf
, sizeof (arc_buf_t
));
1106 mutex_init(&buf
->b_evict_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1107 arc_space_consume(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1113 * Destructor callback - called when a cached buf is
1114 * no longer required.
1118 hdr_full_dest(void *vbuf
, void *unused
)
1120 arc_buf_hdr_t
*hdr
= vbuf
;
1122 ASSERT(BUF_EMPTY(hdr
));
1123 cv_destroy(&hdr
->b_l1hdr
.b_cv
);
1124 refcount_destroy(&hdr
->b_l1hdr
.b_refcnt
);
1125 mutex_destroy(&hdr
->b_l1hdr
.b_freeze_lock
);
1126 arc_space_return(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1131 hdr_l2only_dest(void *vbuf
, void *unused
)
1133 arc_buf_hdr_t
*hdr
= vbuf
;
1135 ASSERT(BUF_EMPTY(hdr
));
1136 arc_space_return(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1141 buf_dest(void *vbuf
, void *unused
)
1143 arc_buf_t
*buf
= vbuf
;
1145 mutex_destroy(&buf
->b_evict_lock
);
1146 arc_space_return(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1150 * Reclaim callback -- invoked when memory is low.
1154 hdr_recl(void *unused
)
1156 dprintf("hdr_recl called\n");
1158 * umem calls the reclaim func when we destroy the buf cache,
1159 * which is after we do arc_fini().
1162 cv_signal(&arc_reclaim_thr_cv
);
1169 uint64_t hsize
= 1ULL << 12;
1173 * The hash table is big enough to fill all of physical memory
1174 * with an average block size of zfs_arc_average_blocksize (default 8K).
1175 * By default, the table will take up
1176 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
1178 while (hsize
* zfs_arc_average_blocksize
< physmem
* PAGESIZE
)
1181 buf_hash_table
.ht_mask
= hsize
- 1;
1182 buf_hash_table
.ht_table
=
1183 kmem_zalloc(hsize
* sizeof (void*), KM_NOSLEEP
);
1184 if (buf_hash_table
.ht_table
== NULL
) {
1185 ASSERT(hsize
> (1ULL << 8));
1190 hdr_full_cache
= kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE
,
1191 0, hdr_full_cons
, hdr_full_dest
, hdr_recl
, NULL
, NULL
, 0);
1192 hdr_l2only_cache
= kmem_cache_create("arc_buf_hdr_t_l2only",
1193 HDR_L2ONLY_SIZE
, 0, hdr_l2only_cons
, hdr_l2only_dest
, hdr_recl
,
1195 buf_cache
= kmem_cache_create("arc_buf_t", sizeof (arc_buf_t
),
1196 0, buf_cons
, buf_dest
, NULL
, NULL
, NULL
, 0);
1198 for (i
= 0; i
< 256; i
++)
1199 for (ct
= zfs_crc64_table
+ i
, *ct
= i
, j
= 8; j
> 0; j
--)
1200 *ct
= (*ct
>> 1) ^ (-(*ct
& 1) & ZFS_CRC64_POLY
);
1202 for (i
= 0; i
< BUF_LOCKS
; i
++) {
1203 mutex_init(&buf_hash_table
.ht_locks
[i
].ht_lock
,
1204 NULL
, MUTEX_DEFAULT
, NULL
);
1209 * Transition between the two allocation states for the arc_buf_hdr struct.
1210 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
1211 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
1212 * version is used when a cache buffer is only in the L2ARC in order to reduce
1215 static arc_buf_hdr_t
*
1216 arc_hdr_realloc(arc_buf_hdr_t
*hdr
, kmem_cache_t
*old
, kmem_cache_t
*new)
1218 ASSERT(HDR_HAS_L2HDR(hdr
));
1220 arc_buf_hdr_t
*nhdr
;
1221 l2arc_dev_t
*dev
= hdr
->b_l2hdr
.b_dev
;
1223 ASSERT((old
== hdr_full_cache
&& new == hdr_l2only_cache
) ||
1224 (old
== hdr_l2only_cache
&& new == hdr_full_cache
));
1226 nhdr
= kmem_cache_alloc(new, KM_PUSHPAGE
);
1228 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)));
1229 buf_hash_remove(hdr
);
1231 bcopy(hdr
, nhdr
, HDR_L2ONLY_SIZE
);
1232 if (new == hdr_full_cache
) {
1233 nhdr
->b_flags
|= ARC_FLAG_HAS_L1HDR
;
1235 * arc_access and arc_change_state need to be aware that a
1236 * header has just come out of L2ARC, so we set its state to
1237 * l2c_only even though it's about to change.
1239 nhdr
->b_l1hdr
.b_state
= arc_l2c_only
;
1241 ASSERT(hdr
->b_l1hdr
.b_buf
== NULL
);
1242 ASSERT0(hdr
->b_l1hdr
.b_datacnt
);
1243 ASSERT(!list_link_active(&hdr
->b_l1hdr
.b_arc_node
));
1245 * We might be removing the L1hdr of a buffer which was just
1246 * written out to L2ARC. If such a buffer is compressed then we
1247 * need to free its b_tmp_cdata before destroying the header.
1249 if (hdr
->b_l1hdr
.b_tmp_cdata
!= NULL
&&
1250 HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
)
1251 l2arc_release_cdata_buf(hdr
);
1252 nhdr
->b_flags
&= ~ARC_FLAG_HAS_L1HDR
;
1255 * The header has been reallocated so we need to re-insert it into any
1258 (void) buf_hash_insert(nhdr
, NULL
);
1260 ASSERT(list_link_active(&hdr
->b_l2hdr
.b_l2node
));
1262 mutex_enter(&dev
->l2ad_mtx
);
1265 * We must place the realloc'ed header back into the list at
1266 * the same spot. Otherwise, if it's placed earlier in the list,
1267 * l2arc_write_buffers() could find it during the function's
1268 * write phase, and try to write it out to the l2arc.
1270 list_insert_after(&dev
->l2ad_buflist
, hdr
, nhdr
);
1271 list_remove(&dev
->l2ad_buflist
, hdr
);
1273 mutex_exit(&dev
->l2ad_mtx
);
1275 buf_discard_identity(hdr
);
1276 hdr
->b_freeze_cksum
= NULL
;
1277 kmem_cache_free(old
, hdr
);
1283 #define ARC_MINTIME (hz>>4) /* 62 ms */
1286 arc_cksum_verify(arc_buf_t
*buf
)
1290 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1293 mutex_enter(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1294 if (buf
->b_hdr
->b_freeze_cksum
== NULL
|| HDR_IO_ERROR(buf
->b_hdr
)) {
1295 mutex_exit(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1298 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
, &zc
);
1299 if (!ZIO_CHECKSUM_EQUAL(*buf
->b_hdr
->b_freeze_cksum
, zc
))
1300 panic("buffer modified while frozen!");
1301 mutex_exit(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1305 arc_cksum_equal(arc_buf_t
*buf
)
1310 mutex_enter(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1311 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
, &zc
);
1312 equal
= ZIO_CHECKSUM_EQUAL(*buf
->b_hdr
->b_freeze_cksum
, zc
);
1313 mutex_exit(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1319 arc_cksum_compute(arc_buf_t
*buf
, boolean_t force
)
1321 if (!force
&& !(zfs_flags
& ZFS_DEBUG_MODIFY
))
1324 mutex_enter(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1325 if (buf
->b_hdr
->b_freeze_cksum
!= NULL
) {
1326 mutex_exit(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1329 buf
->b_hdr
->b_freeze_cksum
= kmem_alloc(sizeof (zio_cksum_t
), KM_SLEEP
);
1330 fletcher_2_native(buf
->b_data
, buf
->b_hdr
->b_size
,
1331 buf
->b_hdr
->b_freeze_cksum
);
1332 mutex_exit(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1337 typedef struct procctl
{
1345 arc_buf_unwatch(arc_buf_t
*buf
)
1352 ctl
.prwatch
.pr_vaddr
= (uintptr_t)buf
->b_data
;
1353 ctl
.prwatch
.pr_size
= 0;
1354 ctl
.prwatch
.pr_wflags
= 0;
1355 result
= write(arc_procfd
, &ctl
, sizeof (ctl
));
1356 ASSERT3U(result
, ==, sizeof (ctl
));
1363 arc_buf_watch(arc_buf_t
*buf
)
1370 ctl
.prwatch
.pr_vaddr
= (uintptr_t)buf
->b_data
;
1371 ctl
.prwatch
.pr_size
= buf
->b_hdr
->b_size
;
1372 ctl
.prwatch
.pr_wflags
= WA_WRITE
;
1373 result
= write(arc_procfd
, &ctl
, sizeof (ctl
));
1374 ASSERT3U(result
, ==, sizeof (ctl
));
1379 static arc_buf_contents_t
1380 arc_buf_type(arc_buf_hdr_t
*hdr
)
1382 if (HDR_ISTYPE_METADATA(hdr
)) {
1383 return (ARC_BUFC_METADATA
);
1385 return (ARC_BUFC_DATA
);
1390 arc_bufc_to_flags(arc_buf_contents_t type
)
1394 /* metadata field is 0 if buffer contains normal data */
1396 case ARC_BUFC_METADATA
:
1397 return (ARC_FLAG_BUFC_METADATA
);
1401 panic("undefined ARC buffer type!");
1402 return ((uint32_t)-1);
1406 arc_buf_thaw(arc_buf_t
*buf
)
1408 if (zfs_flags
& ZFS_DEBUG_MODIFY
) {
1409 if (buf
->b_hdr
->b_l1hdr
.b_state
!= arc_anon
)
1410 panic("modifying non-anon buffer!");
1411 if (HDR_IO_IN_PROGRESS(buf
->b_hdr
))
1412 panic("modifying buffer while i/o in progress!");
1413 arc_cksum_verify(buf
);
1416 mutex_enter(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1417 if (buf
->b_hdr
->b_freeze_cksum
!= NULL
) {
1418 kmem_free(buf
->b_hdr
->b_freeze_cksum
, sizeof (zio_cksum_t
));
1419 buf
->b_hdr
->b_freeze_cksum
= NULL
;
1423 if (zfs_flags
& ZFS_DEBUG_MODIFY
) {
1424 if (buf
->b_hdr
->b_l1hdr
.b_thawed
!= NULL
)
1425 kmem_free(buf
->b_hdr
->b_l1hdr
.b_thawed
, 1);
1426 buf
->b_hdr
->b_l1hdr
.b_thawed
= kmem_alloc(1, KM_SLEEP
);
1430 mutex_exit(&buf
->b_hdr
->b_l1hdr
.b_freeze_lock
);
1432 arc_buf_unwatch(buf
);
1436 arc_buf_freeze(arc_buf_t
*buf
)
1438 kmutex_t
*hash_lock
;
1440 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1443 hash_lock
= HDR_LOCK(buf
->b_hdr
);
1444 mutex_enter(hash_lock
);
1446 ASSERT(buf
->b_hdr
->b_freeze_cksum
!= NULL
||
1447 buf
->b_hdr
->b_l1hdr
.b_state
== arc_anon
);
1448 arc_cksum_compute(buf
, B_FALSE
);
1449 mutex_exit(hash_lock
);
1454 add_reference(arc_buf_hdr_t
*hdr
, kmutex_t
*hash_lock
, void *tag
)
1456 ASSERT(HDR_HAS_L1HDR(hdr
));
1457 ASSERT(MUTEX_HELD(hash_lock
));
1458 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
1460 if ((refcount_add(&hdr
->b_l1hdr
.b_refcnt
, tag
) == 1) &&
1461 (state
!= arc_anon
)) {
1462 /* We don't use the L2-only state list. */
1463 if (state
!= arc_l2c_only
) {
1464 uint64_t delta
= hdr
->b_size
* hdr
->b_l1hdr
.b_datacnt
;
1465 list_t
*list
= &state
->arcs_list
[arc_buf_type(hdr
)];
1466 uint64_t *size
= &state
->arcs_lsize
[arc_buf_type(hdr
)];
1468 ASSERT(!MUTEX_HELD(&state
->arcs_mtx
));
1469 mutex_enter(&state
->arcs_mtx
);
1470 ASSERT(list_link_active(&hdr
->b_l1hdr
.b_arc_node
));
1471 list_remove(list
, hdr
);
1472 if (GHOST_STATE(state
)) {
1473 ASSERT0(hdr
->b_l1hdr
.b_datacnt
);
1474 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
1475 delta
= hdr
->b_size
;
1478 ASSERT3U(*size
, >=, delta
);
1479 atomic_add_64(size
, -delta
);
1480 mutex_exit(&state
->arcs_mtx
);
1482 /* remove the prefetch flag if we get a reference */
1483 hdr
->b_flags
&= ~ARC_FLAG_PREFETCH
;
1488 remove_reference(arc_buf_hdr_t
*hdr
, kmutex_t
*hash_lock
, void *tag
)
1491 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
1493 ASSERT(HDR_HAS_L1HDR(hdr
));
1494 ASSERT(state
== arc_anon
|| MUTEX_HELD(hash_lock
));
1495 ASSERT(!GHOST_STATE(state
));
1498 * arc_l2c_only counts as a ghost state so we don't need to explicitly
1499 * check to prevent usage of the arc_l2c_only list.
1501 if (((cnt
= refcount_remove(&hdr
->b_l1hdr
.b_refcnt
, tag
)) == 0) &&
1502 (state
!= arc_anon
)) {
1503 uint64_t *size
= &state
->arcs_lsize
[arc_buf_type(hdr
)];
1505 ASSERT(!MUTEX_HELD(&state
->arcs_mtx
));
1506 mutex_enter(&state
->arcs_mtx
);
1507 ASSERT(!list_link_active(&hdr
->b_l1hdr
.b_arc_node
));
1508 list_insert_head(&state
->arcs_list
[arc_buf_type(hdr
)], hdr
);
1509 ASSERT(hdr
->b_l1hdr
.b_datacnt
> 0);
1510 atomic_add_64(size
, hdr
->b_size
*
1511 hdr
->b_l1hdr
.b_datacnt
);
1512 mutex_exit(&state
->arcs_mtx
);
1518 * Move the supplied buffer to the indicated state. The mutex
1519 * for the buffer must be held by the caller.
1522 arc_change_state(arc_state_t
*new_state
, arc_buf_hdr_t
*hdr
,
1523 kmutex_t
*hash_lock
)
1525 arc_state_t
*old_state
;
1528 uint64_t from_delta
, to_delta
;
1529 arc_buf_contents_t buftype
= arc_buf_type(hdr
);
1532 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
1533 * in arc_read() when bringing a buffer out of the L2ARC. However, the
1534 * L1 hdr doesn't always exist when we change state to arc_anon before
1535 * destroying a header, in which case reallocating to add the L1 hdr is
1538 if (HDR_HAS_L1HDR(hdr
)) {
1539 old_state
= hdr
->b_l1hdr
.b_state
;
1540 refcnt
= refcount_count(&hdr
->b_l1hdr
.b_refcnt
);
1541 datacnt
= hdr
->b_l1hdr
.b_datacnt
;
1543 old_state
= arc_l2c_only
;
1548 ASSERT(MUTEX_HELD(hash_lock
));
1549 ASSERT3P(new_state
, !=, old_state
);
1550 ASSERT(refcnt
== 0 || datacnt
> 0);
1551 ASSERT(!GHOST_STATE(new_state
) || datacnt
== 0);
1552 ASSERT(old_state
!= arc_anon
|| datacnt
<= 1);
1554 from_delta
= to_delta
= datacnt
* hdr
->b_size
;
1557 * If this buffer is evictable, transfer it from the
1558 * old state list to the new state list.
1561 if (old_state
!= arc_anon
&& old_state
!= arc_l2c_only
) {
1562 int use_mutex
= !MUTEX_HELD(&old_state
->arcs_mtx
);
1563 uint64_t *size
= &old_state
->arcs_lsize
[buftype
];
1566 mutex_enter(&old_state
->arcs_mtx
);
1568 ASSERT(HDR_HAS_L1HDR(hdr
));
1569 ASSERT(list_link_active(&hdr
->b_l1hdr
.b_arc_node
));
1570 list_remove(&old_state
->arcs_list
[buftype
], hdr
);
1573 * If prefetching out of the ghost cache,
1574 * we will have a non-zero datacnt.
1576 if (GHOST_STATE(old_state
) && datacnt
== 0) {
1577 /* ghost elements have a ghost size */
1578 ASSERT(hdr
->b_l1hdr
.b_buf
== NULL
);
1579 from_delta
= hdr
->b_size
;
1581 ASSERT3U(*size
, >=, from_delta
);
1582 atomic_add_64(size
, -from_delta
);
1585 mutex_exit(&old_state
->arcs_mtx
);
1587 if (new_state
!= arc_anon
&& new_state
!= arc_l2c_only
) {
1588 int use_mutex
= !MUTEX_HELD(&new_state
->arcs_mtx
);
1589 uint64_t *size
= &new_state
->arcs_lsize
[buftype
];
1592 * An L1 header always exists here, since if we're
1593 * moving to some L1-cached state (i.e. not l2c_only or
1594 * anonymous), we realloc the header to add an L1hdr
1597 ASSERT(HDR_HAS_L1HDR(hdr
));
1599 mutex_enter(&new_state
->arcs_mtx
);
1601 list_insert_head(&new_state
->arcs_list
[buftype
], hdr
);
1603 /* ghost elements have a ghost size */
1604 if (GHOST_STATE(new_state
)) {
1606 ASSERT(hdr
->b_l1hdr
.b_buf
== NULL
);
1607 to_delta
= hdr
->b_size
;
1609 atomic_add_64(size
, to_delta
);
1612 mutex_exit(&new_state
->arcs_mtx
);
1616 ASSERT(!BUF_EMPTY(hdr
));
1617 if (new_state
== arc_anon
&& HDR_IN_HASH_TABLE(hdr
))
1618 buf_hash_remove(hdr
);
1620 /* adjust state sizes (ignore arc_l2c_only) */
1621 if (to_delta
&& new_state
!= arc_l2c_only
)
1622 atomic_add_64(&new_state
->arcs_size
, to_delta
);
1623 if (from_delta
&& old_state
!= arc_l2c_only
) {
1624 ASSERT3U(old_state
->arcs_size
, >=, from_delta
);
1625 atomic_add_64(&old_state
->arcs_size
, -from_delta
);
1627 if (HDR_HAS_L1HDR(hdr
))
1628 hdr
->b_l1hdr
.b_state
= new_state
;
1631 * L2 headers should never be on the L2 state list since they don't
1632 * have L1 headers allocated.
1634 ASSERT(list_is_empty(&arc_l2c_only
->arcs_list
[ARC_BUFC_DATA
]) &&
1635 list_is_empty(&arc_l2c_only
->arcs_list
[ARC_BUFC_METADATA
]));
1639 arc_space_consume(uint64_t space
, arc_space_type_t type
)
1641 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
1644 case ARC_SPACE_DATA
:
1645 ARCSTAT_INCR(arcstat_data_size
, space
);
1647 case ARC_SPACE_META
:
1648 ARCSTAT_INCR(arcstat_metadata_size
, space
);
1650 case ARC_SPACE_OTHER
:
1651 ARCSTAT_INCR(arcstat_other_size
, space
);
1653 case ARC_SPACE_HDRS
:
1654 ARCSTAT_INCR(arcstat_hdr_size
, space
);
1656 case ARC_SPACE_L2HDRS
:
1657 ARCSTAT_INCR(arcstat_l2_hdr_size
, space
);
1661 if (type
!= ARC_SPACE_DATA
)
1662 ARCSTAT_INCR(arcstat_meta_used
, space
);
1664 atomic_add_64(&arc_size
, space
);
1668 arc_space_return(uint64_t space
, arc_space_type_t type
)
1670 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
1673 case ARC_SPACE_DATA
:
1674 ARCSTAT_INCR(arcstat_data_size
, -space
);
1676 case ARC_SPACE_META
:
1677 ARCSTAT_INCR(arcstat_metadata_size
, -space
);
1679 case ARC_SPACE_OTHER
:
1680 ARCSTAT_INCR(arcstat_other_size
, -space
);
1682 case ARC_SPACE_HDRS
:
1683 ARCSTAT_INCR(arcstat_hdr_size
, -space
);
1685 case ARC_SPACE_L2HDRS
:
1686 ARCSTAT_INCR(arcstat_l2_hdr_size
, -space
);
1690 if (type
!= ARC_SPACE_DATA
) {
1691 ASSERT(arc_meta_used
>= space
);
1692 if (arc_meta_max
< arc_meta_used
)
1693 arc_meta_max
= arc_meta_used
;
1694 ARCSTAT_INCR(arcstat_meta_used
, -space
);
1697 ASSERT(arc_size
>= space
);
1698 atomic_add_64(&arc_size
, -space
);
1702 arc_buf_alloc(spa_t
*spa
, int32_t size
, void *tag
, arc_buf_contents_t type
)
1707 ASSERT3U(size
, >, 0);
1708 hdr
= kmem_cache_alloc(hdr_full_cache
, KM_PUSHPAGE
);
1709 ASSERT(BUF_EMPTY(hdr
));
1710 ASSERT3P(hdr
->b_freeze_cksum
, ==, NULL
);
1712 hdr
->b_spa
= spa_load_guid(spa
);
1714 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
1717 buf
->b_efunc
= NULL
;
1718 buf
->b_private
= NULL
;
1721 hdr
->b_flags
= arc_bufc_to_flags(type
);
1722 hdr
->b_flags
|= ARC_FLAG_HAS_L1HDR
;
1724 hdr
->b_l1hdr
.b_buf
= buf
;
1725 hdr
->b_l1hdr
.b_state
= arc_anon
;
1726 hdr
->b_l1hdr
.b_arc_access
= 0;
1727 hdr
->b_l1hdr
.b_datacnt
= 1;
1729 arc_get_data_buf(buf
);
1730 ASSERT(refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
1731 (void) refcount_add(&hdr
->b_l1hdr
.b_refcnt
, tag
);
1736 static char *arc_onloan_tag
= "onloan";
1739 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1740 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1741 * buffers must be returned to the arc before they can be used by the DMU or
1745 arc_loan_buf(spa_t
*spa
, int size
)
1749 buf
= arc_buf_alloc(spa
, size
, arc_onloan_tag
, ARC_BUFC_DATA
);
1751 atomic_add_64(&arc_loaned_bytes
, size
);
1756 * Return a loaned arc buffer to the arc.
1759 arc_return_buf(arc_buf_t
*buf
, void *tag
)
1761 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1763 ASSERT(buf
->b_data
!= NULL
);
1764 ASSERT(HDR_HAS_L1HDR(hdr
));
1765 (void) refcount_add(&hdr
->b_l1hdr
.b_refcnt
, tag
);
1766 (void) refcount_remove(&hdr
->b_l1hdr
.b_refcnt
, arc_onloan_tag
);
1768 atomic_add_64(&arc_loaned_bytes
, -hdr
->b_size
);
1771 /* Detach an arc_buf from a dbuf (tag) */
1773 arc_loan_inuse_buf(arc_buf_t
*buf
, void *tag
)
1775 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1777 ASSERT(buf
->b_data
!= NULL
);
1778 ASSERT(HDR_HAS_L1HDR(hdr
));
1779 (void) refcount_add(&hdr
->b_l1hdr
.b_refcnt
, arc_onloan_tag
);
1780 (void) refcount_remove(&hdr
->b_l1hdr
.b_refcnt
, tag
);
1781 buf
->b_efunc
= NULL
;
1782 buf
->b_private
= NULL
;
1784 atomic_add_64(&arc_loaned_bytes
, hdr
->b_size
);
1788 arc_buf_clone(arc_buf_t
*from
)
1791 arc_buf_hdr_t
*hdr
= from
->b_hdr
;
1792 uint64_t size
= hdr
->b_size
;
1794 ASSERT(HDR_HAS_L1HDR(hdr
));
1795 ASSERT(hdr
->b_l1hdr
.b_state
!= arc_anon
);
1797 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
1800 buf
->b_efunc
= NULL
;
1801 buf
->b_private
= NULL
;
1802 buf
->b_next
= hdr
->b_l1hdr
.b_buf
;
1803 hdr
->b_l1hdr
.b_buf
= buf
;
1804 arc_get_data_buf(buf
);
1805 bcopy(from
->b_data
, buf
->b_data
, size
);
1808 * This buffer already exists in the arc so create a duplicate
1809 * copy for the caller. If the buffer is associated with user data
1810 * then track the size and number of duplicates. These stats will be
1811 * updated as duplicate buffers are created and destroyed.
1813 if (HDR_ISTYPE_DATA(hdr
)) {
1814 ARCSTAT_BUMP(arcstat_duplicate_buffers
);
1815 ARCSTAT_INCR(arcstat_duplicate_buffers_size
, size
);
1817 hdr
->b_l1hdr
.b_datacnt
+= 1;
void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	mutex_enter(&buf->b_evict_lock);
	if (buf->b_data == NULL) {
		mutex_exit(&buf->b_evict_lock);
		return;
	}
	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	mutex_exit(&buf->b_evict_lock);

	ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
	    hdr->b_l1hdr.b_state == arc_mfu);

	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
	    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
	    data, metadata, hits);
}
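
/*
 * Illustrative sketch (not part of the original file): per the comment in
 * arc_buf_add_ref(), the call is a no-op when the buffer has already been
 * evicted, so a caller learns whether the reference was actually taken by
 * checking b_data afterwards.  The helper name and the guard are
 * hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static boolean_t
example_try_add_ref(arc_buf_t *buf, void *tag)
{
	arc_buf_add_ref(buf, tag);

	/* b_data remains NULL if the buffer was evicted before the call */
	return (buf->b_data != NULL);
}
#endif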
/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = buf->b_data;
		df->l2df_size = hdr->b_size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(buf->b_data, hdr->b_size);
	}
}
/*
 * Free up buf->b_data and if 'remove' is set, then pull the
 * arc_buf_t off of the arc_buf_hdr_t's list and free it.
 */
static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data != NULL) {
		arc_state_t *state = buf->b_hdr->b_l1hdr.b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = arc_buf_type(buf->b_hdr);

		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf, zio_buf_free);
				arc_space_return(size, ARC_SPACE_META);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf, zio_data_buf_free);
				arc_space_return(size, ARC_SPACE_DATA);
			}
		}
		if (list_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(
			    &buf->b_hdr->b_l1hdr.b_refcnt));
			ASSERT(state != arc_anon && state != arc_l2c_only);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;

		/*
		 * If we're destroying a duplicate buffer make sure
		 * that the appropriate statistics are updated.
		 */
		if (buf->b_hdr->b_l1hdr.b_datacnt > 1 &&
		    HDR_ISTYPE_DATA(buf->b_hdr)) {
			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
			ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
		}
		ASSERT(buf->b_hdr->b_l1hdr.b_datacnt > 0);
		buf->b_hdr->b_l1hdr.b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!remove)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_l1hdr.b_buf; *bufp != buf;
	    bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;
	buf->b_next = NULL;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
		    hdr->b_l1hdr.b_datacnt > 0);
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	}
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!HDR_IN_HASH_TABLE(hdr));

	if (HDR_HAS_L2HDR(hdr)) {
		l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
		boolean_t buflist_held = MUTEX_HELD(&l2hdr->b_dev->l2ad_mtx);

		if (!buflist_held) {
			mutex_enter(&l2hdr->b_dev->l2ad_mtx);
			l2hdr = &hdr->b_l2hdr;
		}

		list_remove(&l2hdr->b_dev->l2ad_buflist, hdr);

		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);

		if (!buflist_held)
			mutex_exit(&l2hdr->b_dev->l2ad_mtx);

		hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
	}

	if (!BUF_EMPTY(hdr))
		buf_discard_identity(hdr);

	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	if (HDR_HAS_L1HDR(hdr)) {
		while (hdr->b_l1hdr.b_buf) {
			arc_buf_t *buf = hdr->b_l1hdr.b_buf;

			if (buf->b_efunc != NULL) {
				mutex_enter(&arc_eviction_mtx);
				mutex_enter(&buf->b_evict_lock);
				ASSERT(buf->b_hdr != NULL);
				arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE,
				    FALSE);
				hdr->b_l1hdr.b_buf = buf->b_next;
				buf->b_hdr = &arc_eviction_hdr;
				buf->b_next = arc_eviction_list;
				arc_eviction_list = buf;
				mutex_exit(&buf->b_evict_lock);
				mutex_exit(&arc_eviction_mtx);
			} else {
				arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE,
				    TRUE);
			}
		}
		if (hdr->b_l1hdr.b_thawed != NULL) {
			kmem_free(hdr->b_l1hdr.b_thawed, 1);
			hdr->b_l1hdr.b_thawed = NULL;
		}
	}

	ASSERT3P(hdr->b_hash_next, ==, NULL);
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
		kmem_cache_free(hdr_full_cache, hdr);
	} else {
		kmem_cache_free(hdr_l2only_cache, hdr);
	}
}
static void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_l1hdr.b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		hdr = buf->b_hdr;
		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_l1hdr.b_datacnt > 1) {
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			ASSERT(buf == hdr->b_l1hdr.b_buf);
			ASSERT(buf->b_efunc == NULL);
			hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
		}
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			arc_hdr_destroy(hdr);
	}
}
boolean_t
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	boolean_t no_callback = (buf->b_efunc == NULL);

	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT(hdr->b_l1hdr.b_datacnt == 1);
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT(hdr->b_l1hdr.b_datacnt > 0);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT(hdr->b_l1hdr.b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_l1hdr.b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_l1hdr.b_buf == buf && buf->b_next == NULL);
		ASSERT(buf->b_efunc == NULL);
		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_l1hdr.b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}
int32_t
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}
/*
 * Called from the DMU to determine if the current buffer should be
 * evicted. In order to ensure proper locking, the eviction must be initiated
 * from the DMU. Return true if the buffer is associated with user data and
 * duplicate buffers still exist.
 */
boolean_t
arc_buf_eviction_needed(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	boolean_t evict_needed = B_FALSE;

	if (zfs_disable_dup_eviction)
		return (B_FALSE);

	mutex_enter(&buf->b_evict_lock);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts(); let that function
		 * perform the eviction.
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&buf->b_evict_lock);
		return (B_FALSE);
	} else if (buf->b_data == NULL) {
		/*
		 * We have already been added to the arc eviction list;
		 * recommend eviction.
		 */
		ASSERT3P(hdr, ==, &arc_eviction_hdr);
		mutex_exit(&buf->b_evict_lock);
		return (B_TRUE);
	}

	if (hdr->b_l1hdr.b_datacnt > 1 && HDR_ISTYPE_DATA(hdr))
		evict_needed = B_TRUE;

	mutex_exit(&buf->b_evict_lock);
	return (evict_needed);
}
/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *hdr, *hdr_prev = NULL;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;
	arc_buf_hdr_t marker = { 0 };
	int count = 0;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	/*
	 * The ghost list lock must be acquired first in order to prevent
	 * a 3-party deadlock:
	 *
	 *  - arc_evict_ghost acquires arc_*_ghost->arcs_mtx, followed by
	 *    l2ad_mtx in arc_hdr_realloc
	 *  - l2arc_write_buffers acquires l2ad_mtx, followed by arc_*->arcs_mtx
	 *  - arc_evict acquires arc_*->arcs_mtx, followed by
	 *    arc_*_ghost->arcs_mtx and forms a deadlock cycle.
	 *
	 * This situation is avoided by acquiring the ghost list lock first.
	 */
	mutex_enter(&evicted_state->arcs_mtx);
	mutex_enter(&state->arcs_mtx);

	/*
	 * Decide which "type" (data vs metadata) to recycle from.
	 *
	 * If we are over the metadata limit, recycle from metadata.
	 * If we are under the metadata minimum, recycle from data.
	 * Otherwise, recycle from whichever type has the oldest (least
	 * recently accessed) header.
	 */
	if (recycle) {
		arc_buf_hdr_t *data_hdr =
		    list_tail(&state->arcs_list[ARC_BUFC_DATA]);
		arc_buf_hdr_t *metadata_hdr =
		    list_tail(&state->arcs_list[ARC_BUFC_METADATA]);
		arc_buf_contents_t realtype;

		if (data_hdr == NULL) {
			realtype = ARC_BUFC_METADATA;
		} else if (metadata_hdr == NULL) {
			realtype = ARC_BUFC_DATA;
		} else if (arc_meta_used >= arc_meta_limit) {
			realtype = ARC_BUFC_METADATA;
		} else if (arc_meta_used <= arc_meta_min) {
			realtype = ARC_BUFC_DATA;
		} else if (HDR_HAS_L1HDR(data_hdr) &&
		    HDR_HAS_L1HDR(metadata_hdr) &&
		    data_hdr->b_l1hdr.b_arc_access <
		    metadata_hdr->b_l1hdr.b_arc_access) {
			realtype = ARC_BUFC_DATA;
		} else {
			realtype = ARC_BUFC_METADATA;
		}
		if (realtype != type) {
			/*
			 * If we want to evict from a different list,
			 * we can not recycle, because DATA vs METADATA
			 * buffers are segregated into different kmem
			 * caches (and vmem arenas).
			 */
			type = realtype;
			recycle = B_FALSE;
		}
	}

	list_t *list = &state->arcs_list[type];

	for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(list, hdr);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(hdr) ||
		    (spa && hdr->b_spa != spa) ||
		    ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
		    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
		    arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && hdr->b_size != bytes &&
		    hdr_prev && hdr_prev->b_size == bytes)
			continue;

		/* ignore markers */
		if (hdr->b_spa == 0)
			continue;

		/*
		 * It may take a long time to evict all the bufs requested.
		 * To avoid blocking all arc activity, periodically drop
		 * the arcs_mtx and give other threads a chance to run
		 * before reacquiring the lock.
		 *
		 * If we are looking for a buffer to recycle, we are in
		 * the hot code path, so don't sleep.
		 */
		if (!recycle && count++ > arc_evict_iterations) {
			list_insert_after(list, hdr, &marker);
			mutex_exit(&state->arcs_mtx);
			mutex_exit(&evicted_state->arcs_mtx);
			kpreempt(KPREEMPT_SYNC);
			mutex_enter(&evicted_state->arcs_mtx);
			mutex_enter(&state->arcs_mtx);
			hdr_prev = list_prev(list, &marker);
			list_remove(list, &marker);
			count = 0;
			continue;
		}

		hash_lock = HDR_LOCK(hdr);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
			ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0);
			while (hdr->b_l1hdr.b_buf) {
				arc_buf_t *buf = hdr->b_l1hdr.b_buf;
				if (!mutex_tryenter(&buf->b_evict_lock)) {
					missed += 1;
					break;
				}
				if (buf->b_data != NULL) {
					bytes_evicted += hdr->b_size;
					if (recycle &&
					    arc_buf_type(hdr) == type &&
					    hdr->b_size == bytes &&
					    !HDR_L2_WRITING(hdr)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc != NULL) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					hdr->b_l1hdr.b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					mutex_exit(&buf->b_evict_lock);
				} else {
					mutex_exit(&buf->b_evict_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}

			if (HDR_HAS_L2HDR(hdr)) {
				ARCSTAT_INCR(arcstat_evict_l2_cached,
				    hdr->b_size);
			} else {
				if (l2arc_write_eligible(hdr->b_spa, hdr)) {
					ARCSTAT_INCR(arcstat_evict_l2_eligible,
					    hdr->b_size);
				} else {
					ARCSTAT_INCR(
					    arcstat_evict_l2_ineligible,
					    hdr->b_size);
				}
			}

			if (hdr->b_l1hdr.b_datacnt == 0) {
				arc_change_state(evicted_state, hdr, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(hdr));
				hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
				hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&state->arcs_mtx);
	mutex_exit(&evicted_state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * Note: we have just evicted some data into the ghost state,
	 * potentially putting the ghost size over the desired size.  Rather
	 * than evicting from the ghost list in this hot code path, leave
	 * this chore to the arc_reclaim_thread().
	 */

	return (stolen);
}
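
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use the recycle mode documented above.  With recycle set, arc_evict()
 * either hands back a data block of exactly `size' bytes stolen from the
 * chosen list, or NULL, in which case the caller must allocate fresh
 * memory itself; this mirrors what arc_get_data_buf() does further below.
 * The helper name and the guard are hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static void *
example_recycle_or_alloc(arc_state_t *victim, uint64_t size)
{
	void *data = arc_evict(victim, NULL, size, TRUE, ARC_BUFC_DATA);

	if (data == NULL)	/* best effort only; it may find nothing */
		data = zio_data_buf_alloc(size);
	return (data);
}
#endif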
/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
{
	arc_buf_hdr_t *hdr, *hdr_prev;
	arc_buf_hdr_t marker = { 0 };
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;
	int count = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(list, hdr);
		if (arc_buf_type(hdr) >= ARC_BUFC_NUMTYPES)
			panic("invalid hdr=%p", (void *)hdr);
		if (spa && hdr->b_spa != spa)
			continue;

		/* ignore markers */
		if (hdr->b_spa == 0)
			continue;

		hash_lock = HDR_LOCK(hdr);
		/* caller may be trying to modify this buffer, skip it */
		if (MUTEX_HELD(hash_lock))
			continue;

		/*
		 * It may take a long time to evict all the bufs requested.
		 * To avoid blocking all arc activity, periodically drop
		 * the arcs_mtx and give other threads a chance to run
		 * before reacquiring the lock.
		 */
		if (count++ > arc_evict_iterations) {
			list_insert_after(list, hdr, &marker);
			mutex_exit(&state->arcs_mtx);
			kpreempt(KPREEMPT_SYNC);
			mutex_enter(&state->arcs_mtx);
			hdr_prev = list_prev(list, &marker);
			list_remove(list, &marker);
			count = 0;
			continue;
		}
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT(!HDR_HAS_L1HDR(hdr) ||
			    hdr->b_l1hdr.b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += hdr->b_size;

			if (HDR_HAS_L2HDR(hdr)) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
				 */
				arc_change_state(arc_l2c_only, hdr, hash_lock);
				/*
				 * dropping from L1+L2 cached to L2-only,
				 * realloc to remove the L1 header.
				 */
				hdr = arc_hdr_realloc(hdr, hdr_full_cache,
				    hdr_l2only_cache);
				mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, hdr, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(hdr);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else if (bytes < 0) {
			/*
			 * Insert a list marker and then wait for the
			 * hash lock to become available. Once it's
			 * available, restart from where we left off.
			 */
			list_insert_after(list, hdr, &marker);
			mutex_exit(&state->arcs_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			mutex_enter(&state->arcs_mtx);
			hdr_prev = list_prev(list, &marker);
			list_remove(list, &marker);
		} else {
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped)
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}
static void
arc_adjust(void)
{
	int64_t adjustment, delta;

	/*
	 * Adjust MRU size
	 */
	adjustment = MIN((int64_t)(arc_size - arc_c),
	    (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
	    arc_p));

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
		(void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
		(void) arc_evict(arc_mru, NULL, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 */
	adjustment = arc_size - arc_c;

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
		(void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t delta = MIN(adjustment,
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
		(void) arc_evict(arc_mfu, NULL, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust ghost lists
	 */
	adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
		delta = MIN(arc_mru_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mru_ghost, NULL, delta);
	}

	adjustment =
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
		delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mfu_ghost, NULL, delta);
	}
}
static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		mutex_enter(&buf->b_evict_lock);
		buf->b_hdr = NULL;
		mutex_exit(&buf->b_evict_lock);
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY0(buf->b_efunc(buf->b_private));

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}
/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
	uint64_t guid = 0;

	if (spa != NULL)
		guid = spa_load_guid(spa);

	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa != NULL)
			break;
	}
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa != NULL)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa != NULL)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa != NULL)
			break;
	}

	arc_evict_ghost(arc_mru_ghost, guid, -1);
	arc_evict_ghost(arc_mfu_ghost, guid, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(spa || arc_eviction_list == NULL);
}
void
arc_shrink(int64_t to_free)
{
	if (arc_c > arc_c_min) {

		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}
typedef enum free_memory_reason_t {
	FMR_UNKNOWN,
	FMR_NEEDFREE,
	FMR_LOTSFREE,
	FMR_SWAPFS_MINFREE,
	FMR_PAGES_PP_MAXIMUM,
	FMR_HEAP_ARENA,
	FMR_ZIO_ARENA,
} free_memory_reason_t;

int64_t last_free_memory;
free_memory_reason_t last_free_reason;

/*
 * Additional reserve of pages for pp_reserve.
 */
int64_t arc_pages_pp_reserve = 64;

/*
 * Additional reserve of pages for swapfs.
 */
int64_t arc_swapfs_reserve = 64;

/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed.  Positive if there is sufficient free memory, negative indicates
 * the amount of memory that needs to be freed up.
 */
static int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	int64_t n;
	free_memory_reason_t r = FMR_UNKNOWN;

#ifdef _KERNEL
	if (needfree > 0) {
		n = PAGESIZE * (-needfree);
		if (n < lowest) {
			lowest = n;
			r = FMR_NEEDFREE;
		}
	}

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
	if (n < lowest) {
		lowest = n;
		r = FMR_LOTSFREE;
	}

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed. anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
	    desfree - arc_swapfs_reserve);
	if (n < lowest) {
		lowest = n;
		r = FMR_SWAPFS_MINFREE;
	}

	/*
	 * Check that we have enough availrmem that memory locking (e.g., via
	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
	 * stores the number of pages that cannot be locked; when availrmem
	 * drops below pages_pp_maximum, page locking mechanisms such as
	 * page_pp_lock() will fail.)
	 */
	n = PAGESIZE * (availrmem - pages_pp_maximum -
	    arc_pages_pp_reserve);
	if (n < lowest) {
		lowest = n;
		r = FMR_PAGES_PP_MAXIMUM;
	}

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free.)
	 */
	n = vmem_size(heap_arena, VMEM_FREE) -
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
	if (n < lowest) {
		lowest = n;
		r = FMR_HEAP_ARENA;
	}
#endif

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this arena remains
	 * above about 1/16th free.
	 *
	 * Note: The 1/16th arena free requirement was put in place
	 * to aggressively evict memory from the arc in order to avoid
	 * memory fragmentation issues.
	 */
	if (zio_arena != NULL) {
		n = vmem_size(zio_arena, VMEM_FREE) -
		    (vmem_size(zio_arena, VMEM_ALLOC) >> 4);
		if (n < lowest) {
			lowest = n;
			r = FMR_ZIO_ARENA;
		}
	}
#else
	/* Every 100 calls, free a small amount */
	if (spa_get_random(100) == 0)
		lowest = -1024;
#endif

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}

/*
 * Determine if the system is under memory pressure and is asking
 * to reclaim memory. A return value of TRUE indicates that the system
 * is under memory pressure and that the arc should adjust accordingly.
 */
static boolean_t
arc_reclaim_needed(void)
{
	return (arc_available_memory() < 0);
}
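
/*
 * Illustrative sketch (not part of the original file): the sign convention
 * above means a negative arc_available_memory() value is the amount that
 * must be freed.  A caller deciding how much to shrink can combine that
 * deficit with a 1/arc_shrink_shift slice of arc_c, which is essentially
 * what the reclaim thread below does.  The helper name and the guard are
 * hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static int64_t
example_shrink_target(void)
{
	int64_t free_memory = arc_available_memory();

	if (free_memory >= 0)
		return (0);	/* nothing needs to be freed */

	/* free the deficit plus a small fraction of the current ARC size */
	return ((arc_c >> arc_shrink_shift) - free_memory);
}
#endif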
static void
arc_kmem_reap_now(void)
{
	size_t i;
	kmem_cache_t *prev_cache = NULL;
	kmem_cache_t *prev_data_cache = NULL;
	extern kmem_cache_t *zio_buf_cache[];
	extern kmem_cache_t *zio_data_buf_cache[];
	extern kmem_cache_t *range_seg_cache;

	if (arc_meta_used >= arc_meta_limit) {
		/*
		 * We are exceeding our meta-data cache limit.
		 * Purge some DNLC entries to release holds on meta-data.
		 */
		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
	}

	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_full_cache);
	kmem_cache_reap_now(hdr_l2only_cache);
	kmem_cache_reap_now(range_seg_cache);

	if (zio_arena != NULL) {
		/*
		 * Ask the vmem arena to reclaim unused memory from its
		 * quantum caches.
		 */
		vmem_qcache_reap(zio_arena);
	}
}
static void
arc_reclaim_thread(void)
{
	clock_t growtime = 0;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		int64_t free_memory = arc_available_memory();
		if (free_memory < 0) {

			arc_no_grow = B_TRUE;

			/*
			 * Wait at least zfs_grow_retry (default 60) seconds
			 * before considering growing.
			 */
			growtime = ddi_get_lbolt() + (arc_grow_retry * hz);

			arc_kmem_reap_now();

			/*
			 * If we are still low on memory, shrink the ARC
			 * so that we have arc_shrink_min free space.
			 */
			free_memory = arc_available_memory();

			int64_t to_free =
			    (arc_c >> arc_shrink_shift) - free_memory;
			if (to_free > 0) {
				to_free = MAX(to_free, ptob(needfree));
				arc_shrink(to_free);
			}
		} else if (free_memory < arc_c >> arc_no_grow_shift) {
			arc_no_grow = B_TRUE;
		} else if (ddi_get_lbolt() >= growtime) {
			arc_no_grow = B_FALSE;
		}

		arc_adjust();

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/*
		 * This is necessary in order for the mdb ::arc dcmd to
		 * show up to date information. Since the ::arc command
		 * does not call the kstat's update function, without
		 * this call, the command may show stale stats for the
		 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
		 * with this change, the data might be up to 1 second
		 * out of date; but that should suffice. The arc_state_t
		 * structures can be queried directly if more accurate
		 * information is needed.
		 */
		if (arc_ksp != NULL)
			arc_ksp->ks_update(arc_ksp, KSTAT_READ);

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops arc_reclaim_thr_lock */
	thread_exit();
}
/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;
	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);

	if (state == arc_l2c_only)
		return;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 * - if we just hit in the MRU ghost list, then increase
	 *   the target size of the MRU list.
	 * - if we just hit in the MFU ghost list, then increase
	 *   the target size of the MFU list by decreasing the
	 *   target size of the MRU list.
	 */
	if (state == arc_mru_ghost) {
		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
		    1 : (arc_mfu_ghost->arcs_size / arc_mru_ghost->arcs_size));
		mult = MIN(mult, 10); /* avoid wild arc_p adjustment */

		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		uint64_t delta;

		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
		    1 : (arc_mru_ghost->arcs_size / arc_mfu_ghost->arcs_size));
		mult = MIN(mult, 10);

		delta = MIN(bytes * mult, arc_p);
		arc_p = MAX(arc_p_min, arc_p - delta);
	}
	ASSERT((int64_t)arc_p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);
		return;
	}

	if (arc_no_grow)
		return;

	if (arc_c >= arc_c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc_c, (int64_t)bytes);
		if (arc_c > arc_c_max)
			arc_c = arc_c_max;
		else if (state == arc_anon)
			atomic_add_64(&arc_p, (int64_t)bytes);
		if (arc_p > arc_c)
			arc_p = arc_c;
	}
	ASSERT((int64_t)arc_p >= 0);
}
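
/*
 * Worked example (not part of the original file): with the ghost-hit rule
 * above, a hit in the MRU ghost list grows arc_p in proportion to how much
 * larger the MFU ghost list is.  For instance, if arc_mfu_ghost is four
 * times the size of arc_mru_ghost, a 16K ghost hit moves arc_p up by
 * 4 * 16K = 64K, subject to the mult <= 10 clamp and the arc_c - arc_p_min
 * ceiling.  The helper name and the guard are hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static uint64_t
example_mru_ghost_bump(uint64_t bytes)
{
	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
	int mult = (arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
	    1 : (arc_mfu_ghost->arcs_size / arc_mru_ghost->arcs_size);

	mult = MIN(mult, 10);
	return (MIN(arc_c - arc_p_min, arc_p + bytes * mult));
}
#endif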
/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed(arc_buf_contents_t type)
{
	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
		return (1);

	if (arc_reclaim_needed())
		return (1);

	return (arc_size > arc_c);
}
/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
	arc_state_t *state = buf->b_hdr->b_l1hdr.b_state;
	uint64_t size = buf->b_hdr->b_size;
	arc_buf_contents_t type = arc_buf_type(buf->b_hdr);

	arc_adapt(size, state);

	/*
	 * We have not yet reached cache maximum size,
	 * just allocate a new buffer.
	 */
	if (!arc_evict_needed(type)) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_META);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_DATA);
		}
		goto out;
	}

	/*
	 * If we are prefetching from the mfu ghost list, this buffer
	 * will end up on the mru list; so steal space from there.
	 */
	if (state == arc_mfu_ghost)
		state = HDR_PREFETCH(buf->b_hdr) ? arc_mru : arc_mfu;
	else if (state == arc_mru_ghost)
		state = arc_mru;

	if (state == arc_mru || state == arc_anon) {
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
		state = (arc_mfu->arcs_lsize[type] >= size &&
		    arc_p > mru_used) ? arc_mfu : arc_mru;
	} else {
		uint64_t mfu_space = arc_c - arc_p;
		state = (arc_mru->arcs_lsize[type] >= size &&
		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
	}
	if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_META);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_DATA);
		}
		ARCSTAT_BUMP(arcstat_recycle_miss);
	}
	ASSERT(buf->b_data != NULL);
out:
	/*
	 * Update the state size.  Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(buf->b_hdr->b_l1hdr.b_state)) {
		arc_buf_hdr_t *hdr = buf->b_hdr;

		atomic_add_64(&hdr->b_l1hdr.b_state->arcs_size, size);
		if (list_link_active(&hdr->b_l1hdr.b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
			atomic_add_64(&hdr->b_l1hdr.b_state->arcs_lsize[type],
			    size);
		}
		/*
		 * If we are growing the cache, and we are adding anonymous
		 * data, and we have outgrown arc_p, update arc_p
		 */
		if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
			arc_p = MIN(arc_c, arc_p + size);
	}
}
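
/*
 * Illustrative sketch (not part of the original file): the four cases in
 * the comment above boil down to one comparison per side.  For an MRU (or
 * anonymous) insert, the MFU is victimized only while arc_p still exceeds
 * the MRU's resident size; for an MFU insert, the MRU is victimized only
 * while the MFU still fits within c - p.  This ignores the additional
 * arcs_lsize check the real code applies.  The helper name and the guard
 * are hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static arc_state_t *
example_pick_victim(arc_state_t *state)
{
	if (state == arc_mru || state == arc_anon) {
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
		return (arc_p > mru_used ? arc_mfu : arc_mru);	/* cases 1, 2 */
	}
	return (arc_c - arc_p > arc_mfu->arcs_size ?
	    arc_mru : arc_mfu);					/* cases 3, 4 */
}
#endif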
/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
	clock_t now;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(HDR_HAS_L1HDR(hdr));

	if (hdr->b_l1hdr.b_state == arc_anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		ASSERT0(hdr->b_l1hdr.b_arc_access);
		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
		arc_change_state(arc_mru, hdr, hash_lock);

	} else if (hdr->b_l1hdr.b_state == arc_mru) {
		now = ddi_get_lbolt();

		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if (HDR_PREFETCH(hdr)) {
			if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
				ASSERT(list_link_active(
				    &hdr->b_l1hdr.b_arc_node));
			} else {
				hdr->b_flags &= ~ARC_FLAG_PREFETCH;
				ARCSTAT_BUMP(arcstat_mru_hits);
			}
			hdr->b_l1hdr.b_arc_access = now;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache. Move it to the MFU
		 * state.
		 */
		if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) {
			/*
			 * More than 125ms have passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			hdr->b_l1hdr.b_arc_access = now;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
			arc_change_state(arc_mfu, hdr, hash_lock);
		}
		ARCSTAT_BUMP(arcstat_mru_hits);
	} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
		arc_state_t *new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MRU or MFU state.
		 */

		if (HDR_PREFETCH(hdr)) {
			new_state = arc_mru;
			if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
				hdr->b_flags &= ~ARC_FLAG_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
		} else {
			new_state = arc_mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
		}

		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		arc_change_state(new_state, hdr, hash_lock);

		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
	} else if (hdr->b_l1hdr.b_state == arc_mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((HDR_PREFETCH(hdr)) != 0) {
			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
			ASSERT(list_link_active(&hdr->b_l1hdr.b_arc_node));
		}
		ARCSTAT_BUMP(arcstat_mfu_hits);
		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
	} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
		arc_state_t *new_state = arc_mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (HDR_PREFETCH(hdr)) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
			new_state = arc_mru;
		}

		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
		arc_change_state(new_state, hdr, hash_lock);

		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
	} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
		/*
		 * This buffer is on the 2nd Level ARC.
		 */

		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
		arc_change_state(arc_mfu, hdr, hash_lock);
	} else {
		ASSERT(!"invalid arc state");
	}
}
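
/*
 * Illustrative sketch (not part of the original file): the MRU-to-MFU
 * promotion above only happens once a buffer has sat in the MRU state for
 * more than ARC_MINTIME ticks, so two accesses in quick succession do not
 * promote it.  The helper name and the guard are hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static boolean_t
example_would_promote(arc_buf_hdr_t *hdr)
{
	return (hdr->b_l1hdr.b_state == arc_mru &&
	    ddi_get_lbolt() > hdr->b_l1hdr.b_arc_access + ARC_MINTIME);
}
#endif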
/* a generic arc_done_func_t which you can use */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	if (zio == NULL || zio->io_error == 0)
		bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg));
}

/* a generic arc_done_func_t */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg));
		*bufp = NULL;
	} else {
		*bufp = buf;
		ASSERT(buf->b_data);
	}
}
static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	arc_buf_t *abuf;	/* buffer we're assigning to callback */
	kmutex_t *hash_lock = NULL;
	arc_callback_t *callback_list, *acb;
	int freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	if (HDR_IN_HASH_TABLE(hdr)) {
		ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
		ASSERT3U(hdr->b_dva.dva_word[0], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
		ASSERT3U(hdr->b_dva.dva_word[1], ==,
		    BP_IDENTITY(zio->io_bp)->dva_word[1]);

		arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
		    &hash_lock);

		ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
		    hash_lock == NULL) ||
		    (found == hdr &&
		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
		    (found == hdr && HDR_L2_READING(hdr)));
	}

	hdr->b_flags &= ~ARC_FLAG_L2_EVICTED;
	if (l2arc_noprefetch && HDR_PREFETCH(hdr))
		hdr->b_flags &= ~ARC_FLAG_L2CACHE;

	/* byteswap if necessary */
	callback_list = hdr->b_l1hdr.b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
		dmu_object_byteswap_t bswap =
		    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
		    byteswap_uint64_array :
		    dmu_ot_byteswap[bswap].ob_func;
		func(buf->b_data, hdr->b_size);
	}

	arc_cksum_compute(buf, B_FALSE);
	arc_buf_watch(buf);

	if (hash_lock && zio->io_error == 0 &&
	    hdr->b_l1hdr.b_state == arc_anon) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		arc_access(hdr, hash_lock);
	}

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL) {
				ARCSTAT_BUMP(arcstat_duplicate_reads);
				abuf = arc_buf_clone(buf);
			}
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_l1hdr.b_acb = NULL;
	hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf) {
		ASSERT(buf->b_efunc == NULL);
		ASSERT(hdr->b_l1hdr.b_datacnt == 1);
		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
	}

	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
	    callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_FLAG_IO_ERROR;
		if (hdr->b_l1hdr.b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_l1hdr.b_cv);

	if (hash_lock != NULL) {
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}
/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 */
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
    void *private, zio_priority_t priority, int zio_flags,
    arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
	arc_buf_hdr_t *hdr = NULL;
	arc_buf_t *buf = NULL;
	kmutex_t *hash_lock = NULL;
	zio_t *rzio;
	uint64_t guid = spa_load_guid(spa);

	ASSERT(!BP_IS_EMBEDDED(bp) ||
	    BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);

top:
	if (!BP_IS_EMBEDDED(bp)) {
		/*
		 * Embedded BP's have no DVA and require no I/O to "read".
		 * Create an anonymous arc buf to back it.
		 */
		hdr = buf_hash_find(guid, bp, &hash_lock);
	}

	if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_datacnt > 0) {

		*arc_flags |= ARC_FLAG_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if (*arc_flags & ARC_FLAG_WAIT) {
				cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_FLAG_NOWAIT);

			if (done) {
				arc_callback_t *acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_SLEEP);
				acb->acb_done = done;
				acb->acb_private = private;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, NULL, zio_flags);

				ASSERT(acb->acb_done != NULL);
				acb->acb_next = hdr->b_l1hdr.b_acb;
				hdr->b_l1hdr.b_acb = acb;
				add_reference(hdr, hash_lock, private);
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
		    hdr->b_l1hdr.b_state == arc_mfu);

		if (done) {
			add_reference(hdr, hash_lock, private);
			/*
			 * If this block is already in use, create a new
			 * copy of the data so that we will be guaranteed
			 * that arc_release() will always succeed.
			 */
			buf = hdr->b_l1hdr.b_buf;
			ASSERT(buf->b_data);
			if (HDR_BUF_AVAILABLE(hdr)) {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
			} else {
				buf = arc_buf_clone(buf);
			}

		} else if (*arc_flags & ARC_FLAG_PREFETCH &&
		    refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
			hdr->b_flags |= ARC_FLAG_PREFETCH;
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		if (*arc_flags & ARC_FLAG_L2CACHE)
			hdr->b_flags |= ARC_FLAG_L2CACHE;
		if (*arc_flags & ARC_FLAG_L2COMPRESS)
			hdr->b_flags |= ARC_FLAG_L2COMPRESS;
		mutex_exit(hash_lock);
		ARCSTAT_BUMP(arcstat_hits);
		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
		    data, metadata, hits);

		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t *acb;
		vdev_t *vd = NULL;
		uint64_t addr = 0;
		boolean_t devw = B_FALSE;
		enum zio_compress b_compress = ZIO_COMPRESS_OFF;
		int32_t b_asize = 0;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t *exists = NULL;
			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
			buf = arc_buf_alloc(spa, size, private, type);
			hdr = buf->b_hdr;
			if (!BP_IS_EMBEDDED(bp)) {
				hdr->b_dva = *BP_IDENTITY(bp);
				hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
				exists = buf_hash_insert(hdr, &hash_lock);
			}
			if (exists != NULL) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				buf_discard_identity(hdr);
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_FLAG_PREFETCH) {
				(void) remove_reference(hdr, hash_lock,
				    private);
				hdr->b_flags |= ARC_FLAG_PREFETCH;
			}
			if (*arc_flags & ARC_FLAG_L2CACHE)
				hdr->b_flags |= ARC_FLAG_L2CACHE;
			if (*arc_flags & ARC_FLAG_L2COMPRESS)
				hdr->b_flags |= ARC_FLAG_L2COMPRESS;
			if (BP_GET_LEVEL(bp) > 0)
				hdr->b_flags |= ARC_FLAG_INDIRECT;
		} else {
			/*
			 * This block is in the ghost cache. If it was L2-only
			 * (and thus didn't have an L1 hdr), we realloc the
			 * header to add an L1 hdr.
			 */
			if (!HDR_HAS_L1HDR(hdr)) {
				hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
				    hdr_full_cache);
			}

			ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
			ASSERT(hdr->b_l1hdr.b_buf == NULL);

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_FLAG_PREFETCH)
				hdr->b_flags |= ARC_FLAG_PREFETCH;
			else
				add_reference(hdr, hash_lock, private);
			if (*arc_flags & ARC_FLAG_L2CACHE)
				hdr->b_flags |= ARC_FLAG_L2CACHE;
			if (*arc_flags & ARC_FLAG_L2COMPRESS)
				hdr->b_flags |= ARC_FLAG_L2COMPRESS;
			buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
			buf->b_hdr = hdr;
			buf->b_data = NULL;
			buf->b_efunc = NULL;
			buf->b_private = NULL;
			buf->b_next = NULL;
			hdr->b_l1hdr.b_buf = buf;
			ASSERT0(hdr->b_l1hdr.b_datacnt);
			hdr->b_l1hdr.b_datacnt = 1;
			arc_get_data_buf(buf);
			arc_access(hdr, hash_lock);
		}

		ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
		acb->acb_done = done;
		acb->acb_private = private;

		ASSERT(hdr->b_l1hdr.b_acb == NULL);
		hdr->b_l1hdr.b_acb = acb;
		hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;

		if (HDR_HAS_L2HDR(hdr) &&
		    (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
			devw = hdr->b_l2hdr.b_dev->l2ad_writing;
			addr = hdr->b_l2hdr.b_daddr;
			b_compress = HDR_GET_COMPRESS(hdr);
			b_asize = hdr->b_l2hdr.b_asize;
			/*
			 * Lock out device removal.
			 */
			if (vdev_is_dead(vd) ||
			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
				vd = NULL;
		}

		if (hash_lock != NULL)
			mutex_exit(hash_lock);

		/*
		 * At this point, we have a level 1 cache miss.  Try again in
		 * L2ARC if possible.
		 */
		ASSERT3U(hdr->b_size, ==, size);
		DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
		    uint64_t, size, zbookmark_phys_t *, zb);
		ARCSTAT_BUMP(arcstat_misses);
		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
		    data, metadata, misses);

		if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
			/*
			 * Read from the L2ARC if the following are true:
			 * 1. The L2ARC vdev was previously cached.
			 * 2. This buffer still has L2ARC metadata.
			 * 3. This buffer isn't currently writing to the L2ARC.
			 * 4. The L2ARC entry wasn't evicted, which may
			 *    also have invalidated the vdev.
			 * 5. This isn't prefetch and l2arc_noprefetch is set.
			 */
			if (HDR_HAS_L2HDR(hdr) &&
			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
			    !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
				l2arc_read_callback_t *cb;

				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_hits);

				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
				    KM_SLEEP);
				cb->l2rcb_buf = buf;
				cb->l2rcb_spa = spa;
				cb->l2rcb_flags = zio_flags;
				cb->l2rcb_compress = b_compress;

				ASSERT(addr >= VDEV_LABEL_START_SIZE &&
				    addr + size < vd->vdev_psize -
				    VDEV_LABEL_END_SIZE);

				/*
				 * l2arc read.  The SCL_L2ARC lock will be
				 * released by l2arc_read_done().
				 * Issue a null zio if the underlying buffer
				 * was squashed to zero size by compression.
				 */
				if (b_compress == ZIO_COMPRESS_EMPTY) {
					rzio = zio_null(pio, spa, vd,
					    l2arc_read_done, cb,
					    zio_flags | ZIO_FLAG_DONT_CACHE |
					    ZIO_FLAG_CANFAIL |
					    ZIO_FLAG_DONT_PROPAGATE |
					    ZIO_FLAG_DONT_RETRY);
				} else {
					rzio = zio_read_phys(pio, vd, addr,
					    b_asize, buf->b_data,
					    ZIO_CHECKSUM_OFF,
					    l2arc_read_done, cb, priority,
					    zio_flags | ZIO_FLAG_DONT_CACHE |
					    ZIO_FLAG_CANFAIL |
					    ZIO_FLAG_DONT_PROPAGATE |
					    ZIO_FLAG_DONT_RETRY, B_FALSE);
				}
				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
				    zio_t *, rzio);
				ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);

				if (*arc_flags & ARC_FLAG_NOWAIT) {
					zio_nowait(rzio);
					return (0);
				}

				ASSERT(*arc_flags & ARC_FLAG_WAIT);
				if (zio_wait(rzio) == 0)
					return (0);

				/* l2arc read error; goto zio_read() */
			} else {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
				if (HDR_L2_WRITING(hdr))
					ARCSTAT_BUMP(arcstat_l2_rw_clash);
				spa_config_exit(spa, SCL_L2ARC, vd);
			}
		} else {
			if (vd != NULL)
				spa_config_exit(spa, SCL_L2ARC, vd);
			if (l2arc_ndev != 0) {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
			}
		}

		rzio = zio_read(pio, spa, bp, buf->b_data, size,
		    arc_read_done, buf, priority, zio_flags, zb);

		if (*arc_flags & ARC_FLAG_WAIT)
			return (zio_wait(rzio));

		ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}
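
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * arc_read() caller that waits for the data and copies it out, using the
 * generic arc_getbuf_func() callback defined earlier.  Flag handling is
 * reduced to the minimum; the helper name and the guard are hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static int
example_read_block(spa_t *spa, const blkptr_t *bp, void *out,
    const zbookmark_phys_t *zb)
{
	arc_buf_t *abuf = NULL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	int err;

	err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
	if (err == 0 && abuf != NULL) {
		bcopy(abuf->b_data, out, arc_buf_size(abuf));
		(void) arc_buf_remove_ref(abuf, &abuf);
	}
	return (err);
}
#endif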
void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_l1hdr.b_state != arc_anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt) ||
	    func == NULL);
	ASSERT(buf->b_efunc == NULL);
	ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));

	buf->b_efunc = func;
	buf->b_private = private;
}
/*
 * Notify the arc that a block was freed, and thus will never be used again.
 */
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	uint64_t guid = spa_load_guid(spa);

	ASSERT(!BP_IS_EMBEDDED(bp));

	hdr = buf_hash_find(guid, bp, &hash_lock);
	if (hdr == NULL)
		return;
	if (HDR_BUF_AVAILABLE(hdr)) {
		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
		add_reference(hdr, hash_lock, FTAG);
		hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
		mutex_exit(hash_lock);

		arc_release(buf, FTAG);
		(void) arc_buf_remove_ref(buf, FTAG);
	} else {
		mutex_exit(hash_lock);
	}
}
/*
 * Clear the user eviction callback set by arc_set_callback(), first calling
 * it if it exists.  Because the presence of a callback keeps an arc_buf cached,
 * clearing the callback may result in the arc_buf being destroyed.  However,
 * it will not result in the *last* arc_buf being destroyed, hence the data
 * will remain cached in the ARC.  We make a copy of the arc buffer here so
 * that we can process the callback without holding any locks.
 *
 * It's possible that the callback is already in the process of being cleared
 * by another thread.  In this case we can not clear the callback.
 *
 * Returns B_TRUE if the callback was successfully called and cleared.
 */
boolean_t
arc_clear_callback(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_evict_func_t *efunc = buf->b_efunc;
	void *private = buf->b_private;

	mutex_enter(&buf->b_evict_lock);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&buf->b_evict_lock);
		return (B_FALSE);
	} else if (buf->b_data == NULL) {
		/*
		 * We are on the eviction list; process this buffer now
		 * but let arc_do_user_evicts() do the reaping.
		 */
		buf->b_efunc = NULL;
		mutex_exit(&buf->b_evict_lock);
		VERIFY0(efunc(private));
		return (B_TRUE);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	ASSERT3U(refcount_count(&hdr->b_l1hdr.b_refcnt), <,
	    hdr->b_l1hdr.b_datacnt);
	ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
	    hdr->b_l1hdr.b_state == arc_mfu);

	buf->b_efunc = NULL;
	buf->b_private = NULL;

	if (hdr->b_l1hdr.b_datacnt > 1) {
		mutex_exit(&buf->b_evict_lock);
		arc_buf_destroy(buf, FALSE, TRUE);
	} else {
		ASSERT(buf == hdr->b_l1hdr.b_buf);
		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
		mutex_exit(&buf->b_evict_lock);
	}

	mutex_exit(hash_lock);
	VERIFY0(efunc(private));
	return (B_TRUE);
}
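
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * arc_set_callback() and arc_clear_callback().  A user registers an eviction
 * callback to be notified if the buffer is evicted, and later clears it; a
 * B_FALSE return from arc_clear_callback() means another thread (or the
 * eviction path) is already handling the callback.  The helper name and the
 * guard are hypothetical.
 */
#ifdef ARC_EXAMPLE_SKETCHES
static void
example_cache_with_evict_notice(arc_buf_t *buf, arc_evict_func_t *efunc,
    void *user)
{
	/* keep the buffer cached and ask to be told if it is evicted */
	arc_set_callback(buf, efunc, user);

	/* ... later, when the user structure goes away ... */
	if (!arc_clear_callback(buf)) {
		/*
		 * The callback is being cleared (or run) by another
		 * thread; nothing more to do here.
		 */
	}
}
#endif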
/*
 * Release this buffer from the cache, making it an anonymous buffer.  This
 * must be done after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/*
	 * It would be nice to assert that if it's DMU metadata (level >
	 * 0 || it's the dnode file), then it must be syncing context.
	 * But we don't know that information at this level.
	 */

	mutex_enter(&buf->b_evict_lock);

	/*
	 * We don't grab the hash lock prior to this check, because if
	 * the buffer's header is in the arc_anon state, it won't be
	 * linked into the hash table.
	 */
	if (hdr->b_l1hdr.b_state == arc_anon) {
		mutex_exit(&buf->b_evict_lock);
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		ASSERT(!HDR_HAS_L2HDR(hdr));
		ASSERT(BUF_EMPTY(hdr));

		ASSERT3U(hdr->b_l1hdr.b_datacnt, ==, 1);
		ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));

		ASSERT3P(buf->b_efunc, ==, NULL);
		ASSERT3P(buf->b_private, ==, NULL);

		hdr->b_l1hdr.b_arc_access = 0;
		arc_buf_thaw(buf);

		return;
	}

	kmutex_t *hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	/*
	 * This assignment is only valid as long as the hash_lock is
	 * held, we must be careful not to reference state or the
	 * b_state field after dropping the lock.
	 */
	arc_state_t *state = hdr->b_l1hdr.b_state;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT3P(state, !=, arc_anon);

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) > 0);

	if (HDR_HAS_L2HDR(hdr)) {
		ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);

		mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
		list_remove(&hdr->b_l2hdr.b_dev->l2ad_buflist, hdr);
		mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);

		hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
	}

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_l1hdr.b_datacnt > 1) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		uint64_t spa = hdr->b_spa;
		arc_buf_contents_t type = arc_buf_type(hdr);
		uint32_t flags = hdr->b_flags;

		ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
		/*
		 * Pull the data off of this hdr and attach it to
		 * a new anonymous hdr.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_l1hdr.b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = buf->b_next;
		buf->b_next = NULL;

		ASSERT3P(state, !=, arc_l2c_only);
		ASSERT3U(state->arcs_size, >=, hdr->b_size);
		atomic_add_64(&state->arcs_size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
			ASSERT3P(state, !=, arc_l2c_only);
			uint64_t *size = &state->arcs_lsize[type];
			ASSERT3U(*size, >=, hdr->b_size);
			atomic_add_64(size, -hdr->b_size);
		}

		/*
		 * We're releasing a duplicate user data buffer, update
		 * our statistics accordingly.
		 */
		if (HDR_ISTYPE_DATA(hdr)) {
			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
			ARCSTAT_INCR(arcstat_duplicate_buffers_size,
			    -hdr->b_size);
		}
		hdr->b_l1hdr.b_datacnt -= 1;
		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;

		nhdr->b_flags = flags & ARC_FLAG_L2_WRITING;
		nhdr->b_flags |= arc_bufc_to_flags(type);
		nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;

		nhdr->b_l1hdr.b_buf = buf;
		nhdr->b_l1hdr.b_datacnt = 1;
		nhdr->b_l1hdr.b_state = arc_anon;
		nhdr->b_l1hdr.b_arc_access = 0;
		nhdr->b_freeze_cksum = NULL;

		(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
		buf->b_hdr = nhdr;
		mutex_exit(&buf->b_evict_lock);
		atomic_add_64(&arc_anon->arcs_size, blksz);
	} else {
		mutex_exit(&buf->b_evict_lock);
		ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
		/* protected by hash lock */
		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		arc_change_state(arc_anon, hdr, hash_lock);
		hdr->b_l1hdr.b_arc_access = 0;
		mutex_exit(hash_lock);

		buf_discard_identity(hdr);
		arc_buf_thaw(buf);
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;
}
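
/*
 * Illustrative sketch (not part of this file): a typical consumer takes its
 * own hold on a buffer, releases it from the cache, and only then dirties
 * the contents, for example:
 *
 *	arc_buf_t *abuf = ...;			// obtained earlier via arc_read()
 *	if (!arc_released(abuf))
 *		arc_release(abuf, tag);		// buffer is now anonymous
 *	bcopy(newdata, abuf->b_data, size);	// safe to modify the data
 *
 * The real call sites live in the DMU (e.g. dbuf.c); this snippet only shows
 * the ordering constraint documented above for arc_release().
 */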

int
arc_released(arc_buf_t *buf)
{
	int released;

	mutex_enter(&buf->b_evict_lock);
	released = (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
	mutex_exit(&buf->b_evict_lock);
	return (released);
}

int
arc_referenced(arc_buf_t *buf)
{
	int referenced;

	mutex_enter(&buf->b_evict_lock);
	referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
	mutex_exit(&buf->b_evict_lock);
	return (referenced);
}

static void
arc_write_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
	ASSERT(hdr->b_l1hdr.b_datacnt > 0);
	callback->awcb_ready(zio, buf, callback->awcb_private);

	/*
	 * If the IO is already in progress, then this is a re-write
	 * attempt, so we need to thaw and re-compute the cksum.
	 * It is the responsibility of the callback to handle the
	 * accounting for any re-write attempt.
	 */
	if (HDR_IO_IN_PROGRESS(hdr)) {
		mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
		if (hdr->b_freeze_cksum != NULL) {
			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
			hdr->b_freeze_cksum = NULL;
		}
		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
	}
	arc_cksum_compute(buf, B_FALSE);
	hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
}

/*
 * The SPA calls this callback for each physical write that happens on behalf
 * of a logical write.  See the comment in dbuf_write_physdone() for details.
 */
static void
arc_write_physdone(zio_t *zio)
{
	arc_write_callback_t *cb = zio->io_private;
	if (cb->awcb_physdone != NULL)
		cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}

static void
arc_write_done(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(hdr->b_l1hdr.b_acb == NULL);

	if (zio->io_error == 0) {
		if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
			buf_discard_identity(hdr);
		} else {
			hdr->b_dva = *BP_IDENTITY(zio->io_bp);
			hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
		}
	} else {
		ASSERT(BUF_EMPTY(hdr));
	}

	/*
	 * If the block to be written was all-zero or compressed enough to be
	 * embedded in the BP, no write was performed so there will be no
	 * dva/birth/checksum.  The buffer must therefore remain anonymous
	 * (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		ASSERT(zio->io_error == 0);

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists != NULL) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad overwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
				ASSERT(refcount_is_zero(
				    &exists->b_l1hdr.b_refcnt));
				arc_change_state(arc_anon, exists, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(exists);
				exists = buf_hash_insert(hdr, &hash_lock);
				ASSERT3P(exists, ==, NULL);
			} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
				ASSERT(zio->io_prop.zp_nopwrite);
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad nopwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
			} else {
				ASSERT(hdr->b_l1hdr.b_datacnt == 1);
				ASSERT(hdr->b_l1hdr.b_state == arc_anon);
				ASSERT(BP_GET_DEDUP(zio->io_bp));
				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
			}
		}
		hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
		/* if it's not anon, we are doing a scrub */
		if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
	}

	ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
	callback->awcb_done(zio, buf, callback->awcb_private);

	kmem_free(callback, sizeof (arc_write_callback_t));
}

zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
    const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
    arc_done_func_t *done, void *private, zio_priority_t priority,
    int zio_flags, const zbookmark_phys_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_write_callback_t *callback;
	zio_t *zio;

	ASSERT(ready != NULL);
	ASSERT(done != NULL);
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(hdr->b_l1hdr.b_acb == NULL);
	ASSERT(hdr->b_l1hdr.b_datacnt > 0);
	if (l2arc)
		hdr->b_flags |= ARC_FLAG_L2CACHE;
	if (l2arc_compress)
		hdr->b_flags |= ARC_FLAG_L2COMPRESS;
	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
	callback->awcb_ready = ready;
	callback->awcb_physdone = physdone;
	callback->awcb_done = done;
	callback->awcb_private = private;
	callback->awcb_buf = buf;

	zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
	    arc_write_ready, arc_write_physdone, arc_write_done, callback,
	    priority, zio_flags, zb);

	return (zio);
}
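
/*
 * Illustrative sketch (not from this file): a caller of arc_write() wires up
 * the ready/done callbacks and then waits on the returned zio, roughly:
 *
 *	zio_t *zio = arc_write(pio, spa, txg, bp, abuf,
 *	    B_TRUE, B_FALSE,			// L2ARC-eligible, no L2 compression
 *	    &zp, my_ready_cb, NULL, my_done_cb, cb_arg,
 *	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, &zb);
 *	(void) zio_wait(zio);			// or zio_nowait() under a parent zio
 *
 * my_ready_cb, my_done_cb and cb_arg are hypothetical names; the actual
 * consumers of this interface are in dbuf.c and dmu_objset.c.
 */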

static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
	uint64_t available_memory = ptob(freemem);
	static uint64_t page_load = 0;
	static uint64_t last_txg = 0;

	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));

	if (freemem > physmem * arc_lotsfree_percent / 100)
		return (0);

	if (txg > last_txg) {
		last_txg = txg;
		page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (curproc == proc_pageout) {
		if (page_load > MAX(ptob(minfree), available_memory) / 4)
			return (SET_ERROR(ERESTART));
		/* Note: reserve is inflated, so we deflate */
		page_load += reserve / 8;
		return (0);
	} else if (page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (SET_ERROR(EAGAIN));
	}
	page_load = 0;

	return (0);
}
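
/*
 * Worked example (illustrative): with arc_lotsfree_percent at its default of
 * 10, a machine with physmem = 4M pages is only considered for throttling
 * once freemem drops to 400K pages or below; above that threshold the check
 * at the top returns 0 without charging page_load at all.
 */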

void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

	if (reserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, reserve * 4);
	if (reserve > arc_c)
		return (SET_ERROR(ENOMEM));

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */
	anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	error = arc_memory_throttle(reserve, txg);
	if (error != 0)
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 */
	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
	    anon_size > arc_c / 4) {
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve >> 10,
		    arc_anon->arcs_lsize[ARC_BUFC_METADATA] >> 10,
		    arc_anon->arcs_lsize[ARC_BUFC_DATA] >> 10,
		    reserve >> 10, arc_c >> 10);
		return (SET_ERROR(ERESTART));
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}
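
/*
 * Worked example (illustrative): with arc_c = 1 GB, the check above fails a
 * reservation once (reserve + arc_tempreserve + anon_size) exceeds 512 MB
 * while anonymous (dirty) data alone exceeds 256 MB; the caller then gets
 * ERESTART and backs off, retrying in a later transaction group.
 */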

static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
    kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
	size->value.ui64 = state->arcs_size;
	evict_data->value.ui64 = state->arcs_lsize[ARC_BUFC_DATA];
	evict_metadata->value.ui64 = state->arcs_lsize[ARC_BUFC_METADATA];
}

static int
arc_kstat_update(kstat_t *ksp, int rw)
{
	arc_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	} else {
		arc_kstat_update_state(arc_anon,
		    &as->arcstat_anon_size,
		    &as->arcstat_anon_evictable_data,
		    &as->arcstat_anon_evictable_metadata);
		arc_kstat_update_state(arc_mru,
		    &as->arcstat_mru_size,
		    &as->arcstat_mru_evictable_data,
		    &as->arcstat_mru_evictable_metadata);
		arc_kstat_update_state(arc_mru_ghost,
		    &as->arcstat_mru_ghost_size,
		    &as->arcstat_mru_ghost_evictable_data,
		    &as->arcstat_mru_ghost_evictable_metadata);
		arc_kstat_update_state(arc_mfu,
		    &as->arcstat_mfu_size,
		    &as->arcstat_mfu_evictable_data,
		    &as->arcstat_mfu_evictable_metadata);
		arc_kstat_update_state(arc_mfu_ghost,
		    &as->arcstat_mfu_ghost_size,
		    &as->arcstat_mfu_ghost_evictable_data,
		    &as->arcstat_mfu_ghost_evictable_metadata);
	}

	return (0);
}

void
arc_init(void)
{
	/*
	 * allmem is "all memory that we could possibly use".
	 */
#ifdef _KERNEL
	uint64_t allmem = ptob(physmem - swapfs_minfree);
#else
	uint64_t allmem = (physmem * PAGESIZE) / 2;
#endif

	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* Start out with 1/8 of all memory */
	arc_c = allmem / 8;

#ifdef _KERNEL
	/*
	 * On architectures where the physical memory can be larger
	 * than the addressable space (intel in 32-bit mode), we may
	 * need to limit the cache to 1/8 of VM size.
	 */
	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc_c_min = MAX(allmem / 32, 64 << 20);
	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
	if (allmem >= 1 << 30)
		arc_c_max = allmem - (1 << 30);
	else
		arc_c_max = arc_c_min;
	arc_c_max = MAX(allmem * 3 / 4, arc_c_max);

	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (ie. over 64MB)
	 */
	if (zfs_arc_max > 64 << 20 && zfs_arc_max < allmem)
		arc_c_max = zfs_arc_max;
	if (zfs_arc_min > 64 << 20 && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;
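
	/*
	 * Worked example (illustrative): with allmem = 16 GB and no tunables
	 * set, arc_c_min = MAX(16G / 32, 64M) = 512 MB and
	 * arc_c_max = MAX(16G * 3 / 4, 16G - 1G) = 15 GB.
	 */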

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* limit meta-data to 1/4 of the arc capacity */
	arc_meta_limit = arc_c_max / 4;

	/* Allow the tunable to override if it is reasonable */
	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
		arc_meta_limit = zfs_arc_meta_limit;

	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
		arc_c_min = arc_meta_limit / 2;

	if (zfs_arc_meta_min > 0) {
		arc_meta_min = zfs_arc_meta_min;
	} else {
		arc_meta_min = arc_c_min / 2;
	}

	if (zfs_arc_grow_retry > 0)
		arc_grow_retry = zfs_arc_grow_retry;

	if (zfs_arc_shrink_shift > 0)
		arc_shrink_shift = zfs_arc_shrink_shift;

	/*
	 * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
	 */
	if (arc_no_grow_shift >= arc_shrink_shift)
		arc_no_grow_shift = arc_shrink_shift - 1;

	if (zfs_arc_p_min_shift > 0)
		arc_p_min_shift = zfs_arc_p_min_shift;

	/* if kmem_flags are set, lets try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_l2c_only = &ARC_l2c_only;

	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);

	list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));

	buf_init();

	arc_thread_exit = 0;
	arc_eviction_list = NULL;
	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		arc_ksp->ks_update = arc_kstat_update;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	arc_dead = FALSE;
	arc_warm = B_FALSE;

	/*
	 * Calculate maximum amount of dirty data per pool.
	 *
	 * If it has been set by /etc/system, take that.
	 * Otherwise, use a percentage of physical memory defined by
	 * zfs_dirty_data_max_percent (default 10%) with a cap at
	 * zfs_dirty_data_max_max (default 4GB).
	 */
	if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = physmem * PAGESIZE *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
	}
}

void
arc_fini(void)
{
	mutex_enter(&arc_reclaim_thr_lock);
	arc_thread_exit = 1;
	while (arc_thread_exit != 0)
		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
	mutex_exit(&arc_reclaim_thr_lock);

	arc_flush(NULL);

	arc_dead = TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	mutex_destroy(&arc_eviction_mtx);
	mutex_destroy(&arc_reclaim_thr_lock);
	cv_destroy(&arc_reclaim_thr_cv);

	list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);

	mutex_destroy(&arc_anon->arcs_mtx);
	mutex_destroy(&arc_mru->arcs_mtx);
	mutex_destroy(&arc_mru_ghost->arcs_mtx);
	mutex_destroy(&arc_mfu->arcs_mtx);
	mutex_destroy(&arc_mfu_ghost->arcs_mtx);
	mutex_destroy(&arc_l2c_only->arcs_mtx);

	buf_fini();

	ASSERT0(arc_loaned_bytes);
}

/*
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate for this there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction.  If a compressible buffer is
 * found during scanning and selected for writing to an L2ARC device, we
 * temporarily boost scanning headroom during the next scan cycle to make
 * sure we adapt to compression effects (which might significantly reduce
 * the data volume we write to L2ARC).  The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |
 *	                         V
 *	          +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	          +==============================+
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from tail of these
 * lists as pictured, the l2arc_feed_thread() will search from the list heads
 * for eligible buffers, greatly increasing its chance of finding them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
 */
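
/*
 * Illustrative example (not part of this file): on illumos the tunables
 * listed above can be adjusted from /etc/system, e.g.:
 *
 *	set zfs:l2arc_write_max = 0x8000000	(128 MB per feed interval)
 *	set zfs:l2arc_noprefetch = 0		(also cache prefetched buffers)
 *
 * The values shown are examples only, not recommendations.
 */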

static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
	    HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
		return (B_FALSE);

	return (B_TRUE);
}

static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}

/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write()
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT(df->l2df_data != NULL);
		ASSERT(df->l2df_func != NULL);
		df->l2df_func(df->l2df_data, df->l2df_size);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}

/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	int64_t bytes_dropped = 0;

	cb = zio->io_private;
	ASSERT(cb != NULL);
	dev = cb->l2wcb_dev;
	ASSERT(dev != NULL);
	head = cb->l2wcb_head;
	ASSERT(head != NULL);
	buflist = &dev->l2ad_buflist;
	ASSERT(buflist != NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	mutex_enter(&dev->l2ad_mtx);

	/*
	 * All writes completed, or an error was hit.
	 */
	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * This buffer misses out.  It may be in a stage
			 * of eviction.  Its ARC_FLAG_L2_WRITING flag will be
			 * left set, denying reads to this buffer.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
			continue;
		}

		/*
		 * It's possible that this buffer got evicted from the L1 cache
		 * before we grabbed the vdev + hash locks, in which case
		 * arc_hdr_realloc freed b_tmp_cdata for us if it was allocated.
		 * Only free the buffer if we still have an L1 hdr.
		 */
		if (HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_tmp_cdata != NULL &&
		    HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
			l2arc_release_cdata_buf(hdr);

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, hdr);
			hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;

			ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		}

		/*
		 * Allow ARC to begin reads to this L2ARC entry.
		 */
		hdr->b_flags &= ~ARC_FLAG_L2_WRITING;

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	ASSERT(!HDR_HAS_L1HDR(head));
	kmem_cache_free(hdr_l2only_cache, head);
	mutex_exit(&dev->l2ad_mtx);

	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}

/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	int equal;

	ASSERT(zio->io_vd != NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT(cb != NULL);
	buf = cb->l2rcb_buf;
	ASSERT(buf != NULL);

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * If the buffer was compressed, decompress it first.
	 */
	if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
		l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
	ASSERT(zio->io_data != NULL);

	/*
	 * Check this survived the L2ARC journey.
	 */
	equal = arc_cksum_equal(buf);
	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = buf;
		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = SET_ERROR(EIO);
		}
		if (!equal)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
			    buf->b_data, zio->io_size, arc_read_done, buf,
			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}

/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
	list_t *list = NULL;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 1:
		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	case 2:
		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 3:
		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	}

	ASSERT(!(MUTEX_HELD(*lock)));
	mutex_enter(*lock);
	return (list);
}

/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	arc_buf_hdr_t *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;
	int64_t bytes_evicted = 0;

	buflist = &dev->l2ad_buflist;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device.  There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}

	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		if (HDR_L2_WRITE_HEAD(hdr)) {
			/*
			 * We hit a write head node.  Leave it for
			 * l2arc_write_done().
			 */
			list_remove(buflist, hdr);
			mutex_exit(hash_lock);
			continue;
		}

		if (!all && HDR_HAS_L2HDR(hdr) &&
		    (hdr->b_l2hdr.b_daddr > taddr ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		ASSERT(HDR_HAS_L2HDR(hdr));
		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_size.
			 */
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				hdr->b_flags |= ARC_FLAG_L2_EVICTED;
			}

			/* Tell ARC this no longer exists in L2ARC. */
			ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
			hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
			list_remove(buflist, hdr);

			/* This may have been leftover after a failed write. */
			hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);

	vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
	dev->l2ad_evict = taddr;
}

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The headroom_boost is an in-out parameter used to maintain headroom boost
 * state between calls to this function.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
    boolean_t *headroom_boost)
{
	arc_buf_hdr_t *hdr, *hdr_prev, *head;
	list_t *list;
	uint64_t write_asize, write_psize, write_sz, headroom,
	    buf_compress_minsz;
	void *buf_data;
	kmutex_t *list_lock;
	boolean_t full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);
	const boolean_t do_headroom_boost = *headroom_boost;

	ASSERT(dev->l2ad_vdev != NULL);

	/* Lower the flag now, we might want to raise it again later. */
	*headroom_boost = B_FALSE;

	pio = NULL;
	write_sz = write_asize = write_psize = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_FLAG_L2_WRITE_HEAD;
	head->b_flags |= ARC_FLAG_HAS_L2HDR;

	/*
	 * We will want to try to compress buffers that are at least 2x the
	 * device sector size.
	 */
	buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&dev->l2ad_mtx);
	for (int try = 0; try <= 3; try++) {
		uint64_t passed_sz = 0;

		list = l2arc_list_locked(try, &list_lock);

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			hdr = list_head(list);
		else
			hdr = list_tail(list);

		headroom = target_sz * l2arc_headroom;
		if (do_headroom_boost)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; hdr; hdr = hdr_prev) {
			kmutex_t *hash_lock;
			uint64_t buf_sz;

			if (arc_warm == B_FALSE)
				hdr_prev = list_next(list, hdr);
			else
				hdr_prev = list_prev(list, hdr);

			hash_lock = HDR_LOCK(hdr);
			if (!mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += hdr->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, hdr)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + hdr->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(&dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdr->b_l2hdr.b_dev = dev;
			hdr->b_flags |= ARC_FLAG_L2_WRITING;
			/*
			 * Temporarily stash the data buffer in b_tmp_cdata.
			 * The subsequent write step will pick it up from
			 * there. This is because can't access b_l1hdr.b_buf
			 * without holding the hash_lock, which we in turn
			 * can't access without holding the ARC list locks
			 * (which we want to avoid during compression/writing).
			 */
			HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
			hdr->b_l2hdr.b_asize = hdr->b_size;
			hdr->b_l1hdr.b_tmp_cdata = hdr->b_l1hdr.b_buf->b_data;

			buf_sz = hdr->b_size;
			hdr->b_flags |= ARC_FLAG_HAS_L2HDR;

			list_insert_head(&dev->l2ad_buflist, hdr);

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(hdr->b_l1hdr.b_buf);
			arc_cksum_compute(hdr->b_l1hdr.b_buf, B_TRUE);

			mutex_exit(hash_lock);

			write_sz += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_sz);
		mutex_exit(&dev->l2ad_mtx);
		ASSERT(!HDR_HAS_L1HDR(head));
		kmem_cache_free(hdr_l2only_cache, head);
		return (0);
	}

	/*
	 * Now start writing the buffers. We're starting at the write head
	 * and work backwards, retracing the course of the buffer selector
	 * loop above.
	 */
	for (hdr = list_prev(&dev->l2ad_buflist, head); hdr;
	    hdr = list_prev(&dev->l2ad_buflist, hdr)) {
		uint64_t buf_sz;

		/*
		 * We shouldn't need to lock the buffer here, since we flagged
		 * it as ARC_FLAG_L2_WRITING in the previous step, but we must
		 * take care to only access its L2 cache parameters. In
		 * particular, hdr->l1hdr.b_buf may be invalid by now due to
		 * ARC eviction.
		 */
		hdr->b_l2hdr.b_daddr = dev->l2ad_hand;

		if ((HDR_L2COMPRESS(hdr)) &&
		    hdr->b_l2hdr.b_asize >= buf_compress_minsz) {
			if (l2arc_compress_buf(hdr)) {
				/*
				 * If compression succeeded, enable headroom
				 * boost on the next scan cycle.
				 */
				*headroom_boost = B_TRUE;
			}
		}

		/*
		 * Pick up the buffer data we had previously stashed away
		 * (and now potentially also compressed).
		 */
		buf_data = hdr->b_l1hdr.b_tmp_cdata;
		buf_sz = hdr->b_l2hdr.b_asize;

		/* Compression may have squashed the buffer to zero length. */
		if (buf_sz != 0) {
			uint64_t buf_p_sz;

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			write_asize += buf_sz;
			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
			write_psize += buf_p_sz;
			dev->l2ad_hand += buf_p_sz;
		}
	}

	mutex_exit(&dev->l2ad_mtx);

	ASSERT3U(write_asize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	ARCSTAT_INCR(arcstat_l2_asize, write_asize);
	vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}

/*
 * Compresses an L2ARC buffer.
 * The data to be compressed must be prefilled in l1hdr.b_tmp_cdata and its
 * size in l2hdr->b_asize. This routine tries to compress the data and
 * depending on the compression result there are three possible outcomes:
 * *) The buffer was incompressible. The original l2hdr contents were left
 *    untouched and are ready for writing to an L2 device.
 * *) The buffer was all-zeros, so there is no need to write it to an L2
 *    device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
 *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
 *    data buffer which holds the compressed data to be written, and b_asize
 *    tells us how much data there is. b_compress is set to the appropriate
 *    compression algorithm. Once writing is done, invoke
 *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
 *
 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
 * buffer was incompressible).
 */
static boolean_t
l2arc_compress_buf(arc_buf_hdr_t *hdr)
{
	void *cdata;
	size_t csize, len, rounded;
	ASSERT(HDR_HAS_L2HDR(hdr));
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF);
	ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);

	len = l2hdr->b_asize;
	cdata = zio_data_buf_alloc(len);
	ASSERT3P(cdata, !=, NULL);
	csize = zio_compress_data(ZIO_COMPRESS_LZ4, hdr->b_l1hdr.b_tmp_cdata,
	    cdata, l2hdr->b_asize);

	rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
	if (rounded > csize) {
		bzero((char *)cdata + csize, rounded - csize);
		csize = rounded;
	}

	if (csize == 0) {
		/* zero block, indicate that there's nothing to write */
		zio_data_buf_free(cdata, len);
		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_EMPTY);
		l2hdr->b_asize = 0;
		hdr->b_l1hdr.b_tmp_cdata = NULL;
		ARCSTAT_BUMP(arcstat_l2_compress_zeros);
		return (B_TRUE);
	} else if (csize > 0 && csize < len) {
		/*
		 * Compression succeeded, we'll keep the cdata around for
		 * writing and release it afterwards.
		 */
		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_LZ4);
		l2hdr->b_asize = csize;
		hdr->b_l1hdr.b_tmp_cdata = cdata;
		ARCSTAT_BUMP(arcstat_l2_compress_successes);
		return (B_TRUE);
	} else {
		/*
		 * Compression failed, release the compressed buffer.
		 * l2hdr will be left unmodified.
		 */
		zio_data_buf_free(cdata, len);
		ARCSTAT_BUMP(arcstat_l2_compress_failures);
		return (B_FALSE);
	}
}

/*
 * Decompresses a zio read back from an l2arc device. On success, the
 * underlying zio's io_data buffer is overwritten by the uncompressed
 * version. On decompression error (corrupt compressed stream), the
 * zio->io_error value is set to signal an I/O error.
 *
 * Please note that the compressed data stream is not checksummed, so
 * if the underlying device is experiencing data corruption, we may feed
 * corrupt data to the decompressor, so the decompressor needs to be
 * able to handle this situation (LZ4 does).
 */
static void
l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
{
	ASSERT(L2ARC_IS_VALID_COMPRESS(c));

	if (zio->io_error != 0) {
		/*
		 * An io error has occurred, just restore the original io
		 * size in preparation for a main pool read.
		 */
		zio->io_orig_size = zio->io_size = hdr->b_size;
		return;
	}

	if (c == ZIO_COMPRESS_EMPTY) {
		/*
		 * An empty buffer results in a null zio, which means we
		 * need to fill its io_data after we're done restoring the
		 * buffer's contents.
		 */
		ASSERT(hdr->b_l1hdr.b_buf != NULL);
		bzero(hdr->b_l1hdr.b_buf->b_data, hdr->b_size);
		zio->io_data = zio->io_orig_data = hdr->b_l1hdr.b_buf->b_data;
	} else {
		ASSERT(zio->io_data != NULL);
		/*
		 * We copy the compressed data from the start of the arc buffer
		 * (the zio_read will have pulled in only what we need, the
		 * rest is garbage which we will overwrite at decompression)
		 * and then decompress back to the ARC data buffer. This way we
		 * can minimize copying by simply decompressing back over the
		 * original compressed data (rather than decompressing to an
		 * aux buffer and then copying back the uncompressed buffer,
		 * which is likely to be much larger).
		 */
		uint64_t csize;
		void *cdata;

		csize = zio->io_size;
		cdata = zio_data_buf_alloc(csize);
		bcopy(zio->io_data, cdata, csize);
		if (zio_decompress_data(c, cdata, zio->io_data, csize,
		    hdr->b_size) != 0)
			zio->io_error = EIO;
		zio_data_buf_free(cdata, csize);
	}

	/* Restore the expected uncompressed IO size. */
	zio->io_orig_size = zio->io_size = hdr->b_size;
}

/*
 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
 * This buffer serves as a temporary holder of compressed data while
 * the buffer entry is being written to an l2arc device. Once that is
 * done, we can dispose of it.
 */
static void
l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
{
	ASSERT(HDR_HAS_L1HDR(hdr));
	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_EMPTY) {
		/*
		 * If the data was compressed, then we've allocated a
		 * temporary buffer for it, so now we need to release it.
		 */
		ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
		zio_data_buf_free(hdr->b_l1hdr.b_tmp_cdata,
		    hdr->b_size);
	}
	hdr->b_l1hdr.b_tmp_cdata = NULL;
}

/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();
	boolean_t headroom_boost = B_FALSE;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.   This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size();

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;

	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(&remdev->l2ad_buflist);
	mutex_destroy(&remdev->l2ad_mtx);
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}