/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>

/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */

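/*
 * For orientation, a rough sketch of how these operations map onto the
 * code below: vdev_cache_init() and vdev_cache_fini() create and destroy
 * the per-vdev cache, vdev_cache_read() implements the read/allocate path
 * (issuing a VCBS-sized fill I/O on a miss), vdev_cache_fill() completes
 * that fill, vdev_cache_write() patches or invalidates cached lines after
 * writes, and vdev_cache_purge() evicts every entry.  How callers outside
 * this file wire these into the vdev I/O pipeline is not shown here.
 */
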
/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 *
 * TODO: Note that with the current ZFS code, it turns out that the
 * vdev cache is not helpful, and in some cases actually harmful.  It
 * is better if we disable this.  Once some time has passed, we should
 * actually remove this to simplify the code.  For now we just disable
 * it by setting the zfs_vdev_cache_size to zero.  Note that Solaris 11
 * has made these same changes.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 0;
int zfs_vdev_cache_bshift = 16;

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */

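/*
 * Worked example (illustrative, using the defaults above): with
 * zfs_vdev_cache_bshift = 16, VCBS is 1 << 16 = 64KB.  A 4KB read at
 * offset 0x12345000 that passes the checks in vdev_cache_read() maps to
 * the cache line at P2ALIGN(0x12345000, VCBS) = 0x12340000; on a miss, a
 * single VCBS-sized fill I/O is issued for that line and the caller's 4KB
 * is copied out starting at P2PHASE(0x12345000, VCBS) = 0x5000 within the
 * cached buffer.  Note that with the default zfs_vdev_cache_size of 0,
 * vdev_cache_allocate() never creates an entry, so the cache is
 * effectively disabled.
 */
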
kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_add_64(&vdc_stats.stat.value.ui64, 1);

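/*
 * As used below: "hits" counts reads satisfied from an already-filled
 * cache line, "delegations" counts reads attached to a fill I/O that is
 * still in flight, and "misses" counts reads that had to start a new
 * fill I/O.
 */
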
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}

static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}

/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}

/*
 * Allocate an entry in the cache.  At this point we don't have the data;
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}

static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}

/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT(fio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == fio);
	ASSERT(ve->ve_offset == fio->io_offset);
	ASSERT(ve->ve_data == fio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	while ((pio = zio_walk_parents(fio)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}

/*
 * Read data from the cache.  Returns 0 on cache hit, errno on a miss.
 */
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (EINVAL);

	if (zio->io_size > zfs_vdev_cache_max)
		return (EOVERFLOW);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (EXDEV);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, NULL);

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (ESTALE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (0);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (0);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (ENOMEM);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (0);
}

/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}

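/*
 * Note on the interplay with the fill path: if a write overlaps a line
 * whose fill I/O is still outstanding, vdev_cache_write() cannot patch
 * the buffer, so it sets ve_missed_update instead.  vdev_cache_read()
 * then treats that line as stale, and vdev_cache_fill() evicts it once
 * the fill completes (after first satisfying any reads that were queued
 * before the missed update).
 */
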
void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}

void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}

void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}

void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}

void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}