/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
/*
 * This tunable disables predictive prefetch.  Note that it leaves "prescient"
 * prefetch (e.g. prefetch for zfs send) intact.  Unlike predictive prefetch,
 * prescient prefetch never issues i/os that end up not being needed,
 * so it can't hurt performance.
 */
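/*
 * (This is a global tunable; on illumos it can, for example, be set in
 * /etc/system with "set zfs:zfs_prefetch_disable = 1" or patched at runtime
 * with mdb.  The exact mechanism is platform-dependent.)
 */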
boolean_t zfs_prefetch_disable = B_FALSE;
/* max # of streams per zfetch */
uint32_t zfetch_max_streams = 8;
/* min time before stream reclaim */
uint32_t zfetch_min_sec_reap = 2;
/* max bytes to prefetch per stream (default 8MB) */
uint32_t zfetch_max_distance = 8 * 1024 * 1024;
/* max bytes to prefetch indirects for per stream (default 64MB) */
uint32_t zfetch_max_idistance = 64 * 1024 * 1024;
/* max number of bytes in an array_read in which we allow prefetching (1MB) */
uint64_t zfetch_array_rd_sz = 1024 * 1024;
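/*
 * For example, assuming the defaults above and a 128K data block size,
 * zfetch_max_distance lets a stream run 8MB / 128K = 64 data blocks ahead
 * of the application, and zfetch_max_idistance lets indirect-block prefetch
 * run 64MB / 128K = 512 data blocks ahead.
 */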
typedef struct zfetch_stats {
        kstat_named_t zfetchstat_hits;
        kstat_named_t zfetchstat_misses;
        kstat_named_t zfetchstat_max_streams;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
        { "hits",               KSTAT_DATA_UINT64 },
        { "misses",             KSTAT_DATA_UINT64 },
        { "max_streams",        KSTAT_DATA_UINT64 },
};
#define ZFETCHSTAT_BUMP(stat) \
        atomic_inc_64(&zfetch_stats.stat.value.ui64);
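/*
 * These counters are published through the "zfetchstats" kstat created in
 * zfetch_init() below; on illumos they can be inspected with, for example,
 * "kstat -m zfs -n zfetchstats".
 */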
kstat_t *zfetch_ksp;

void
zfetch_init(void)
{
        zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
            KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
            KSTAT_FLAG_VIRTUAL);
        if (zfetch_ksp != NULL) {
                zfetch_ksp->ks_data = &zfetch_stats;
                kstat_install(zfetch_ksp);
        }
}
void
zfetch_fini(void)
{
        if (zfetch_ksp != NULL) {
                kstat_delete(zfetch_ksp);
                zfetch_ksp = NULL;
        }
}
/*
 * This takes a pointer to a zfetch structure and a dnode.  It performs the
 * necessary setup for the zfetch structure, grokking data from the
 * associated dnode.
 */
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
        if (zf == NULL)
                return;

        zf->zf_dnode = dno;

        list_create(&zf->zf_stream, sizeof (zstream_t),
            offsetof(zstream_t, zs_node));

        rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}
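/*
 * (dmu_zfetch_init() is typically called when a dnode is set up, and
 * dmu_zfetch_fini() below when it is torn down; see dnode.c.)
 */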
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
        ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
        list_remove(&zf->zf_stream, zs);
        mutex_destroy(&zs->zs_lock);
        kmem_free(zs, sizeof (*zs));
}
/*
 * Clean-up state associated with a zfetch structure (e.g. destroy the
 * streams).  This doesn't free the zfetch_t itself, that's left to the caller.
 */
void
dmu_zfetch_fini(zfetch_t *zf)
{
        zstream_t *zs;

        ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));

        rw_enter(&zf->zf_rwlock, RW_WRITER);
        while ((zs = list_head(&zf->zf_stream)) != NULL)
                dmu_zfetch_stream_remove(zf, zs);
        rw_exit(&zf->zf_rwlock);
        list_destroy(&zf->zf_stream);
        rw_destroy(&zf->zf_rwlock);

        zf->zf_dnode = NULL;
}
/*
 * If there aren't too many streams already, create a new stream.
 * The "blkid" argument is the next block that we expect this stream to access.
 * While we're here, clean up old streams (which haven't been
 * accessed for at least zfetch_min_sec_reap seconds).
 */
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
        zstream_t *zs_next;
        int numstreams = 0;

        ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
        /*
         * Clean up old streams.
         */
        for (zstream_t *zs = list_head(&zf->zf_stream);
            zs != NULL; zs = zs_next) {
                zs_next = list_next(&zf->zf_stream, zs);
                if (((gethrtime() - zs->zs_atime) / NANOSEC) >
                    zfetch_min_sec_reap)
                        dmu_zfetch_stream_remove(zf, zs);
                else
                        numstreams++;
        }
        /*
         * The maximum number of streams is normally zfetch_max_streams,
         * but for small files we lower it such that it's at least possible
         * for all the streams to be non-overlapping.
         *
         * If we are already at the maximum number of streams for this file,
         * even after removing old streams, then don't create this stream.
         */
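        /*
         * For example, with the default 8MB zfetch_max_distance, a file of
         * roughly 32MB only has room for about 32M / 8M = 4 non-overlapping
         * streams, so max_streams below is lowered from 8 to about 4 for
         * that file.
         */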
        uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
            zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
            zfetch_max_distance));
        if (numstreams >= max_streams) {
                ZFETCHSTAT_BUMP(zfetchstat_max_streams);
                return;
        }
        zstream_t *zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
        zs->zs_blkid = blkid;
        zs->zs_pf_blkid = blkid;
        zs->zs_ipf_blkid = blkid;
        zs->zs_atime = gethrtime();
        mutex_init(&zs->zs_lock, NULL, MUTEX_DEFAULT, NULL);

        list_insert_head(&zf->zf_stream, zs);
}
/*
 * This is the predictive prefetch entry point.  It associates the dnode
 * access specified by the blkid and nblks arguments with a prefetch stream,
 * predicts further accesses based on that stream's statistics, and initiates
 * speculative prefetch.  The fetch_data argument specifies whether actual
 * data blocks should be fetched:
 *   FALSE -- prefetch only indirect blocks for predicted data blocks;
 *   TRUE -- prefetch predicted data blocks plus following indirect blocks.
 */
void
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
{
        zstream_t *zs;
        int64_t pf_start, ipf_start, ipf_istart, ipf_iend;
        int64_t pf_ahead_blks, max_blks;
        int epbs, max_dist_blks, pf_nblks, ipf_nblks;
        uint64_t end_of_access_blkid = blkid + nblks;
        if (zfs_prefetch_disable)
                return;
        /*
         * As a fast path for small (single-block) files, ignore access
         * to the first block.
         */
        if (blkid == 0)
                return;
        rw_enter(&zf->zf_rwlock, RW_READER);
        for (zs = list_head(&zf->zf_stream); zs != NULL;
            zs = list_next(&zf->zf_stream, zs)) {
                if (blkid == zs->zs_blkid) {
                        mutex_enter(&zs->zs_lock);
                        /*
                         * zs_blkid could have changed before we
                         * acquired zs_lock; re-check them here.
                         */
                        if (blkid != zs->zs_blkid) {
                                mutex_exit(&zs->zs_lock);
                                continue;
                        }
                        break;
                }
        }

        if (zs == NULL) {
                /*
                 * This access is not part of any existing stream.  Create
                 * a new stream for it.
                 */
                ZFETCHSTAT_BUMP(zfetchstat_misses);
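                /*
                 * Creating a stream requires the write lock.  If another
                 * thread holds zf_rwlock as reader, rw_tryupgrade() fails and
                 * we simply skip creating a stream for this access rather
                 * than block.
                 */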
                if (rw_tryupgrade(&zf->zf_rwlock))
                        dmu_zfetch_stream_create(zf, end_of_access_blkid);
                rw_exit(&zf->zf_rwlock);
                return;
        }
        /*
         * This access was to a block that we issued a prefetch for on
         * behalf of this stream.  Issue further prefetches for this stream.
         *
         * Normally, we start prefetching where we stopped
         * prefetching last (zs_pf_blkid).  But when we get our first
         * hit on this stream, zs_pf_blkid == zs_blkid, we don't
         * want to prefetch the block we just accessed.  In this case,
         * start just after the block we just accessed.
         */
        pf_start = MAX(zs->zs_pf_blkid, end_of_access_blkid);
        /*
         * Double our amount of prefetched data, but don't let the
         * prefetch get further ahead than zfetch_max_distance.
         */
        if (fetch_data) {
                max_dist_blks =
                    zfetch_max_distance >> zf->zf_dnode->dn_datablkshift;
                /*
                 * Previously, we were (zs_pf_blkid - blkid) ahead.  We
                 * want to now be double that, so read that amount again,
                 * plus the amount we are catching up by (i.e. the amount
                 * read just now).
                 */
                pf_ahead_blks = zs->zs_pf_blkid - blkid + nblks;
                max_blks = max_dist_blks - (pf_start - end_of_access_blkid);
                pf_nblks = MIN(pf_ahead_blks, max_blks);
        } else {
                pf_nblks = 0;
        }
        zs->zs_pf_blkid = pf_start + pf_nblks;
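        /*
         * Worked example: suppose the stream was 4 blocks ahead
         * (zs_pf_blkid - blkid == 4) and the application just read 2 blocks.
         * We prefetch 4 + 2 = 6 more blocks, leaving zs_pf_blkid 8 blocks
         * ahead of the new read position, i.e. double the previous distance
         * (subject to the zfetch_max_distance cap).
         */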
        /*
         * Do the same for indirects, starting from where we stopped last,
         * or where we will stop reading data blocks (and the indirects
         * that point to them).
         */
        ipf_start = MAX(zs->zs_ipf_blkid, zs->zs_pf_blkid);
        max_dist_blks = zfetch_max_idistance >> zf->zf_dnode->dn_datablkshift;
        /*
         * We want to double our distance ahead of the data prefetch
         * (or reader, if we are not prefetching data).  Previously, we
         * were (zs_ipf_blkid - blkid) ahead.  To double that, we read
         * that amount again, plus the amount we are catching up by
         * (i.e. the amount read now + the amount of data prefetched now).
         */
        pf_ahead_blks = zs->zs_ipf_blkid - blkid + nblks + pf_nblks;
        max_blks = max_dist_blks - (ipf_start - end_of_access_blkid);
        ipf_nblks = MIN(pf_ahead_blks, max_blks);
        zs->zs_ipf_blkid = ipf_start + ipf_nblks;
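        /*
         * Convert the range of data blocks whose indirects we want,
         * [ipf_start, zs_ipf_blkid), into a range of level-1 block numbers.
         * epbs is the shift for the number of block pointers per indirect
         * block, so rounding each endpoint up to an indirect-block boundary
         * and shifting right by epbs yields the L1 blkids covering those
         * data blocks.
         */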
        epbs = zf->zf_dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
        ipf_istart = P2ROUNDUP(ipf_start, 1 << epbs) >> epbs;
        ipf_iend = P2ROUNDUP(zs->zs_ipf_blkid, 1 << epbs) >> epbs;
        zs->zs_atime = gethrtime();
        zs->zs_blkid = end_of_access_blkid;
        mutex_exit(&zs->zs_lock);
        rw_exit(&zf->zf_rwlock);
        /*
         * dbuf_prefetch() is asynchronous (even when it needs to read
         * indirect blocks), but we still prefer to drop our locks before
         * calling it to reduce the time we hold them.
         */
        for (int i = 0; i < pf_nblks; i++) {
                dbuf_prefetch(zf->zf_dnode, 0, pf_start + i,
                    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
        }
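        /*
         * Level 0 above is the data blocks themselves; level 1 below is the
         * indirect blocks that point at the predicted data blocks.
         */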
        for (int64_t iblk = ipf_istart; iblk < ipf_iend; iblk++) {
                dbuf_prefetch(zf->zf_dnode, 1, iblk,
                    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
        }
        ZFETCHSTAT_BUMP(zfetchstat_hits);
}