2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/boot/common/bcache.c,v 1.12 2003/08/25 23:30:41 obrien Exp $
30 * Simple LRU block cache
35 #include <bitstring.h>
37 #include "bootstrap.h"
39 /* #define BCACHE_DEBUG */
42 #define BCACHE_TIMEOUT 10
43 # define DEBUG(fmt, args...) printf("%s: " fmt "\n" , __func__ , ## args)
45 #define BCACHE_TIMEOUT 2
46 # define DEBUG(fmt, args...)
/*
 * Cache state, all file-scope and private to this module:
 * control entries, the block data store, a per-request miss bitmap,
 * cache geometry, and statistics counters.
 */
/* Per-block metadata array; one bcachectl entry per cached block. */
57 static struct bcachectl
*bcache_ctl
;
/* Contiguous backing store for cached block data. */
58 static caddr_t bcache_data
;
/* Scratch bitmap recording which blocks of the current read request missed. */
59 static bitstr_t
*bcache_miss
;
/* Cache geometry: number of cache blocks, and block size in bytes. */
60 static u_int bcache_nblks
;
61 static u_int bcache_blksize
;
/* Statistics reported by the "bcachestat" command. */
62 static u_int bcache_hits
, bcache_misses
, bcache_ops
, bcache_bypasses
;
63 static u_int bcache_flushes
;
/* Monotonic insertion counter used as the LRU clock (stored in bc_count). */
64 static u_int bcache_bcount
;
/* Internal helpers, defined below. */
66 static void bcache_invalidate(daddr_t blkno
);
67 static void bcache_insert(caddr_t buf
, daddr_t blkno
);
68 static int bcache_lookup(caddr_t buf
, daddr_t blkno
);
/*
 * (Re)build the LRU block cache: release any previous cache contents,
 * allocate the data store, control array and miss bitmap for the new
 * geometry, then mark every entry invalid.
 */
71 * Initialise the cache for (nblks) of (bsize).
74 bcache_init(u_int nblks
, size_t bsize
)
76 /* discard any old contents */
77 if (bcache_data
!= NULL
) {
83 /* Allocate control structures */
85 bcache_blksize
= bsize
;
/* One contiguous buffer holds the data for every cached block. */
86 bcache_data
= malloc(bcache_nblks
* bcache_blksize
);
87 bcache_ctl
= (struct bcachectl
*)malloc(bcache_nblks
* sizeof(struct bcachectl
));
/* Bitmap of per-request misses; NOTE(review): the (nblks + 1) / 2 sizing
   passed to bit_alloc() looks like a bit count — confirm against bitstring(3). */
88 bcache_miss
= bit_alloc((bcache_nblks
+ 1) / 2);
/* Any allocation failure disables the cache (failure path not shown here). */
89 if ((bcache_data
== NULL
) || (bcache_ctl
== NULL
) || (bcache_miss
== NULL
)) {
/* A bc_count/bc_blkno of -1 marks an entry as empty/invalid. */
113 /* Flush the cache */
114 for (i
= 0; i
< bcache_nblks
; i
++) {
115 bcache_ctl
[i
].bc_count
= -1;
116 bcache_ctl
[i
].bc_blkno
= -1;
/*
 * Write path: invalidate any cached copies of the target blocks, pass the
 * request straight through to the underlying device strategy routine, then
 * re-populate the cache from the freshly written buffer.
 */
121 * Handle a write request; write directly to the disk, and populate the
122 * cache with the new values.
125 write_strategy(void *devdata
, int unit
, int rw
, daddr_t blk
, size_t size
,
126 char *buf
, size_t *rsize
)
128 struct bcache_devdata
*dd
= (struct bcache_devdata
*)devdata
;
/* Request length in cache blocks; assumes size is a multiple of
   bcache_blksize — TODO(review): confirm with callers. */
132 nblk
= size
/ bcache_blksize
;
/* Drop stale cached copies before the device write. */
134 /* Invalidate the blocks being written */
135 for (i
= 0; i
< nblk
; i
++) {
136 bcache_invalidate(blk
+ i
);
/* Pass the whole request through to the underlying device. */
139 /* Write the blocks */
140 err
= dd
->dv_strategy(dd
->dv_devdata
, rw
, blk
, size
, buf
, rsize
);
/* Cache the just-written data so subsequent reads hit. */
142 /* Populate the block cache with the new data */
144 for (i
= 0; i
< nblk
; i
++) {
145 bcache_insert(buf
+ (i
* bcache_blksize
),blk
+ i
);
/*
 * Read path: serve whatever blocks the cache holds, coalesce runs of
 * consecutive misses into as few device transfers as possible, and insert
 * the newly read blocks back into the cache.
 */
153 * Handle a read request; fill in parts of the request that can
154 * be satisfied by the cache, use the supplied strategy routine to do
155 * device I/O and then use the I/O results to populate the cache.
158 read_strategy(void *devdata
, int unit
, int rw
, daddr_t blk
, size_t size
,
159 char *buf
, size_t *rsize
)
161 struct bcache_devdata
*dd
= (struct bcache_devdata
*)devdata
;
163 daddr_t p_blk
, i
, j
, nblk
;
/* Request length in cache blocks (size assumed block-aligned). */
166 nblk
= size
/ bcache_blksize
;
/* First pass: copy cached blocks into the caller's buffer and record
   hit/miss per block in bcache_miss. */
169 /* Satisfy any cache hits up front */
170 for (i
= 0; i
< nblk
; i
++) {
171 if (bcache_lookup(buf
+ (bcache_blksize
* i
), blk
+ i
)) {
172 bit_set(bcache_miss
, i
); /* cache miss */
175 bit_clear(bcache_miss
, i
); /* cache hit */
/* Second pass: walk the miss bitmap, accumulating consecutive misses
   into one pending device transfer (p_blk/p_buf/p_size) and issuing it
   whenever a hit ends the run. */
180 /* Go back and fill in any misses XXX optimise */
184 for (i
= 0; i
< nblk
; i
++) {
185 if (bit_test(bcache_miss
, i
)) {
/* Start of a new run: remember where in the caller's buffer it lands. */
186 /* miss, add to pending transfer */
189 p_buf
= buf
+ (bcache_blksize
* i
);
194 } else if (p_blk
!= -1) {
/* A hit terminates the pending run; read it from the device now. */
195 /* hit, complete pending transfer */
196 result
= dd
->dv_strategy(dd
->dv_devdata
, rw
, p_blk
, p_size
* bcache_blksize
, p_buf
, NULL
);
/* Cache each block just read from the device. */
199 for (j
= 0; j
< p_size
; j
++)
200 bcache_insert(p_buf
+ (j
* bcache_blksize
), p_blk
+ j
);
/* A run of misses may extend to the end of the request; flush it here. */
205 /* pending transfer left */
206 result
= dd
->dv_strategy(dd
->dv_devdata
, rw
, p_blk
, p_size
* bcache_blksize
, p_buf
, NULL
);
209 for (j
= 0; j
< p_size
; j
++)
210 bcache_insert(p_buf
+ (j
* bcache_blksize
), p_blk
+ j
);
/* On success, report the full request size back to the caller. */
214 if ((result
== 0) && (rsize
!= NULL
))
/*
 * Front-end dispatcher installed as the device strategy routine: detects
 * unit changes, bypasses oversized requests (and an uninitialised cache),
 * and routes the rest to the cached read/write paths.
 */
220 * Requests larger than 1/2 the cache size will be bypassed and go
221 * directly to the disk. XXX tune this.
224 bcache_strategy(void *devdata
, int unit
, int rw
, daddr_t blk
, size_t size
,
225 char *buf
, size_t *rsize
)
/* Last unit seen; a change means cached blocks belong to another device. */
227 static int bcache_unit
= -1;
228 struct bcache_devdata
*dd
= (struct bcache_devdata
*)devdata
;
/* Unit changed: cached contents are stale (handling elided in this view). */
232 if(bcache_unit
!= unit
) {
/* size * 2 / blksize > nblks  <=>  request exceeds half the cache. */
237 /* bypass large requests, or when the cache is inactive */
238 if ((bcache_data
== NULL
) || ((size
* 2 / bcache_blksize
) > bcache_nblks
)) {
239 DEBUG("bypass %d from %d", size
/ bcache_blksize
, blk
);
/* Go straight to the device, bypassing the cache entirely. */
241 return(dd
->dv_strategy(dd
->dv_devdata
, rw
, blk
, size
, buf
, rsize
));
/* Dispatch on rw (switch/case context elided in this view). */
246 return read_strategy(devdata
, unit
, rw
, blk
, size
, buf
, rsize
);
248 return write_strategy(devdata
, unit
, rw
, blk
, size
, buf
, rsize
);
/*
 * Store one block in the cache, evicting the least-recently-inserted
 * entry (smallest bc_count) if the block is not already present.
 */
255 * Insert a block into the cache. Retire the oldest block to do so, if required.
257 * XXX the LRU algorithm will fail after 2^31 blocks have been transferred.
260 bcache_insert(caddr_t buf
, daddr_t blkno
)
/* Candidate victim slot; start with slot 0's age as the baseline. */
267 cand
= 0; /* assume the first block */
268 ocount
= bcache_ctl
[0].bc_count
;
/* Scan for either an existing entry for blkno or the oldest slot. */
270 /* find the oldest block */
271 for (i
= 1; i
< bcache_nblks
; i
++) {
272 if (bcache_ctl
[i
].bc_blkno
== blkno
) {
/* Same block already cached: overwrite it in place. */
273 /* reuse old entry */
/* Track the slot with the smallest bc_count (oldest insertion). */
277 if (bcache_ctl
[i
].bc_count
< ocount
) {
278 ocount
= bcache_ctl
[i
].bc_count
;
283 DEBUG("insert blk %d -> %d @ %d # %d", blkno
, cand
, now
, bcache_bcount
);
/* Copy the block data into the victim slot and stamp its metadata. */
284 bcopy(buf
, bcache_data
+ (bcache_blksize
* cand
), bcache_blksize
);
285 bcache_ctl
[cand
].bc_blkno
= blkno
;
/* bc_stamp holds the wall-clock time used for staleness checks in lookup. */
286 bcache_ctl
[cand
].bc_stamp
= now
;
/* Post-increment: this entry gets the current tick, clock advances. */
287 bcache_ctl
[cand
].bc_count
= bcache_bcount
++;
/*
 * Linear search of the cache for blkno; entries older than BCACHE_TIMEOUT
 * seconds are treated as stale (removable media may have changed) and
 * ignored.  On a hit, copies the block into buf.
 */
291 * Look for a block in the cache. Blocks more than BCACHE_TIMEOUT seconds old
292 * may be stale (removable media) and thus are discarded. Copy the block out
293 * if successful and return zero, or return nonzero on failure.
303 for (i
= 0; i
< bcache_nblks
; i
++)
/* Match on block number AND freshness of the entry's timestamp. */
305 if ((bcache_ctl
[i
].bc_blkno
== blkno
) && ((bcache_ctl
[i
].bc_stamp
+ BCACHE_TIMEOUT
) >= now
)) {
/* Hit: hand the cached data to the caller. */
306 bcopy(bcache_data
+ (bcache_blksize
* i
), buf
, bcache_blksize
);
307 DEBUG("hit blk %d <- %d (now %d then %d)", blkno
, i
, now
, bcache_ctl
[i
].bc_stamp
);
/*
 * Drop any cache entry for blkno by resetting its metadata to the
 * empty-slot sentinel values (-1).
 */
314 * Invalidate a block from the cache.
317 bcache_invalidate(daddr_t blkno
)
321 for (i
= 0; i
< bcache_nblks
; i
++) {
322 if (bcache_ctl
[i
].bc_blkno
== blkno
) {
/* -1 for both fields marks the slot empty, matching bcache_init's flush. */
323 bcache_ctl
[i
].bc_count
= -1;
324 bcache_ctl
[i
].bc_blkno
= -1;
325 DEBUG("invalidate blk %d", blkno
);
/* Register the loader's "bcachestat" command, handled by command_bcache(). */
331 COMMAND_SET(bcachestat
, "bcachestat", "get disk block cache stats", command_bcache
);
/*
 * "bcachestat" command: dump each entry's block number plus the low 16 bits
 * of its timestamp and LRU count, four entries per row, then print the
 * aggregate operation counters.
 */
334 command_bcache(int argc
, char *argv
[])
338 for (i
= 0; i
< bcache_nblks
; i
++) {
/* Masking with 0xffff keeps the per-entry columns a fixed width. */
339 printf("%08x %04x %04x|", bcache_ctl
[i
].bc_blkno
, (unsigned int)bcache_ctl
[i
].bc_stamp
& 0xffff, bcache_ctl
[i
].bc_count
& 0xffff);
/* Break the row after every fourth entry. */
340 if (((i
+ 1) % 4) == 0)
343 printf("\n%u ops %u bypasses %u hits %u misses %u flushes\n", bcache_ops
, bcache_bypasses
, bcache_hits
, bcache_misses
, bcache_flushes
);