/*
 * kernel - Change machdep.pmap_dynamic_delete default
 * [dragonfly.git] / sys / boot / common / bcache.c
 * blob caaa602f188656eb6ffadaed55ffa75734e7506a
 */
1 /*-
2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
26 * $FreeBSD: src/sys/boot/common/bcache.c,v 1.12 2003/08/25 23:30:41 obrien Exp $
 * Simple LRU block cache
 */
33 #include <stand.h>
34 #include <string.h>
35 #include <bitstring.h>
37 #include "bootstrap.h"
39 /* #define BCACHE_DEBUG */
41 #ifdef BCACHE_DEBUG
42 #define BCACHE_TIMEOUT 10
43 # define DEBUG(fmt, args...) printf("%s: " fmt "\n" , __func__ , ## args)
44 #else
45 #define BCACHE_TIMEOUT 2
46 # define DEBUG(fmt, args...)
47 #endif
50 struct bcachectl
52 daddr_t bc_blkno;
53 time_t bc_stamp;
54 int bc_count;
57 static struct bcachectl *bcache_ctl;
58 static caddr_t bcache_data;
59 static bitstr_t *bcache_miss;
60 static u_int bcache_nblks;
61 static u_int bcache_blksize;
62 static u_int bcache_hits, bcache_misses, bcache_ops, bcache_bypasses;
63 static u_int bcache_flushes;
64 static u_int bcache_bcount;
66 static void bcache_invalidate(daddr_t blkno);
67 static void bcache_insert(caddr_t buf, daddr_t blkno);
68 static int bcache_lookup(caddr_t buf, daddr_t blkno);
71 * Initialise the cache for (nblks) of (bsize).
73 int
74 bcache_init(u_int nblks, size_t bsize)
76 /* discard any old contents */
77 if (bcache_data != NULL) {
78 free(bcache_data);
79 bcache_data = NULL;
80 free(bcache_ctl);
83 /* Allocate control structures */
84 bcache_nblks = nblks;
85 bcache_blksize = bsize;
86 bcache_data = malloc(bcache_nblks * bcache_blksize);
87 bcache_ctl = (struct bcachectl *)malloc(bcache_nblks * sizeof(struct bcachectl));
88 bcache_miss = bit_alloc((bcache_nblks + 1) / 2);
89 if ((bcache_data == NULL) || (bcache_ctl == NULL) || (bcache_miss == NULL)) {
90 if (bcache_miss)
91 free(bcache_miss);
92 if (bcache_ctl)
93 free(bcache_ctl);
94 if (bcache_data)
95 free(bcache_data);
96 bcache_data = NULL;
97 return(ENOMEM);
100 return(0);
104 * Flush the cache
106 void
107 bcache_flush(void)
109 u_int i;
111 bcache_flushes++;
113 /* Flush the cache */
114 for (i = 0; i < bcache_nblks; i++) {
115 bcache_ctl[i].bc_count = -1;
116 bcache_ctl[i].bc_blkno = -1;
121 * Handle a write request; write directly to the disk, and populate the
122 * cache with the new values.
124 static int
125 write_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size,
126 char *buf, size_t *rsize)
128 struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
129 daddr_t i, nblk;
130 int err;
132 nblk = size / bcache_blksize;
134 /* Invalidate the blocks being written */
135 for (i = 0; i < nblk; i++) {
136 bcache_invalidate(blk + i);
139 /* Write the blocks */
140 err = dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize);
142 /* Populate the block cache with the new data */
143 if (err == 0) {
144 for (i = 0; i < nblk; i++) {
145 bcache_insert(buf + (i * bcache_blksize),blk + i);
149 return err;
153 * Handle a read request; fill in parts of the request that can
154 * be satisfied by the cache, use the supplied strategy routine to do
155 * device I/O and then use the I/O results to populate the cache.
157 static int
158 read_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size,
159 char *buf, size_t *rsize)
161 struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
162 int p_size, result;
163 daddr_t p_blk, i, j, nblk;
164 caddr_t p_buf;
166 nblk = size / bcache_blksize;
167 result = 0;
169 /* Satisfy any cache hits up front */
170 for (i = 0; i < nblk; i++) {
171 if (bcache_lookup(buf + (bcache_blksize * i), blk + i)) {
172 bit_set(bcache_miss, i); /* cache miss */
173 bcache_misses++;
174 } else {
175 bit_clear(bcache_miss, i); /* cache hit */
176 bcache_hits++;
180 /* Go back and fill in any misses XXX optimise */
181 p_blk = -1;
182 p_buf = NULL;
183 p_size = 0;
184 for (i = 0; i < nblk; i++) {
185 if (bit_test(bcache_miss, i)) {
186 /* miss, add to pending transfer */
187 if (p_blk == -1) {
188 p_blk = blk + i;
189 p_buf = buf + (bcache_blksize * i);
190 p_size = 1;
191 } else {
192 p_size++;
194 } else if (p_blk != -1) {
195 /* hit, complete pending transfer */
196 result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, p_size * bcache_blksize, p_buf, NULL);
197 if (result != 0)
198 goto done;
199 for (j = 0; j < p_size; j++)
200 bcache_insert(p_buf + (j * bcache_blksize), p_blk + j);
201 p_blk = -1;
204 if (p_blk != -1) {
205 /* pending transfer left */
206 result = dd->dv_strategy(dd->dv_devdata, rw, p_blk, p_size * bcache_blksize, p_buf, NULL);
207 if (result != 0)
208 goto done;
209 for (j = 0; j < p_size; j++)
210 bcache_insert(p_buf + (j * bcache_blksize), p_blk + j);
213 done:
214 if ((result == 0) && (rsize != NULL))
215 *rsize = size;
216 return(result);
220 * Requests larger than 1/2 the cache size will be bypassed and go
221 * directly to the disk. XXX tune this.
224 bcache_strategy(void *devdata, int unit, int rw, daddr_t blk, size_t size,
225 char *buf, size_t *rsize)
227 static int bcache_unit = -1;
228 struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
230 bcache_ops++;
232 if(bcache_unit != unit) {
233 bcache_flush();
234 bcache_unit = unit;
237 /* bypass large requests, or when the cache is inactive */
238 if ((bcache_data == NULL) || ((size * 2 / bcache_blksize) > bcache_nblks)) {
239 DEBUG("bypass %d from %d", size / bcache_blksize, blk);
240 bcache_bypasses++;
241 return(dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
244 switch (rw) {
245 case F_READ:
246 return read_strategy(devdata, unit, rw, blk, size, buf, rsize);
247 case F_WRITE:
248 return write_strategy(devdata, unit, rw, blk, size, buf, rsize);
250 return -1;
255 * Insert a block into the cache. Retire the oldest block to do so, if required.
257 * XXX the LRU algorithm will fail after 2^31 blocks have been transferred.
259 static void
260 bcache_insert(caddr_t buf, daddr_t blkno)
262 time_t now;
263 int cand, ocount;
264 u_int i;
266 time(&now);
267 cand = 0; /* assume the first block */
268 ocount = bcache_ctl[0].bc_count;
270 /* find the oldest block */
271 for (i = 1; i < bcache_nblks; i++) {
272 if (bcache_ctl[i].bc_blkno == blkno) {
273 /* reuse old entry */
274 cand = i;
275 break;
277 if (bcache_ctl[i].bc_count < ocount) {
278 ocount = bcache_ctl[i].bc_count;
279 cand = i;
283 DEBUG("insert blk %d -> %d @ %d # %d", blkno, cand, now, bcache_bcount);
284 bcopy(buf, bcache_data + (bcache_blksize * cand), bcache_blksize);
285 bcache_ctl[cand].bc_blkno = blkno;
286 bcache_ctl[cand].bc_stamp = now;
287 bcache_ctl[cand].bc_count = bcache_bcount++;
291 * Look for a block in the cache. Blocks more than BCACHE_TIMEOUT seconds old
292 * may be stale (removable media) and thus are discarded. Copy the block out
293 * if successful and return zero, or return nonzero on failure.
295 static int
296 bcache_lookup(caddr_t buf, daddr_t blkno)
298 time_t now;
299 u_int i;
301 time(&now);
303 for (i = 0; i < bcache_nblks; i++)
304 /* cache hit? */
305 if ((bcache_ctl[i].bc_blkno == blkno) && ((bcache_ctl[i].bc_stamp + BCACHE_TIMEOUT) >= now)) {
306 bcopy(bcache_data + (bcache_blksize * i), buf, bcache_blksize);
307 DEBUG("hit blk %d <- %d (now %d then %d)", blkno, i, now, bcache_ctl[i].bc_stamp);
308 return(0);
310 return(ENOENT);
314 * Invalidate a block from the cache.
316 static void
317 bcache_invalidate(daddr_t blkno)
319 u_int i;
321 for (i = 0; i < bcache_nblks; i++) {
322 if (bcache_ctl[i].bc_blkno == blkno) {
323 bcache_ctl[i].bc_count = -1;
324 bcache_ctl[i].bc_blkno = -1;
325 DEBUG("invalidate blk %d", blkno);
326 break;
331 COMMAND_SET(bcachestat, "bcachestat", "get disk block cache stats", command_bcache);
333 static int
334 command_bcache(int argc, char *argv[])
336 u_int i;
338 for (i = 0; i < bcache_nblks; i++) {
339 printf("%08x %04x %04x|", bcache_ctl[i].bc_blkno, (unsigned int)bcache_ctl[i].bc_stamp & 0xffff, bcache_ctl[i].bc_count & 0xffff);
340 if (((i + 1) % 4) == 0)
341 printf("\n");
343 printf("\n%u ops %u bypasses %u hits %u misses %u flushes\n", bcache_ops, bcache_bypasses, bcache_hits, bcache_misses, bcache_flushes);
344 return(CMD_OK);