Replace the global buffer cache hash table with a per-vnode red-black tree.
[dragonfly.git] / sys / vfs / ufs / ufs_bmap.c
blob 2808bbb0feaf13eb9d3b46274f51920ffd3f7d27
/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_bmap.c	8.7 (Berkeley) 3/21/95
 * $FreeBSD: src/sys/ufs/ufs/ufs_bmap.c,v 1.34.2.1 2000/03/17 10:12:14 ps Exp $
 * $DragonFly: src/sys/vfs/ufs/ufs_bmap.c,v 1.8 2006/03/05 18:38:39 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include "quota.h"
#include "inode.h"
#include "ufsmount.h"
#include "ufs_extern.h"
/*
 * Bmap converts the logical block number of a file to its physical block
 * number on the disk.  The conversion is done by using the logical block
 * number to index into the array of block pointers described by the dinode.
 *
 * ufs_bmap(struct vnode *a_vp, ufs_daddr_t a_bn, struct vnode **a_vpp,
 *	    ufs_daddr_t *a_bnp, int *a_runp, int *a_runb)
 */
int
ufs_bmap(struct vop_bmap_args *ap)
{
	/*
	 * Check for underlying vnode requests and ensure that logical
	 * to physical mapping is requested.
	 */
	if (ap->a_vpp != NULL)
		*ap->a_vpp = VTOI(ap->a_vp)->i_devvp;
	if (ap->a_bnp == NULL)
		return (0);

	return (ufs_bmaparray(ap->a_vp, ap->a_bn, ap->a_bnp, NULL, NULL,
	    ap->a_runp, ap->a_runb));
}
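#if 0
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * mapping logical block 'lbn' of 'vp' to a device block number by calling
 * ufs_bmaparray() directly, exactly as ufs_bmap() does above.  A value of
 * -1 stored in *dbnp means the logical block has no disk address assigned
 * (a hole); 'run' receives the number of immediately following logical
 * blocks that are physically contiguous on disk, capped by the mount's
 * maximum I/O size.
 */
static int
example_map_block(struct vnode *vp, ufs_daddr_t lbn, ufs_daddr_t *dbnp)
{
	int run = 0;

	return (ufs_bmaparray(vp, lbn, dbnp, NULL, NULL, &run, NULL));
}
#endif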
/*
 * Indirect blocks are now on the vnode for the file.  They are given negative
 * logical block numbers.  Indirect blocks are addressed by the negative
 * address of the first data block to which they point.  Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point.  Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ufs_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
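/*
 * Illustrative worked example, not part of the original file.  Assuming
 * NDADDR == 12 direct blocks and MNINDIR(ump) == 2048 pointers per indirect
 * block (e.g. an 8K block holding 4-byte ufs_daddr_t entries): the single
 * indirect block maps data blocks 12..2059, so it is itself addressed as
 * lbn -12, the negative of the first data block it points to.  The first
 * indirect block reached through the double indirect block maps data blocks
 * starting at 12 + 2048 == 2060 and therefore sits at lbn -2060; the double
 * indirect block itself is addressed one lower, at lbn -2061.
 */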
int
ufs_bmaparray(struct vnode *vp, ufs_daddr_t bn, ufs_daddr_t *bnp,
	      struct indir *ap, int *nump, int *runp, int *runb)
{
	struct inode *ip;
	struct buf *bp;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR+1], *xap;
	ufs_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;

	ip = VTOI(vp);
	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	devvp = ump->um_devvp;
#ifdef DIAGNOSTIC
	if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
		panic("ufs_bmaparray: invalid arguments");
#endif

	if (runp) {
		*runp = 0;
	}
	if (runb) {
		*runb = 0;
	}
	maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

	xap = ap == NULL ? a : ap;
	if (!nump)
		nump = &num;
	error = ufs_getlbns(vp, bn, xap, nump);
	if (error)
		return (error);

	num = *nump;
	if (num == 0) {
		*bnp = blkptrtodb(ump, ip->i_db[bn]);
		if (*bnp == 0)
			*bnp = -1;
		else if (runp) {
			daddr_t bnb = bn;
			for (++bn; bn < NDADDR && *runp < maxrun &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++*runp);
			bn = bnb;
			if (runb && (bn > 0)) {
				for (--bn; (bn >= 0) && (*runb < maxrun) &&
					is_sequential(ump, ip->i_db[bn],
					    ip->i_db[bn+1]);
					--bn, ++*runb);
			}
		}
		return (0);
	}
	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[xap->in_off];

	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet and
		 * the indirect block isn't in the cache, or if we were
		 * looking for an indirect block and we've found it.
		 */
		metalbn = xap->in_lbn;
		if ((daddr == 0 && !findblk(vp, metalbn)) || metalbn == bn)
			break;
		/*
		 * If we get here, we've either got the block in the cache
		 * or we have a disk address for it, go fetch it.
		 */
		if (bp)
			bqrelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
		if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
			if (!daddr)
				panic("ufs_bmaparray: indirect block not in cache");
#endif
			bp->b_bio2.bio_blkno = blkptrtodb(ump, daddr);
			bp->b_flags |= B_READ;
			bp->b_flags &= ~(B_INVAL|B_ERROR);
			vfs_busy_pages(bp, 0);
			vn_strategy(bp->b_vp, &bp->b_bio1);
			error = biowait(bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ufs_daddr_t *)bp->b_data)[xap->in_off];
		if (num == 1 && daddr && runp) {
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && *runp < maxrun &&
			    is_sequential(ump,
				((ufs_daddr_t *)bp->b_data)[bn - 1],
				((ufs_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++*runp);
			bn = xap->in_off;
			if (runb && bn) {
				for(--bn; bn >= 0 && *runb < maxrun &&
					is_sequential(ump, ((daddr_t *)bp->b_data)[bn],
					    ((daddr_t *)bp->b_data)[bn+1]);
					--bn, ++*runb);
			}
		}
	}
	if (bp)
		bqrelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = daddr == 0 ? -1 : daddr;
	return (0);
}
/*
 * Create an array of logical block number/offset pairs which represent the
 * path of indirect blocks required to access a data block.  The first "pair"
 * contains the logical block number of the appropriate single, double or
 * triple indirect block and the offset into the inode indirect block array.
 * Note, the logical block number of the inode single/double/triple indirect
 * block appears twice in the array, once with the offset into the i_ib and
 * once with the offset into the page itself.
 */
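/*
 * Illustrative worked example, not part of the original file.  With the
 * assumed values NDADDR == 12 and MNINDIR(ump) == 2048, a request for
 * logical block 50000 needs two levels of indirection and yields three
 * entries: {in_lbn -2061, in_off 1} (the double indirect block, reached
 * through i_ib[1]), {in_lbn -2061, in_off 23} (slot 23 within that block),
 * and {in_lbn -49164, in_off 836} (the second-level indirect block, whose
 * first data block is 12 + 2048 + 23*2048 == 49164, and 49164 + 836 ==
 * 50000).  Note the same lbn appearing twice, as described above.
 */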
int
ufs_getlbns(struct vnode *vp, ufs_daddr_t bn, struct indir *ap, int *nump)
{
	long blockcnt, metalbn, realbn;
	struct ufsmount *ump;
	int i, numlevels, off;
	int64_t qblockcnt;

	ump = VFSTOUFS(vp->v_mount);
	if (nump)
		*nump = 0;
	numlevels = 0;
	realbn = bn;
	if ((long)bn < 0)
		bn = -(long)bn;

	/* The first NDADDR blocks are direct blocks. */
	if (bn < NDADDR)
		return (0);

	/*
	 * Determine the number of levels of indirection.  After this loop
	 * is done, blockcnt indicates the number of data blocks possible
	 * at the previous level of indirection, and NIADDR - i is the number
	 * of levels of indirection needed to locate the requested block.
	 */
	for (blockcnt = 1, i = NIADDR, bn -= NDADDR;; i--, bn -= blockcnt) {
		if (i == 0)
			return (EFBIG);
		/*
		 * Use int64_t's here to avoid overflow for triple indirect
		 * blocks when longs have 32 bits and the block size is more
		 * than 4K.
		 */
		qblockcnt = (int64_t)blockcnt * MNINDIR(ump);
		if (bn < qblockcnt)
			break;
		blockcnt = qblockcnt;
	}

	/* Calculate the address of the first meta-block. */
	if (realbn >= 0)
		metalbn = -(realbn - bn + NIADDR - i);
	else
		metalbn = -(-realbn - bn + NIADDR - i);

	/*
	 * At each iteration, off is the offset into the bap array which is
	 * an array of disk addresses at the current level of indirection.
	 * The logical block number and the offset in that block are stored
	 * into the argument array.
	 */
	ap->in_lbn = metalbn;
	ap->in_off = off = NIADDR - i;
	ap->in_exists = 0;
	ap++;
	for (++numlevels; i <= NIADDR; i++) {
		/* If searching for a meta-data block, quit when found. */
		if (metalbn == realbn)
			break;

		off = (bn / blockcnt) % MNINDIR(ump);

		++numlevels;
		ap->in_lbn = metalbn;
		ap->in_off = off;
		ap->in_exists = 0;
		++ap;

		metalbn -= -1 + off * blockcnt;
		blockcnt /= MNINDIR(ump);
	}
	if (nump)
		*nump = numlevels;
	return (0);
}
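#if 0
/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * that walks the indirect-block path ufs_getlbns() returns for a logical
 * block.  Each entry names the lbn of an indirect block and the offset
 * within it that leads to the next level (the first entry's offset indexes
 * the inode's i_ib[] array instead).
 */
static void
example_print_indir_path(struct vnode *vp, ufs_daddr_t lbn)
{
	struct indir a[NIADDR + 1];
	int i, num, error;

	num = 0;
	error = ufs_getlbns(vp, lbn, a, &num);
	if (error)
		return;
	for (i = 0; i < num; i++)
		printf("level %d: indirect lbn %ld offset %d\n",
		    i, (long)a[i].in_lbn, a[i].in_off);
}
#endif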