/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/ufs/ffs/ffs_alloc.c,v 1.64.2.2 2001/09/21 19:15:21 dillon Exp $
 */
#include "opt_quota.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <machine/inttypes.h>

#include "ufs_extern.h"
#include "ffs_extern.h"
typedef ufs_daddr_t	allocfcn_t (struct inode *ip, int cg,
				    ufs_daddr_t bpref, int size);

static ufs_daddr_t ffs_alloccg (struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t ffs_alloccgblk (struct inode *, struct buf *, ufs_daddr_t);
static void	ffs_blkfree_cg(struct fs *, struct vnode *, cdev_t, ino_t,
			uint32_t, ufs_daddr_t, long);
static int	ffs_checkblk (struct inode *, ufs_daddr_t, long);
static void	ffs_clusteracct (struct fs *, struct cg *, ufs_daddr_t, int);
static ufs_daddr_t ffs_clusteralloc (struct inode *, int, ufs_daddr_t, int);
static ino_t	ffs_dirpref (struct inode *);
static ufs_daddr_t ffs_fragextend (struct inode *, int, long, int, int);
static void	ffs_fserr (struct fs *, uint, char *);
static u_long	ffs_hashalloc
		    (struct inode *, int, long, int, allocfcn_t *);
static ino_t	ffs_nodealloccg (struct inode *, int, ufs_daddr_t, int);
static ufs_daddr_t ffs_mapsearch (struct fs *, struct cg *, ufs_daddr_t, int);
/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
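/*
 * Worked example of the size rule above (illustrative numbers only,
 * assuming a common 8K/1K layout: fs_bsize = 8192, fs_fsize = 1024,
 * fs_frag = 8): legal request sizes are 1024, 2048, ..., 8192.  A request
 * for 3000 bytes fails the fragoff() test because 3000 is not a multiple
 * of 1024, and a request for 9216 bytes fails the size > fs_bsize test;
 * either one trips the "ffs_alloc: bad size" panic below.
 */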
ffs_alloc(struct inode *ip, ufs_daddr_t lbn, ufs_daddr_t bpref, int size,
	  struct ucred *cred, ufs_daddr_t *bnp)

	if ((uint)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		kprintf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
			devtoname(ip->i_dev), (long)fs->fs_bsize, size,
			fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
		panic("ffs_alloc: missing credential");
#endif /* DIAGNOSTIC */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
	if (cred->cr_uid != 0 &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
	error = ufs_chkdq(ip, (long)btodb(size), cred, 0);
	if (bpref >= fs->fs_size)
		cg = ino_to_cg(fs, ip->i_number);
		cg = dtog(fs, bpref);
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
					 ffs_alloccg);
	ip->i_blocks += btodb(size);
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) ufs_chkdq(ip, (long)-btodb(size), cred, FORCE);
	ffs_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->fs_fsmnt);
/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block are given, and a preference
 * and new size are also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
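/*
 * Illustrative scenario (assumed 1K fragments, not tied to any particular
 * filesystem): the last block of a file currently holds osize = 2048 bytes
 * (two fragments) and a write grows it to nsize = 4096.  ffs_fragextend()
 * first tries to claim the two free fragments immediately following bprev;
 * only if they are already taken does the code below fall back to
 * ffs_hashalloc() and copy the old data to a freshly allocated location.
 */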
170 ffs_realloccg(struct inode
*ip
, ufs_daddr_t lbprev
, ufs_daddr_t bpref
,
171 int osize
, int nsize
, struct ucred
*cred
, struct buf
**bpp
)
175 int cg
, request
, error
;
176 ufs_daddr_t bprev
, bno
;
181 if ((uint
)osize
> fs
->fs_bsize
|| fragoff(fs
, osize
) != 0 ||
182 (uint
)nsize
> fs
->fs_bsize
|| fragoff(fs
, nsize
) != 0) {
184 "dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
185 devtoname(ip
->i_dev
), (long)fs
->fs_bsize
, osize
,
186 nsize
, fs
->fs_fsmnt
);
187 panic("ffs_realloccg: bad size");
190 panic("ffs_realloccg: missing credential");
191 #endif /* DIAGNOSTIC */
192 if (cred
->cr_uid
!= 0 &&
193 freespace(fs
, fs
->fs_minfree
) - numfrags(fs
, nsize
- osize
) < 0)
195 if ((bprev
= ip
->i_db
[lbprev
]) == 0) {
196 kprintf("dev = %s, bsize = %ld, bprev = %ld, fs = %s\n",
197 devtoname(ip
->i_dev
), (long)fs
->fs_bsize
, (long)bprev
,
199 panic("ffs_realloccg: bad bprev");
202 * Allocate the extra space in the buffer.
204 error
= bread(ITOV(ip
), lblktodoff(fs
, lbprev
), osize
, &bp
);
210 if(bp
->b_bio2
.bio_offset
== NOOFFSET
) {
211 if( lbprev
>= NDADDR
)
212 panic("ffs_realloccg: lbprev out of range");
213 bp
->b_bio2
.bio_offset
= fsbtodoff(fs
, bprev
);
217 error
= ufs_chkdq(ip
, (long)btodb(nsize
- osize
), cred
, 0);
224 * Check for extension in the existing location.
226 cg
= dtog(fs
, bprev
);
227 bno
= ffs_fragextend(ip
, cg
, (long)bprev
, osize
, nsize
);
229 if (bp
->b_bio2
.bio_offset
!= fsbtodoff(fs
, bno
))
230 panic("ffs_realloccg: bad blockno");
231 ip
->i_blocks
+= btodb(nsize
- osize
);
232 ip
->i_flag
|= IN_CHANGE
| IN_UPDATE
;
234 bzero((char *)bp
->b_data
+ osize
, (uint
)nsize
- osize
);
239 * Allocate a new disk location.
241 if (bpref
>= fs
->fs_size
)
243 switch ((int)fs
->fs_optim
) {
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
253 if (fs
->fs_minfree
<= 5 ||
254 fs
->fs_cstotal
.cs_nffree
>
255 (off_t
)fs
->fs_dsize
* fs
->fs_minfree
/ (2 * 100))
257 log(LOG_NOTICE
, "%s: optimization changed from SPACE to TIME\n",
259 fs
->fs_optim
= FS_OPTTIME
;
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
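		/*
		 * Worked example of the two thresholds (illustrative numbers
		 * only): with fs_dsize = 1,000,000 frags and fs_minfree = 8%,
		 * the SPACE case above stays in effect while more than
		 * 1,000,000 * 8 / 200 = 40,000 frags sit in partial blocks
		 * and switches to TIME once fragmentation drops below that;
		 * the TIME case below reverts to SPACE once free frags reach
		 * 1,000,000 * (8 - 2) / 100 = 60,000, i.e. fragmentation has
		 * grown to within 2% of the reserve.
		 */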
272 request
= fs
->fs_bsize
;
273 if (fs
->fs_cstotal
.cs_nffree
<
274 (off_t
)fs
->fs_dsize
* (fs
->fs_minfree
- 2) / 100)
276 log(LOG_NOTICE
, "%s: optimization changed from TIME to SPACE\n",
278 fs
->fs_optim
= FS_OPTSPACE
;
281 kprintf("dev = %s, optim = %ld, fs = %s\n",
282 devtoname(ip
->i_dev
), (long)fs
->fs_optim
, fs
->fs_fsmnt
);
283 panic("ffs_realloccg: bad optim");
286 bno
= (ufs_daddr_t
)ffs_hashalloc(ip
, cg
, (long)bpref
, request
,
289 bp
->b_bio2
.bio_offset
= fsbtodoff(fs
, bno
);
290 if (!DOINGSOFTDEP(ITOV(ip
)))
291 ffs_blkfree(ip
, bprev
, (long)osize
);
293 ffs_blkfree(ip
, bno
+ numfrags(fs
, nsize
),
294 (long)(request
- nsize
));
295 ip
->i_blocks
+= btodb(nsize
- osize
);
296 ip
->i_flag
|= IN_CHANGE
| IN_UPDATE
;
298 bzero((char *)bp
->b_data
+ osize
, (uint
)nsize
- osize
);
304 * Restore user's disk quota because allocation failed.
306 (void) ufs_chkdq(ip
, (long)-btodb(nsize
- osize
), cred
, FORCE
);
313 ffs_fserr(fs
, cred
->cr_uid
, "filesystem full");
314 uprintf("\n%s: write failed, filesystem is full\n", fs
->fs_fsmnt
);
SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");
/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
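/*
 * Illustrative run (hypothetical file layout): a cluster write covers
 * logical blocks 8-15 of a file whose current physical blocks are
 * scattered.  ffs_reallocblks() asks ffs_clusteralloc() for eight
 * contiguous filesystem blocks, rewrites the eight pointers in the
 * relevant block map, frees the old scattered blocks, and patches each
 * buffer's bio_offset so the pending writes land in the new location.
 */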
static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, FFS_ASYNCFREE, doasyncfree, CTLFLAG_RW, &doasyncfree, 0, "");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, FFS_REALLOCBLKS, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

static volatile int prtrealloc = 0;

/*
 * ffs_reallocblks(struct vnode *a_vp, struct cluster_save *a_buflist)
 */
ffs_reallocblks(struct vop_reallocblks_args *ap)
353 struct buf
*sbp
, *ebp
;
354 ufs_daddr_t
*bap
, *sbap
, *ebap
= NULL
;
355 struct cluster_save
*buflist
;
356 ufs_daddr_t start_lbn
, end_lbn
, soff
, newblk
, blkno
;
360 struct indir start_ap
[NIADDR
+ 1], end_ap
[NIADDR
+ 1], *idp
;
361 int i
, len
, slen
, start_lvl
, end_lvl
, pref
, ssize
;
363 if (doreallocblks
== 0)
368 if (fs
->fs_contigsumsize
<= 0)
370 buflist
= ap
->a_buflist
;
371 len
= buflist
->bs_nchildren
;
372 start_lbn
= lblkno(fs
, buflist
->bs_children
[0]->b_loffset
);
373 end_lbn
= start_lbn
+ len
- 1;
375 for (i
= 0; i
< len
; i
++)
376 if (!ffs_checkblk(ip
,
377 dofftofsb(fs
, buflist
->bs_children
[i
]->b_bio2
.bio_offset
), fs
->fs_bsize
))
378 panic("ffs_reallocblks: unallocated block 1");
379 for (i
= 1; i
< len
; i
++) {
380 if (buflist
->bs_children
[i
]->b_loffset
!= lblktodoff(fs
, start_lbn
) + lblktodoff(fs
, i
))
381 panic("ffs_reallocblks: non-logical cluster");
383 boffset
= buflist
->bs_children
[0]->b_bio2
.bio_offset
;
384 ssize
= (int)fsbtodoff(fs
, fs
->fs_frag
);
385 for (i
= 1; i
< len
- 1; i
++)
386 if (buflist
->bs_children
[i
]->b_bio2
.bio_offset
!= boffset
+ (i
* ssize
))
387 panic("ffs_reallocblks: non-physical cluster %d", i
);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
394 if (dtog(fs
, dofftofsb(fs
, buflist
->bs_children
[0]->b_bio2
.bio_offset
)) !=
395 dtog(fs
, dofftofsb(fs
, buflist
->bs_children
[len
- 1]->b_bio2
.bio_offset
)))
397 if (ufs_getlbns(vp
, start_lbn
, start_ap
, &start_lvl
) ||
398 ufs_getlbns(vp
, end_lbn
, end_ap
, &end_lvl
))
	/*
	 * Get the starting offset and block map for the first block and
	 * the number of blocks that will fit into sbap starting at soff.
	 */
404 if (start_lvl
== 0) {
407 slen
= NDADDR
- soff
;
409 idp
= &start_ap
[start_lvl
- 1];
410 if (bread(vp
, lblktodoff(fs
, idp
->in_lbn
), (int)fs
->fs_bsize
, &sbp
)) {
414 sbap
= (ufs_daddr_t
*)sbp
->b_data
;
416 slen
= fs
->fs_nindir
- soff
;
419 * Find the preferred location for the cluster.
421 pref
= ffs_blkpref(ip
, start_lbn
, soff
, sbap
);
424 * If the block range spans two block maps, get the second map.
426 if (end_lvl
== 0 || (idp
= &end_ap
[end_lvl
- 1])->in_off
+ 1 >= len
) {
430 if (start_ap
[start_lvl
-1].in_lbn
== idp
->in_lbn
)
431 panic("ffs_reallocblk: start == end");
433 ssize
= len
- (idp
->in_off
+ 1);
434 if (bread(vp
, lblktodoff(fs
, idp
->in_lbn
), (int)fs
->fs_bsize
, &ebp
))
436 ebap
= (ufs_daddr_t
*)ebp
->b_data
;
	/*
	 * Make sure we aren't spanning more than two blockmaps.  ssize is
	 * our calculation of the span we have to scan in the first blockmap,
	 * while slen is our calculation of the number of entries available
	 * in the first blockmap (from soff).
	 */
		panic("ffs_reallocblks: range spans more than two blockmaps!"
		      " start_lbn %ld len %d (%d/%d)",
		      (long)start_lbn, len, slen, ssize);
451 * Search the block map looking for an allocation of the desired size.
453 if ((newblk
= (ufs_daddr_t
)ffs_hashalloc(ip
, dtog(fs
, pref
), (long)pref
,
454 len
, ffs_clusteralloc
)) == 0)
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
465 kprintf("realloc: ino %ju, lbns %d-%d\n\told:",
466 (uintmax_t)ip
->i_number
, start_lbn
, end_lbn
);
469 for (bap
= &sbap
[soff
], i
= 0; i
< len
; i
++, blkno
+= fs
->fs_frag
) {
475 if (!ffs_checkblk(ip
,
476 dofftofsb(fs
, buflist
->bs_children
[i
]->b_bio2
.bio_offset
), fs
->fs_bsize
))
477 panic("ffs_reallocblks: unallocated block 2");
478 if (dofftofsb(fs
, buflist
->bs_children
[i
]->b_bio2
.bio_offset
) != *bap
)
479 panic("ffs_reallocblks: alloc mismatch");
483 kprintf(" %d,", *bap
);
485 if (DOINGSOFTDEP(vp
)) {
486 if (sbap
== &ip
->i_db
[0] && i
< ssize
)
487 softdep_setup_allocdirect(ip
, start_lbn
+ i
,
488 blkno
, *bap
, fs
->fs_bsize
, fs
->fs_bsize
,
489 buflist
->bs_children
[i
]);
491 softdep_setup_allocindir_page(ip
, start_lbn
+ i
,
492 i
< ssize
? sbp
: ebp
, soff
+ i
, blkno
,
493 *bap
, buflist
->bs_children
[i
]);
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
511 if (sbap
!= &ip
->i_db
[0]) {
517 ip
->i_flag
|= IN_CHANGE
| IN_UPDATE
;
528 * Last, free the old blocks and assign the new blocks to the buffers.
534 for (blkno
= newblk
, i
= 0; i
< len
; i
++, blkno
+= fs
->fs_frag
) {
535 if (!DOINGSOFTDEP(vp
) &&
536 buflist
->bs_children
[i
]->b_bio2
.bio_offset
!= NOOFFSET
) {
538 dofftofsb(fs
, buflist
->bs_children
[i
]->b_bio2
.bio_offset
),
541 buflist
->bs_children
[i
]->b_bio2
.bio_offset
= fsbtodoff(fs
, blkno
);
543 if (!ffs_checkblk(ip
,
544 dofftofsb(fs
, buflist
->bs_children
[i
]->b_bio2
.bio_offset
), fs
->fs_bsize
))
545 panic("ffs_reallocblks: unallocated block 3");
549 kprintf(" %d,", blkno
);
563 if (sbap
!= &ip
->i_db
[0])
/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
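/*
 * Illustrative call paths (hypothetical): creating a regular file sets
 * ipref to the parent directory's inode number, so the new inode is sought
 * in the parent's cylinder group; creating a subdirectory instead routes
 * through ffs_dirpref(), which may deliberately pick a different, emptier
 * cylinder group for the new directory.
 */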
584 ffs_valloc(struct vnode
*pvp
, int mode
, struct ucred
*cred
, struct vnode
**vpp
)
595 if (fs
->fs_cstotal
.cs_nifree
== 0)
598 if ((mode
& IFMT
) == IFDIR
)
599 ipref
= ffs_dirpref(pip
);
601 ipref
= pip
->i_number
;
602 if (ipref
>= fs
->fs_ncg
* fs
->fs_ipg
)
604 cg
= ino_to_cg(fs
, ipref
);
	/*
	 * Track the number of directories created one after another
	 * in the same cg without intervening allocation of files.
	 */
609 if ((mode
& IFMT
) == IFDIR
) {
610 if (fs
->fs_contigdirs
[cg
] < 255)
611 fs
->fs_contigdirs
[cg
]++;
613 if (fs
->fs_contigdirs
[cg
] > 0)
614 fs
->fs_contigdirs
[cg
]--;
616 ino
= (ino_t
)ffs_hashalloc(pip
, cg
, (long)ipref
, mode
,
617 (allocfcn_t
*)ffs_nodealloccg
);
620 error
= VFS_VGET(pvp
->v_mount
, NULL
, ino
, vpp
);
622 ffs_vfree(pvp
, ino
, mode
);
627 kprintf("mode = 0%o, inum = %lu, fs = %s\n",
628 ip
->i_mode
, (u_long
)ip
->i_number
, fs
->fs_fsmnt
);
629 panic("ffs_valloc: dup alloc");
631 if (ip
->i_blocks
) { /* XXX */
632 kprintf("free inode %s/%lu had %ld blocks\n",
633 fs
->fs_fsmnt
, (u_long
)ino
, (long)ip
->i_blocks
);
638 * Set up a new generation number for this inode.
640 if (ip
->i_gen
== 0 || ++ip
->i_gen
== 0)
641 ip
->i_gen
= krandom() / 2 + 1;
644 ffs_fserr(fs
, cred
->cr_uid
, "out of inodes");
645 uprintf("\n%s: create/symlink failed, no inodes free\n", fs
->fs_fsmnt
);
/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
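/*
 * Worked example (illustrative numbers): on a filesystem with fs_ncg = 100
 * cylinder groups, 10,000 free inodes and 5,000 free blocks overall,
 * avgifree = 100 and avgbfree = 50 per group.  A new first level directory
 * starts at a random group and is placed in the group holding the fewest
 * directories among those with cs_nifree >= 100 and cs_nbfree >= 50,
 * spreading the top of the tree across the disk.
 */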
663 ffs_dirpref(struct inode
*pip
)
666 int cg
, prefcg
, dirsize
, cgsize
;
668 int avgifree
, avgbfree
, avgndir
, curdirsize
;
669 int minifree
, minbfree
, maxndir
;
675 avgifree
= fs
->fs_cstotal
.cs_nifree
/ fs
->fs_ncg
;
676 avgbfree
= fs
->fs_cstotal
.cs_nbfree
/ fs
->fs_ncg
;
677 avgndir
= fs
->fs_cstotal
.cs_ndir
/ fs
->fs_ncg
;
680 * Force allocation in another cg if creating a first level dir.
682 if (ITOV(pip
)->v_flag
& VROOT
) {
683 prefcg
= karc4random() % fs
->fs_ncg
;
685 minndir
= fs
->fs_ipg
;
686 for (cg
= prefcg
; cg
< fs
->fs_ncg
; cg
++)
687 if (fs
->fs_cs(fs
, cg
).cs_ndir
< minndir
&&
688 fs
->fs_cs(fs
, cg
).cs_nifree
>= avgifree
&&
689 fs
->fs_cs(fs
, cg
).cs_nbfree
>= avgbfree
) {
691 minndir
= fs
->fs_cs(fs
, cg
).cs_ndir
;
693 for (cg
= 0; cg
< prefcg
; cg
++)
694 if (fs
->fs_cs(fs
, cg
).cs_ndir
< minndir
&&
695 fs
->fs_cs(fs
, cg
).cs_nifree
>= avgifree
&&
696 fs
->fs_cs(fs
, cg
).cs_nbfree
>= avgbfree
) {
698 minndir
= fs
->fs_cs(fs
, cg
).cs_ndir
;
700 return ((ino_t
)(fs
->fs_ipg
* mincg
));
	/*
	 * Count various limits which are used for
	 * optimal allocation of a directory inode.
	 */
707 maxndir
= min(avgndir
+ fs
->fs_ipg
/ 16, fs
->fs_ipg
);
708 minifree
= avgifree
- avgifree
/ 4;
711 minbfree
= avgbfree
- avgbfree
/ 4;
714 cgsize
= fs
->fs_fsize
* fs
->fs_fpg
;
	/*
	 * fs_avgfilesize and fs_avgfpdir are user-settable entities and
	 * multiplying them may overflow a 32 bit integer.
	 */
720 dirsize64
= fs
->fs_avgfilesize
* (int64_t)fs
->fs_avgfpdir
;
721 if (dirsize64
> 0x7fffffff) {
724 dirsize
= (int)dirsize64
;
725 curdirsize
= avgndir
?
726 (cgsize
- avgbfree
* fs
->fs_bsize
) / avgndir
: 0;
727 if (dirsize
< curdirsize
)
728 dirsize
= curdirsize
;
729 maxcontigdirs
= min((avgbfree
* fs
->fs_bsize
) / dirsize
, 255);
730 if (fs
->fs_avgfpdir
> 0)
731 maxcontigdirs
= min(maxcontigdirs
,
732 fs
->fs_ipg
/ fs
->fs_avgfpdir
);
733 if (maxcontigdirs
== 0)
	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
= ino_to_cg(fs
, pip
->i_number
);
743 for (cg
= prefcg
; cg
< fs
->fs_ncg
; cg
++)
744 if (fs
->fs_cs(fs
, cg
).cs_ndir
< maxndir
&&
745 fs
->fs_cs(fs
, cg
).cs_nifree
>= minifree
&&
746 fs
->fs_cs(fs
, cg
).cs_nbfree
>= minbfree
) {
747 if (fs
->fs_contigdirs
[cg
] < maxcontigdirs
)
748 return ((ino_t
)(fs
->fs_ipg
* cg
));
750 for (cg
= 0; cg
< prefcg
; cg
++)
751 if (fs
->fs_cs(fs
, cg
).cs_ndir
< maxndir
&&
752 fs
->fs_cs(fs
, cg
).cs_nifree
>= minifree
&&
753 fs
->fs_cs(fs
, cg
).cs_nbfree
>= minbfree
) {
754 if (fs
->fs_contigdirs
[cg
] < maxcontigdirs
)
755 return ((ino_t
)(fs
->fs_ipg
* cg
));
	/*
	 * This is a backstop when we have a deficit in space.
	 */
760 for (cg
= prefcg
; cg
< fs
->fs_ncg
; cg
++)
761 if (fs
->fs_cs(fs
, cg
).cs_nifree
>= avgifree
)
762 return ((ino_t
)(fs
->fs_ipg
* cg
));
763 for (cg
= 0; cg
< prefcg
; cg
++)
764 if (fs
->fs_cs(fs
, cg
).cs_nifree
>= avgifree
)
766 return ((ino_t
)(fs
->fs_ipg
* cg
));
/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks. The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds. This is to allow time for the processor to
 * schedule another I/O transfer.
 */
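/*
 * Illustrative layout (assuming fs_maxbpg = 2048): the direct blocks of a
 * file are kept in the inode's own cylinder group; after each run of 2048
 * blocks within an indirect block, ffs_blkpref() starts a new section and
 * sweeps the rotor forward to the next cylinder group holding an
 * above-average number of free blocks.
 */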
796 ffs_blkpref(struct inode
*ip
, ufs_daddr_t lbn
, int indx
, ufs_daddr_t
*bap
)
800 int avgbfree
, startcg
;
804 if (indx
% fs
->fs_maxbpg
== 0 || bap
[indx
- 1] == 0) {
805 if (lbn
< NDADDR
+ NINDIR(fs
)) {
806 cg
= ino_to_cg(fs
, ip
->i_number
);
807 return (fs
->fs_fpg
* cg
+ fs
->fs_frag
);
810 * Find a cylinder with greater than average number of
811 * unused data blocks.
813 if (indx
== 0 || bap
[indx
- 1] == 0)
815 ino_to_cg(fs
, ip
->i_number
) + lbn
/ fs
->fs_maxbpg
;
817 startcg
= dtog(fs
, bap
[indx
- 1]) + 1;
818 startcg
%= fs
->fs_ncg
;
819 avgbfree
= fs
->fs_cstotal
.cs_nbfree
/ fs
->fs_ncg
;
820 for (cg
= startcg
; cg
< fs
->fs_ncg
; cg
++)
821 if (fs
->fs_cs(fs
, cg
).cs_nbfree
>= avgbfree
) {
823 return (fs
->fs_fpg
* cg
+ fs
->fs_frag
);
825 for (cg
= 0; cg
<= startcg
; cg
++)
826 if (fs
->fs_cs(fs
, cg
).cs_nbfree
>= avgbfree
) {
828 return (fs
->fs_fpg
* cg
+ fs
->fs_frag
);
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
838 nextblk
= bap
[indx
- 1] + fs
->fs_frag
;
839 if (fs
->fs_rotdelay
== 0 || indx
< fs
->fs_maxcontig
||
840 bap
[indx
- fs
->fs_maxcontig
] +
841 blkstofrags(fs
, fs
->fs_maxcontig
) != nextblk
)
	/*
	 * Here we convert ms of delay to frags as:
	 * (frags) = (ms) * (rev/sec) * (sect/rev) /
	 *	((sect/frag) * (ms/sec))
	 * then round up to the next block.
	 */
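	/*
	 * Worked example with illustrative geometry (fs_rotdelay = 4 ms,
	 * fs_rps = 60 rev/s, fs_nsect = 32 sectors/track, NSPF = 2
	 * sectors/frag, fs_frag = 8): 4 * 60 * 32 / (2 * 1000) = 3 frags
	 * (integer division), and roundup(3, 8) pushes nextblk forward by a
	 * full 8-frag block.
	 */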
849 nextblk
+= roundup(fs
->fs_rotdelay
* fs
->fs_rps
* fs
->fs_nsect
/
850 (NSPF(fs
) * 1000), fs
->fs_frag
);
/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
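/*
 * Illustrative probe order (assuming fs_ncg = 32 and a starting group of
 * 10): the preferred group 10 is tried first, the quadratic rehash then
 * visits groups 11, 13, 17, 25 and 9 (offsets 1, 2, 4, 8, 16, wrapping
 * modulo 32), and the brute force pass finally walks every remaining group
 * starting at group 12.
 */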
864 ffs_hashalloc(struct inode
*ip
, int cg
, long pref
,
865 int size
, /* size for data blocks, mode for inodes */
866 allocfcn_t
*allocator
)
869 long result
; /* XXX why not same type as we return? */
874 * 1: preferred cylinder group
876 result
= (*allocator
)(ip
, cg
, pref
, size
);
880 * 2: quadratic rehash
882 for (i
= 1; i
< fs
->fs_ncg
; i
*= 2) {
884 if (cg
>= fs
->fs_ncg
)
886 result
= (*allocator
)(ip
, cg
, 0, size
);
891 * 3: brute force search
892 * Note that we start at i == 2, since 0 was checked initially,
893 * and 1 is always checked in the quadratic rehash.
895 cg
= (icg
+ 2) % fs
->fs_ncg
;
896 for (i
= 2; i
< fs
->fs_ncg
; i
++) {
897 result
= (*allocator
)(ip
, cg
, 0, size
);
901 if (cg
== fs
->fs_ncg
)
/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
914 ffs_fragextend(struct inode
*ip
, int cg
, long bprev
, int osize
, int nsize
)
925 if (fs
->fs_cs(fs
, cg
).cs_nffree
< numfrags(fs
, nsize
- osize
))
927 frags
= numfrags(fs
, nsize
);
928 bbase
= fragnum(fs
, bprev
);
929 if (bbase
> fragnum(fs
, (bprev
+ frags
- 1))) {
930 /* cannot extend across a block boundary */
933 KKASSERT(blknum(fs
, bprev
) == blknum(fs
, bprev
+ frags
- 1));
934 error
= bread(ip
->i_devvp
, fsbtodoff(fs
, cgtod(fs
, cg
)),
935 (int)fs
->fs_cgsize
, &bp
);
940 cgp
= (struct cg
*)bp
->b_data
;
941 if (!cg_chkmagic(cgp
)) {
945 cgp
->cg_time
= time_second
;
946 bno
= dtogd(fs
, bprev
);
947 blksfree
= cg_blksfree(cgp
);
948 for (i
= numfrags(fs
, osize
); i
< frags
; i
++) {
949 if (isclr(blksfree
, bno
+ i
)) {
956 * the current fragment can be extended
957 * deduct the count on fragment being extended into
958 * increase the count on the remaining fragment (if any)
959 * allocate the extended piece
961 * ---oooooooooonnnnnnn111----
966 for (i
= frags
; i
< fs
->fs_frag
- bbase
; i
++) {
967 if (isclr(blksfree
, bno
+ i
))
972 * Size of original free frag is [i - numfrags(fs, osize)]
973 * Size of remaining free frag is [i - frags]
975 cgp
->cg_frsum
[i
- numfrags(fs
, osize
)]--;
977 cgp
->cg_frsum
[i
- frags
]++;
978 for (i
= numfrags(fs
, osize
); i
< frags
; i
++) {
979 clrbit(blksfree
, bno
+ i
);
980 cgp
->cg_cs
.cs_nffree
--;
981 fs
->fs_cstotal
.cs_nffree
--;
982 fs
->fs_cs(fs
, cg
).cs_nffree
--;
985 if (DOINGSOFTDEP(ITOV(ip
)))
986 softdep_setup_blkmapdep(bp
, fs
, bprev
);
992 * Determine whether a block can be allocated.
994 * Check to see if a block of the appropriate size is available,
995 * and if it is, allocate it.
998 ffs_alloccg(struct inode
*ip
, int cg
, ufs_daddr_t bpref
, int size
)
1004 ufs_daddr_t bno
, blkno
;
1005 int allocsiz
, error
, frags
;
1009 if (fs
->fs_cs(fs
, cg
).cs_nbfree
== 0 && size
== fs
->fs_bsize
)
1011 error
= bread(ip
->i_devvp
, fsbtodoff(fs
, cgtod(fs
, cg
)),
1012 (int)fs
->fs_cgsize
, &bp
);
1017 cgp
= (struct cg
*)bp
->b_data
;
1018 if (!cg_chkmagic(cgp
) ||
1019 (cgp
->cg_cs
.cs_nbfree
== 0 && size
== fs
->fs_bsize
)) {
1023 cgp
->cg_time
= time_second
;
1024 if (size
== fs
->fs_bsize
) {
1025 bno
= ffs_alloccgblk(ip
, bp
, bpref
);
1030 * Check to see if any fragments of sufficient size are already
1031 * available. Fit the data into a larger fragment if necessary,
1032 * before allocating a whole new block.
1034 blksfree
= cg_blksfree(cgp
);
1035 frags
= numfrags(fs
, size
);
1036 for (allocsiz
= frags
; allocsiz
< fs
->fs_frag
; allocsiz
++) {
1037 if (cgp
->cg_frsum
[allocsiz
] != 0)
1040 if (allocsiz
== fs
->fs_frag
) {
1042 * No fragments were available, allocate a whole block and
1043 * cut the requested fragment (of size frags) out of it.
1045 if (cgp
->cg_cs
.cs_nbfree
== 0) {
1049 bno
= ffs_alloccgblk(ip
, bp
, bpref
);
1050 bpref
= dtogd(fs
, bno
);
1051 for (i
= frags
; i
< fs
->fs_frag
; i
++)
1052 setbit(blksfree
, bpref
+ i
);
1055 * Calculate the number of free frags still remaining after
1056 * we have cut out the requested allocation. Indicate that
1057 * a fragment of that size is now available for future
1060 i
= fs
->fs_frag
- frags
;
1061 cgp
->cg_cs
.cs_nffree
+= i
;
1062 fs
->fs_cstotal
.cs_nffree
+= i
;
1063 fs
->fs_cs(fs
, cg
).cs_nffree
+= i
;
1071 * cg_frsum[] has told us that a free fragment of allocsiz size is
1072 * available. Find it, then clear the bitmap bits associated with
1075 bno
= ffs_mapsearch(fs
, cgp
, bpref
, allocsiz
);
1080 for (i
= 0; i
< frags
; i
++)
1081 clrbit(blksfree
, bno
+ i
);
1082 cgp
->cg_cs
.cs_nffree
-= frags
;
1083 fs
->fs_cstotal
.cs_nffree
-= frags
;
1084 fs
->fs_cs(fs
, cg
).cs_nffree
-= frags
;
1088 * Account for the allocation. The original searched size that we
1089 * found is no longer available. If we cut out a smaller piece then
1090 * a smaller fragment is now available.
1092 cgp
->cg_frsum
[allocsiz
]--;
1093 if (frags
!= allocsiz
)
1094 cgp
->cg_frsum
[allocsiz
- frags
]++;
1095 blkno
= cg
* fs
->fs_fpg
+ bno
;
1096 if (DOINGSOFTDEP(ITOV(ip
)))
1097 softdep_setup_blkmapdep(bp
, fs
, blkno
);
1099 return ((u_long
)blkno
);
/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
1114 ffs_alloccgblk(struct inode
*ip
, struct buf
*bp
, ufs_daddr_t bpref
)
1118 ufs_daddr_t bno
, blkno
;
1119 int cylno
, pos
, delta
;
1125 cgp
= (struct cg
*)bp
->b_data
;
1126 blksfree
= cg_blksfree(cgp
);
1127 if (bpref
== 0 || dtog(fs
, bpref
) != cgp
->cg_cgx
) {
1128 bpref
= cgp
->cg_rotor
;
1131 bpref
= blknum(fs
, bpref
);
1132 bpref
= dtogd(fs
, bpref
);
1134 * if the requested block is available, use it
1136 if (ffs_isblock(fs
, blksfree
, fragstoblks(fs
, bpref
))) {
1140 if (fs
->fs_nrpos
<= 1 || fs
->fs_cpc
== 0) {
1142 * Block layout information is not available.
1143 * Leaving bpref unchanged means we take the
1144 * next available free block following the one
1145 * we just allocated. Hopefully this will at
1146 * least hit a track cache on drives of unknown
1147 * geometry (e.g. SCSI).
1152 * check for a block available on the same cylinder
1154 cylno
= cbtocylno(fs
, bpref
);
1155 if (cg_blktot(cgp
)[cylno
] == 0)
1158 * check the summary information to see if a block is
1159 * available in the requested cylinder starting at the
1160 * requested rotational position and proceeding around.
1162 cylbp
= cg_blks(fs
, cgp
, cylno
);
1163 pos
= cbtorpos(fs
, bpref
);
1164 for (i
= pos
; i
< fs
->fs_nrpos
; i
++)
1167 if (i
== fs
->fs_nrpos
)
1168 for (i
= 0; i
< pos
; i
++)
	/*
	 * found a rotational position, now find the actual
	 * block.  Panic if none is actually there.
	 */
1176 pos
= cylno
% fs
->fs_cpc
;
1177 bno
= (cylno
- pos
) * fs
->fs_spc
/ NSPB(fs
);
1178 if (fs_postbl(fs
, pos
)[i
] == -1) {
1179 kprintf("pos = %d, i = %d, fs = %s\n",
1180 pos
, i
, fs
->fs_fsmnt
);
1181 panic("ffs_alloccgblk: cyl groups corrupted");
1183 for (i
= fs_postbl(fs
, pos
)[i
];; ) {
1184 if (ffs_isblock(fs
, blksfree
, bno
+ i
)) {
1185 bno
= blkstofrags(fs
, (bno
+ i
));
1188 delta
= fs_rotbl(fs
)[i
];
1190 delta
+ i
> fragstoblks(fs
, fs
->fs_fpg
))
1194 kprintf("pos = %d, i = %d, fs = %s\n", pos
, i
, fs
->fs_fsmnt
);
1195 panic("ffs_alloccgblk: can't find blk in cyl");
1199 * no blocks in the requested cylinder, so take next
1200 * available one in this cylinder group.
1202 bno
= ffs_mapsearch(fs
, cgp
, bpref
, (int)fs
->fs_frag
);
1205 cgp
->cg_rotor
= bno
;
1207 blkno
= fragstoblks(fs
, bno
);
1208 ffs_clrblock(fs
, blksfree
, (long)blkno
);
1209 ffs_clusteracct(fs
, cgp
, blkno
, -1);
1210 cgp
->cg_cs
.cs_nbfree
--;
1211 fs
->fs_cstotal
.cs_nbfree
--;
1212 fs
->fs_cs(fs
, cgp
->cg_cgx
).cs_nbfree
--;
1213 cylno
= cbtocylno(fs
, bno
);
1214 cg_blks(fs
, cgp
, cylno
)[cbtorpos(fs
, bno
)]--;
1215 cg_blktot(cgp
)[cylno
]--;
1217 blkno
= cgp
->cg_cgx
* fs
->fs_fpg
+ bno
;
1218 if (DOINGSOFTDEP(ITOV(ip
)))
1219 softdep_setup_blkmapdep(bp
, fs
, blkno
);
1224 * Determine whether a cluster can be allocated.
1226 * We do not currently check for optimal rotational layout if there
1227 * are multiple choices in the same cylinder group. Instead we just
1228 * take the first one that we find following bpref.
1231 ffs_clusteralloc(struct inode
*ip
, int cg
, ufs_daddr_t bpref
, int len
)
1236 int i
, got
, run
, bno
, bit
, map
;
1242 if (fs
->fs_maxcluster
[cg
] < len
)
1244 if (bread(ip
->i_devvp
, fsbtodoff(fs
, cgtod(fs
, cg
)),
1245 (int)fs
->fs_cgsize
, &bp
)) {
1248 cgp
= (struct cg
*)bp
->b_data
;
1249 if (!cg_chkmagic(cgp
))
1253 * Check to see if a cluster of the needed size (or bigger) is
1254 * available in this cylinder group.
1256 lp
= &cg_clustersum(cgp
)[len
];
1257 for (i
= len
; i
<= fs
->fs_contigsumsize
; i
++)
1260 if (i
> fs
->fs_contigsumsize
) {
1262 * This is the first time looking for a cluster in this
1263 * cylinder group. Update the cluster summary information
1264 * to reflect the true maximum sized cluster so that
1265 * future cluster allocation requests can avoid reading
1266 * the cylinder group map only to find no clusters.
1268 lp
= &cg_clustersum(cgp
)[len
- 1];
1269 for (i
= len
- 1; i
> 0; i
--)
1272 fs
->fs_maxcluster
[cg
] = i
;
1276 * Search the cluster map to find a big enough cluster.
1277 * We take the first one that we find, even if it is larger
1278 * than we need as we prefer to get one close to the previous
1279 * block allocation. We do not search before the current
1280 * preference point as we do not want to allocate a block
1281 * that is allocated before the previous one (as we will
1282 * then have to wait for another pass of the elevator
1283 * algorithm before it will be read). We prefer to fail and
1284 * be recalled to try an allocation in the next cylinder group.
1286 if (dtog(fs
, bpref
) != cg
)
1289 bpref
= fragstoblks(fs
, dtogd(fs
, blknum(fs
, bpref
)));
1290 mapp
= &cg_clustersfree(cgp
)[bpref
/ NBBY
];
1292 bit
= 1 << (bpref
% NBBY
);
1293 for (run
= 0, got
= bpref
; got
< cgp
->cg_nclusterblks
; got
++) {
1294 if ((map
& bit
) == 0) {
1301 if ((got
& (NBBY
- 1)) != (NBBY
- 1)) {
1308 if (got
>= cgp
->cg_nclusterblks
)
1311 * Allocate the cluster that we have found.
1313 blksfree
= cg_blksfree(cgp
);
1314 for (i
= 1; i
<= len
; i
++) {
1315 if (!ffs_isblock(fs
, blksfree
, got
- run
+ i
))
1316 panic("ffs_clusteralloc: map mismatch");
1318 bno
= cg
* fs
->fs_fpg
+ blkstofrags(fs
, got
- run
+ 1);
1319 if (dtog(fs
, bno
) != cg
)
1320 panic("ffs_clusteralloc: allocated out of group");
1321 len
= blkstofrags(fs
, len
);
1322 for (i
= 0; i
< len
; i
+= fs
->fs_frag
) {
1323 if ((got
= ffs_alloccgblk(ip
, bp
, bno
+ i
)) != bno
+ i
)
1324 panic("ffs_clusteralloc: lost block");
/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 *   3) the inode must not already be in the inode hash table, otherwise
 *      it may be in the process of being deallocated.  We can encounter
 *      such a case because the vnode reclamation sequence updates the
 *      bitmap before the inode is removed from the hash.  If we were to
 *      reallocate the inode the caller could wind up returning a
 *      vnode/inode combination which is in an indeterminate state.
 */
1353 ffs_nodealloccg(struct inode
*ip
, int cg
, ufs_daddr_t ipref
, int mode
)
1355 struct ufsmount
*ump
;
1361 int error
, len
, arraysize
, i
;
1367 ump
= VFSTOUFS(vp
->v_mount
);
1369 if (fs
->fs_cs(fs
, cg
).cs_nifree
== 0)
1371 error
= bread(ip
->i_devvp
, fsbtodoff(fs
, cgtod(fs
, cg
)),
1372 (int)fs
->fs_cgsize
, &bp
);
1377 cgp
= (struct cg
*)bp
->b_data
;
1378 if (!cg_chkmagic(cgp
) || cgp
->cg_cs
.cs_nifree
== 0) {
1382 inosused
= cg_inosused(cgp
);
	/*
	 * Quick check, reuse the most recently freed inode or continue
	 * a scan from where we left off the last time.
	 */
1389 ibase
= cg
* fs
->fs_ipg
;
1391 ipref
%= fs
->fs_ipg
;
1392 if (isclr(inosused
, ipref
)) {
1393 if (ufs_ihashcheck(ump
, ip
->i_dev
, ibase
+ ipref
) == 0)
	/*
	 * Scan the inode bitmap starting at irotor, be sure to handle
	 * the edge case by going back to the beginning of the array.
	 *
	 * If the number of inodes is not byte-aligned, the unused bits
	 * should be set to 1.  This will be sanity checked in gotit.  Note
	 * that we have to be sure not to overlap the beginning and end
	 * when irotor is in the middle of a byte as this will cause the
	 * same bitmap byte to be checked twice.  To solve this problem we
	 * just convert everything to a byte index for the loop.
	 */
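	/*
	 * Worked example (illustrative numbers): with fs_ipg = 1900 inodes
	 * per group and cg_irotor = 1250, the scan starts at byte
	 * 1250 >> 3 = 156 of the inosused map and covers
	 * (1900 + 7) >> 3 = 238 bytes, wrapping from the end of the array
	 * back to byte 0 so that each bitmap byte is examined exactly once.
	 */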
1409 ipref
= (cgp
->cg_irotor
% fs
->fs_ipg
) >> 3; /* byte index */
1410 len
= (fs
->fs_ipg
+ 7) >> 3; /* byte size */
1414 map
= inosused
[ipref
];
1416 for (i
= 0; i
< NBBY
; ++i
) {
1418 * If we find a free bit we have to make sure
1419 * that the inode is not in the middle of
1420 * being destroyed. The inode should not exist
1421 * in the inode hash.
1423 * Adjust the rotor to try to hit the
1424 * quick-check up above.
1426 if ((map
& (1 << i
)) == 0) {
1427 if (ufs_ihashcheck(ump
, ip
->i_dev
, ibase
+ (ipref
<< 3) + i
) == 0) {
1428 ipref
= (ipref
<< 3) + i
;
1429 cgp
->cg_irotor
= (ipref
+ 1) % fs
->fs_ipg
;
1438 * Setup for the next byte, start at the beginning again if
1439 * we hit the end of the array.
1441 if (++ipref
== arraysize
)
1445 if (icheckmiss
== cgp
->cg_cs
.cs_nifree
) {
1449 kprintf("fs = %s\n", fs
->fs_fsmnt
);
1450 panic("ffs_nodealloccg: block not in map, icheckmiss/nfree %d/%d",
1451 icheckmiss
, cgp
->cg_cs
.cs_nifree
);
1455 * ipref is a bit index as of the gotit label.
1458 KKASSERT(ipref
>= 0 && ipref
< fs
->fs_ipg
);
1459 cgp
->cg_time
= time_second
;
1460 if (DOINGSOFTDEP(ITOV(ip
)))
1461 softdep_setup_inomapdep(bp
, ip
, ibase
+ ipref
);
1462 setbit(inosused
, ipref
);
1463 cgp
->cg_cs
.cs_nifree
--;
1464 fs
->fs_cstotal
.cs_nifree
--;
1465 fs
->fs_cs(fs
, cg
).cs_nifree
--;
1467 if ((mode
& IFMT
) == IFDIR
) {
1468 cgp
->cg_cs
.cs_ndir
++;
1469 fs
->fs_cstotal
.cs_ndir
++;
1470 fs
->fs_cs(fs
, cg
).cs_ndir
++;
1473 return (ibase
+ ipref
);
1477 * Free a block or fragment.
1479 * The specified block or fragment is placed back in the
1480 * free map. If a fragment is deallocated, a possible
1481 * block reassembly is checked.
1484 ffs_blkfree_cg(struct fs
* fs
, struct vnode
* i_devvp
, cdev_t i_dev
, ino_t i_number
,
1485 uint32_t i_din_uid
, ufs_daddr_t bno
, long size
)
1490 int i
, error
, cg
, blk
, frags
, bbase
;
1493 VOP_FREEBLKS(i_devvp
, fsbtodoff(fs
, bno
), size
);
1494 if ((uint
)size
> fs
->fs_bsize
|| fragoff(fs
, size
) != 0 ||
1495 fragnum(fs
, bno
) + numfrags(fs
, size
) > fs
->fs_frag
) {
1496 kprintf("dev=%s, bno = %ld, bsize = %ld, size = %ld, fs = %s\n",
1497 devtoname(i_dev
), (long)bno
, (long)fs
->fs_bsize
, size
,
1499 panic("ffs_blkfree: bad size");
1502 if ((uint
)bno
>= fs
->fs_size
) {
1503 kprintf("bad block %ld, ino %lu\n",
1504 (long)bno
, (u_long
)i_number
);
1505 ffs_fserr(fs
, i_din_uid
, "bad block");
1510 * Load the cylinder group
1512 error
= bread(i_devvp
, fsbtodoff(fs
, cgtod(fs
, cg
)),
1513 (int)fs
->fs_cgsize
, &bp
);
1518 cgp
= (struct cg
*)bp
->b_data
;
1519 if (!cg_chkmagic(cgp
)) {
1523 cgp
->cg_time
= time_second
;
1524 bno
= dtogd(fs
, bno
);
1525 blksfree
= cg_blksfree(cgp
);
1527 if (size
== fs
->fs_bsize
) {
1529 * Free a whole block
1531 blkno
= fragstoblks(fs
, bno
);
1532 if (!ffs_isfreeblock(fs
, blksfree
, blkno
)) {
1533 kprintf("dev = %s, block = %ld, fs = %s\n",
1534 devtoname(i_dev
), (long)bno
, fs
->fs_fsmnt
);
1535 panic("ffs_blkfree: freeing free block");
1537 ffs_setblock(fs
, blksfree
, blkno
);
1538 ffs_clusteracct(fs
, cgp
, blkno
, 1);
1539 cgp
->cg_cs
.cs_nbfree
++;
1540 fs
->fs_cstotal
.cs_nbfree
++;
1541 fs
->fs_cs(fs
, cg
).cs_nbfree
++;
1542 i
= cbtocylno(fs
, bno
);
1543 cg_blks(fs
, cgp
, i
)[cbtorpos(fs
, bno
)]++;
1544 cg_blktot(cgp
)[i
]++;
		/*
		 * Free a fragment within a block.
		 *
		 * bno is the starting block number of the fragment being
		 * freed.
		 *
		 * bbase is the starting block number for the filesystem
		 * block containing the fragment.
		 *
		 * blk is the current bitmap for the fragments within the
		 * filesystem block containing the fragment.
		 *
		 * frags is the number of fragments being freed
		 *
		 * Call ffs_fragacct() to account for the removal of all
		 * current fragments, then adjust the bitmap to free the
		 * requested fragment, and finally call ffs_fragacct() again
		 * to regenerate the accounting.
		 */
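		/*
		 * Worked example (illustrative, fs_frag = 8): fragments 0-2
		 * of the block are already free and fragments 3-5 are now
		 * being freed (frags = 3).  The first ffs_fragacct() call
		 * removes the existing 3-frag run from cg_frsum[3], the loop
		 * below sets bits 3-5 and bumps the nffree counters by 3,
		 * and the second ffs_fragacct() call records the merged
		 * 6-frag run in cg_frsum[6].
		 */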
1565 bbase
= bno
- fragnum(fs
, bno
);
1566 blk
= blkmap(fs
, blksfree
, bbase
);
1567 ffs_fragacct(fs
, blk
, cgp
->cg_frsum
, -1);
1568 frags
= numfrags(fs
, size
);
1569 for (i
= 0; i
< frags
; i
++) {
1570 if (isset(blksfree
, bno
+ i
)) {
1571 kprintf("dev = %s, block = %ld, fs = %s\n",
1572 devtoname(i_dev
), (long)(bno
+ i
),
1574 panic("ffs_blkfree: freeing free frag");
1576 setbit(blksfree
, bno
+ i
);
1578 cgp
->cg_cs
.cs_nffree
+= i
;
1579 fs
->fs_cstotal
.cs_nffree
+= i
;
1580 fs
->fs_cs(fs
, cg
).cs_nffree
+= i
;
1583 * Add back in counts associated with the new frags
1585 blk
= blkmap(fs
, blksfree
, bbase
);
1586 ffs_fragacct(fs
, blk
, cgp
->cg_frsum
, 1);
1589 * If a complete block has been reassembled, account for it
1591 blkno
= fragstoblks(fs
, bbase
);
1592 if (ffs_isblock(fs
, blksfree
, blkno
)) {
1593 cgp
->cg_cs
.cs_nffree
-= fs
->fs_frag
;
1594 fs
->fs_cstotal
.cs_nffree
-= fs
->fs_frag
;
1595 fs
->fs_cs(fs
, cg
).cs_nffree
-= fs
->fs_frag
;
1596 ffs_clusteracct(fs
, cgp
, blkno
, 1);
1597 cgp
->cg_cs
.cs_nbfree
++;
1598 fs
->fs_cstotal
.cs_nbfree
++;
1599 fs
->fs_cs(fs
, cg
).cs_nbfree
++;
1600 i
= cbtocylno(fs
, bbase
);
1601 cg_blks(fs
, cgp
, i
)[cbtorpos(fs
, bbase
)]++;
1602 cg_blktot(cgp
)[i
]++;
1609 struct ffs_blkfree_trim_params
{
	/*
	 * With TRIM, the inode pointer is gone in the callback, but we
	 * still need the following fields for ffs_blkfree_cg().
	 */
1618 struct vnode
*i_devvp
;
1627 ffs_blkfree_trim_task(void *ctx
, int pending
)
1629 struct ffs_blkfree_trim_params
*tp
;
1632 ffs_blkfree_cg(tp
->i_fs
, tp
->i_devvp
, tp
->i_dev
, tp
->i_number
,
1633 tp
->i_din_uid
, tp
->bno
, tp
->size
);
1640 ffs_blkfree_trim_completed(struct bio
*biop
)
1642 struct buf
*bp
= biop
->bio_buf
;
1643 struct ffs_blkfree_trim_params
*tp
;
1645 tp
= bp
->b_bio1
.bio_caller_info1
.ptr
;
1646 TASK_INIT(&tp
->task
, 0, ffs_blkfree_trim_task
, tp
);
1647 tp
= biop
->bio_caller_info1
.ptr
;
1648 taskqueue_enqueue(taskqueue_swi
, &tp
->task
);
/*
 * If TRIM is enabled, we TRIM the blocks first then free them. We do this
 * after TRIM is finished and the callback handler is called. The logic here
 * is that we free the blocks before updating the bitmap so that we don't
 * reuse a block before we actually trim it, which would result in trimming
 * a valid block.
 */
*ip
, ufs_daddr_t bno
, long size
)
1663 struct mount
*mp
= ip
->i_devvp
->v_mount
;
1664 struct ffs_blkfree_trim_params
*tp
;
1666 if (!(mp
->mnt_flag
& MNT_TRIM
)) {
1667 ffs_blkfree_cg(ip
->i_fs
, ip
->i_devvp
,ip
->i_dev
,ip
->i_number
,
1668 ip
->i_uid
, bno
, size
);
1674 tp
= kmalloc(sizeof(struct ffs_blkfree_trim_params
), M_TEMP
, M_WAITOK
);
1677 tp
->i_devvp
= ip
->i_devvp
;
1678 tp
->i_dev
= ip
->i_dev
;
1679 tp
->i_din_uid
= ip
->i_uid
;
1680 tp
->i_number
= ip
->i_number
;
1683 bp
= getnewbuf(0, 0, 0, 1, NULL
);
1685 bp
->b_cmd
= BUF_CMD_FREEBLKS
;
1686 bp
->b_bio1
.bio_offset
= fsbtodoff(ip
->i_fs
, bno
);
1687 bp
->b_bcount
= size
;
1688 bp
->b_bio1
.bio_caller_info1
.ptr
= tp
;
1689 bp
->b_bio1
.bio_done
= ffs_blkfree_trim_completed
;
1690 vn_strategy(ip
->i_devvp
, &bp
->b_bio1
);
1695 * Verify allocation of a block or fragment. Returns true if block or
1696 * fragment is allocated, false if it is free.
1699 ffs_checkblk(struct inode
*ip
, ufs_daddr_t bno
, long size
)
1704 int i
, error
, frags
, free
;
1708 if ((uint
)size
> fs
->fs_bsize
|| fragoff(fs
, size
) != 0) {
1709 kprintf("bsize = %ld, size = %ld, fs = %s\n",
1710 (long)fs
->fs_bsize
, size
, fs
->fs_fsmnt
);
1711 panic("ffs_checkblk: bad size");
1713 if ((uint
)bno
>= fs
->fs_size
)
1714 panic("ffs_checkblk: bad block %d", bno
);
1715 error
= bread(ip
->i_devvp
, fsbtodoff(fs
, cgtod(fs
, dtog(fs
, bno
))),
1716 (int)fs
->fs_cgsize
, &bp
);
1718 panic("ffs_checkblk: cg bread failed");
1719 cgp
= (struct cg
*)bp
->b_data
;
1720 if (!cg_chkmagic(cgp
))
1721 panic("ffs_checkblk: cg magic mismatch");
1722 blksfree
= cg_blksfree(cgp
);
1723 bno
= dtogd(fs
, bno
);
1724 if (size
== fs
->fs_bsize
) {
1725 free
= ffs_isblock(fs
, blksfree
, fragstoblks(fs
, bno
));
1727 frags
= numfrags(fs
, size
);
1728 for (free
= 0, i
= 0; i
< frags
; i
++)
1729 if (isset(blksfree
, bno
+ i
))
1731 if (free
!= 0 && free
!= frags
)
1732 panic("ffs_checkblk: partially free fragment");
1737 #endif /* DIAGNOSTIC */
1743 ffs_vfree(struct vnode
*pvp
, ino_t ino
, int mode
)
1745 if (DOINGSOFTDEP(pvp
)) {
1746 softdep_freefile(pvp
, ino
, mode
);
1749 return (ffs_freefile(pvp
, ino
, mode
));
1753 * Do the actual free operation.
1754 * The specified inode is placed back in the free map.
1757 ffs_freefile(struct vnode
*pvp
, ino_t ino
, int mode
)
1768 if ((uint
)ino
>= fs
->fs_ipg
* fs
->fs_ncg
)
1769 panic("ffs_vfree: range: dev = (%d,%d), ino = %"PRId64
", fs = %s",
1770 major(pip
->i_dev
), minor(pip
->i_dev
), ino
, fs
->fs_fsmnt
);
1771 cg
= ino_to_cg(fs
, ino
);
1772 error
= bread(pip
->i_devvp
, fsbtodoff(fs
, cgtod(fs
, cg
)),
1773 (int)fs
->fs_cgsize
, &bp
);
1778 cgp
= (struct cg
*)bp
->b_data
;
1779 if (!cg_chkmagic(cgp
)) {
1783 cgp
->cg_time
= time_second
;
1784 inosused
= cg_inosused(cgp
);
1786 if (isclr(inosused
, ino
)) {
1787 kprintf("dev = %s, ino = %lu, fs = %s\n",
1788 devtoname(pip
->i_dev
), (u_long
)ino
, fs
->fs_fsmnt
);
1789 if (fs
->fs_ronly
== 0)
1790 panic("ffs_vfree: freeing free inode");
1792 clrbit(inosused
, ino
);
1793 if (ino
< cgp
->cg_irotor
)
1794 cgp
->cg_irotor
= ino
;
1795 cgp
->cg_cs
.cs_nifree
++;
1796 fs
->fs_cstotal
.cs_nifree
++;
1797 fs
->fs_cs(fs
, cg
).cs_nifree
++;
1798 if ((mode
& IFMT
) == IFDIR
) {
1799 cgp
->cg_cs
.cs_ndir
--;
1800 fs
->fs_cstotal
.cs_ndir
--;
1801 fs
->fs_cs(fs
, cg
).cs_ndir
--;
/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
1815 ffs_mapsearch(struct fs
*fs
, struct cg
*cgp
, ufs_daddr_t bpref
, int allocsiz
)
1818 int start
, len
, loc
, i
;
1819 int blk
, field
, subfield
, pos
;
1823 * find the fragment by searching through the free block
1824 * map for an appropriate bit pattern.
1827 start
= dtogd(fs
, bpref
) / NBBY
;
1829 start
= cgp
->cg_frotor
/ NBBY
;
1830 blksfree
= cg_blksfree(cgp
);
1831 len
= howmany(fs
->fs_fpg
, NBBY
) - start
;
1832 loc
= scanc((uint
)len
, (u_char
*)&blksfree
[start
],
1833 (u_char
*)fragtbl
[fs
->fs_frag
],
1834 (u_char
)(1 << (allocsiz
- 1 + (fs
->fs_frag
% NBBY
))));
1836 len
= start
+ 1; /* XXX why overlap here? */
1838 loc
= scanc((uint
)len
, (u_char
*)&blksfree
[0],
1839 (u_char
*)fragtbl
[fs
->fs_frag
],
1840 (u_char
)(1 << (allocsiz
- 1 + (fs
->fs_frag
% NBBY
))));
1842 kprintf("start = %d, len = %d, fs = %s\n",
1843 start
, len
, fs
->fs_fsmnt
);
1844 panic("ffs_alloccg: map corrupted");
1848 bno
= (start
+ len
- loc
) * NBBY
;
1849 cgp
->cg_frotor
= bno
;
1851 * found the byte in the map
1852 * sift through the bits to find the selected frag
1854 for (i
= bno
+ NBBY
; bno
< i
; bno
+= fs
->fs_frag
) {
1855 blk
= blkmap(fs
, blksfree
, bno
);
1857 field
= around
[allocsiz
];
1858 subfield
= inside
[allocsiz
];
1859 for (pos
= 0; pos
<= fs
->fs_frag
- allocsiz
; pos
++) {
1860 if ((blk
& field
) == subfield
)
1866 kprintf("bno = %lu, fs = %s\n", (u_long
)bno
, fs
->fs_fsmnt
);
1867 panic("ffs_alloccg: block not in map");
/*
 * Update the cluster map because of an allocation or free.
 *
 * Cnt == 1 means free; cnt == -1 means allocating.
 */
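/*
 * Illustrative free (assuming fs_contigsumsize = 4): freeing a block that
 * sits between a 1-block free run behind it and a 2-block free run in
 * front of it merges them into a single 4-block cluster, so the code
 * below decrements cg_clustersum[1] and cg_clustersum[2], increments
 * cg_clustersum[4], and refreshes fs_maxcluster[] for the group.
 */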
1877 ffs_clusteracct(struct fs
*fs
, struct cg
*cgp
, ufs_daddr_t blkno
, int cnt
)
1881 u_char
*freemapp
, *mapp
;
1882 int i
, start
, end
, forw
, back
, map
, bit
;
1884 if (fs
->fs_contigsumsize
<= 0)
1886 freemapp
= cg_clustersfree(cgp
);
1887 sump
= cg_clustersum(cgp
);
1889 * Allocate or clear the actual block.
1892 setbit(freemapp
, blkno
);
1894 clrbit(freemapp
, blkno
);
1896 * Find the size of the cluster going forward.
1899 end
= start
+ fs
->fs_contigsumsize
;
1900 if (end
>= cgp
->cg_nclusterblks
)
1901 end
= cgp
->cg_nclusterblks
;
1902 mapp
= &freemapp
[start
/ NBBY
];
1904 bit
= 1 << (start
% NBBY
);
1905 for (i
= start
; i
< end
; i
++) {
1906 if ((map
& bit
) == 0)
1908 if ((i
& (NBBY
- 1)) != (NBBY
- 1)) {
1917 * Find the size of the cluster going backward.
1920 end
= start
- fs
->fs_contigsumsize
;
1923 mapp
= &freemapp
[start
/ NBBY
];
1925 bit
= 1 << (start
% NBBY
);
1926 for (i
= start
; i
> end
; i
--) {
1927 if ((map
& bit
) == 0)
1929 if ((i
& (NBBY
- 1)) != 0) {
1933 bit
= 1 << (NBBY
- 1);
1938 * Account for old cluster and the possibly new forward and
1941 i
= back
+ forw
+ 1;
1942 if (i
> fs
->fs_contigsumsize
)
1943 i
= fs
->fs_contigsumsize
;
1950 * Update cluster summary information.
1952 lp
= &sump
[fs
->fs_contigsumsize
];
1953 for (i
= fs
->fs_contigsumsize
; i
> 0; i
--)
1956 fs
->fs_maxcluster
[cgp
->cg_cgx
] = i
;
/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
1966 ffs_fserr(struct fs
*fs
, uint uid
, char *cp
)
1968 struct thread
*td
= curthread
;
1971 if ((p
= td
->td_proc
) != NULL
) {
1972 log(LOG_ERR
, "pid %d (%s), uid %d on %s: %s\n", p
? p
->p_pid
: -1,
1973 p
? p
->p_comm
: "-", uid
, fs
->fs_fsmnt
, cp
);
1975 log(LOG_ERR
, "system thread %p, uid %d on %s: %s\n",
1976 td
, uid
, fs
->fs_fsmnt
, cp
);