fs/jfs/jfs_dmap.c
1 /*
2 * Copyright (C) International Business Machines Corp., 2000-2004
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 #include <linux/fs.h>
20 #include "jfs_incore.h"
21 #include "jfs_superblock.h"
22 #include "jfs_dmap.h"
23 #include "jfs_imap.h"
24 #include "jfs_lock.h"
25 #include "jfs_metapage.h"
26 #include "jfs_debug.h"
29 * SERIALIZATION of the Block Allocation Map.
31 * the working state of the block allocation map is accessed in
32 * two directions:
34 * 1) allocation and free requests that start at the dmap
35 * level and move up through the dmap control pages (i.e.
36 * the vast majority of requests).
38 * 2) allocation requests that start at dmap control page
39 * level and work down towards the dmaps.
41 * the serialization scheme used here is as follows.
43 * requests which start at the bottom are serialized against each
44 * other through buffers and each request holds onto its buffers
45 * as it works its way up from a single dmap to the required level
46 * of dmap control page.
47 * requests that start at the top are serialized against each other
48 * and requests that start from the bottom by the multiple read/single
49 * write inode lock of the bmap inode. requests starting at the top
50 * take this lock in write mode while requests starting at the bottom
51 * take the lock in read mode. a single top-down request may proceed
52 * exclusively while multiple bottom-up requests may proceed
53 * simultaneously (under the protection of busy buffers).
55 * in addition to information found in dmaps and dmap control pages,
56 * the working state of the block allocation map also includes read/
57 * write information maintained in the bmap descriptor (i.e. total
58 * free block count, allocation group level free block counts).
59 * a single exclusive lock (BMAP_LOCK) is used to guard this information
60 * in the face of multiple bottom-up requests.
61 * (lock ordering: IREAD_LOCK, BMAP_LOCK);
63 * accesses to the persistent state of the block allocation map (limited
64 * to the persistent bitmaps in dmaps) are guarded by (busy) buffers.
67 #define BMAP_LOCK_INIT(bmp) init_MUTEX(&bmp->db_bmaplock)
68 #define BMAP_LOCK(bmp) down(&bmp->db_bmaplock)
69 #define BMAP_UNLOCK(bmp) up(&bmp->db_bmaplock)
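/*
 * Illustrative sketch (not part of the original source): the lock ordering
 * described above, as a hypothetical bottom-up update would follow it.
 * The bmap inode lock is taken first in read (shared) mode; BMAP_LOCK is
 * taken second and only around the summary state it guards.
 */
static void example_bottom_up_update(struct inode *ipbmap, struct bmap *bmp,
				     s64 nfreed)
{
	IREAD_LOCK(ipbmap);	/* shared: serializes against top-down callers */

	/* ... per-dmap work, serialized by busy metapage buffers ... */

	BMAP_LOCK(bmp);		/* exclusive: guards db_nfree and the ag counts */
	bmp->db_nfree += nfreed;
	BMAP_UNLOCK(bmp);

	IREAD_UNLOCK(ipbmap);
}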
72 * forward references
74 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
75 int nblocks);
76 static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
77 static int dbBackSplit(dmtree_t * tp, int leafno);
78 static int dbJoin(dmtree_t * tp, int leafno, int newval);
79 static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
80 static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
81 int level);
82 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
83 static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
84 int nblocks);
85 static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
86 int nblocks,
87 int l2nb, s64 * results);
88 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
89 int nblocks);
90 static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
91 int l2nb,
92 s64 * results);
93 static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
94 s64 * results);
95 static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
96 s64 * results);
97 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
98 static int dbFindBits(u32 word, int l2nb);
99 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
100 static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
101 static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
102 int nblocks);
103 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
104 int nblocks);
105 static int dbMaxBud(u8 * cp);
106 s64 dbMapFileSizeToMapSize(struct inode *ipbmap);
107 static int blkstol2(s64 nb);
109 static int cntlz(u32 value);
110 static int cnttz(u32 word);
112 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
113 int nblocks);
114 static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
115 static int dbInitDmapTree(struct dmap * dp);
116 static int dbInitTree(struct dmaptree * dtp);
117 static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
118 static int dbGetL2AGSize(s64 nblocks);
121 * buddy table
123 * table used for determining buddy sizes within characters of
124 * dmap bitmap words. the characters themselves serve as indexes
125 * into the table, with the table elements yielding the maximum
126 * binary buddy of free bits within the character.
128 static s8 budtab[256] = {
129 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
130 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
131 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
132 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
133 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
134 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
135 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
136 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
137 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
138 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
139 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
140 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
141 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
142 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
143 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
144 2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
145 };
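/*
 * Illustrative sketch (not part of the original source): one way a single
 * budtab[] entry can be derived.  treating a byte of a dmap bitmap word as
 * eight blocks (0 bit = free, 1 bit = allocated), the entry is the log2 size
 * of the largest naturally aligned run of free bits, or -1 (NOFREE) when no
 * bit in the byte is free, e.g. budtab[0x00] == 3 and budtab[0xff] == -1.
 */
static s8 example_budtab_entry(u8 c)
{
	int l2size, size, off;

	for (l2size = 3; l2size >= 0; l2size--) {
		size = 1 << l2size;
		for (off = 0; off < 8; off += size) {
			u8 mask = (u8) (((1 << size) - 1) << (8 - size - off));

			if ((c & mask) == 0)
				return l2size;
		}
	}
	return -1;
}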
149 * NAME: dbMount()
151 * FUNCTION: initialize the block allocation map.
153 * memory is allocated for the in-core bmap descriptor and
154 * the in-core descriptor is initialized from disk.
156 * PARAMETERS:
157 * ipbmap - pointer to in-core inode for the block map.
159 * RETURN VALUES:
160 * 0 - success
161 * -ENOMEM - insufficient memory
162 * -EIO - i/o error
164 int dbMount(struct inode *ipbmap)
166 struct bmap *bmp;
167 struct dbmap_disk *dbmp_le;
168 struct metapage *mp;
169 int i;
172 * allocate/initialize the in-memory bmap descriptor
174 /* allocate memory for the in-memory bmap descriptor */
175 bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
176 if (bmp == NULL)
177 return -ENOMEM;
179 /* read the on-disk bmap descriptor. */
180 mp = read_metapage(ipbmap,
181 BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
182 PSIZE, 0);
183 if (mp == NULL) {
184 kfree(bmp);
185 return -EIO;
188 /* copy the on-disk bmap descriptor to its in-memory version. */
189 dbmp_le = (struct dbmap_disk *) mp->data;
190 bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
191 bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
192 bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
193 bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
194 bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
195 bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
196 bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
197 bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
198 bmp->db_agheigth = le32_to_cpu(dbmp_le->dn_agheigth);
199 bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
200 bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
201 bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
202 for (i = 0; i < MAXAG; i++)
203 bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
204 bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
205 bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;
207 /* release the buffer. */
208 release_metapage(mp);
210 /* bind the bmap inode and the bmap descriptor to each other. */
211 bmp->db_ipbmap = ipbmap;
212 JFS_SBI(ipbmap->i_sb)->bmap = bmp;
214 memset(bmp->db_active, 0, sizeof(bmp->db_active));
217 * allocate/initialize the bmap lock
219 BMAP_LOCK_INIT(bmp);
221 return (0);
226 * NAME: dbUnmount()
228 * FUNCTION: terminate the block allocation map in preparation for
229 * file system unmount.
231 * the in-core bmap descriptor is written to disk and
232 * the memory for this descriptor is freed.
234 * PARAMETERS:
235 * ipbmap - pointer to in-core inode for the block map.
237 * RETURN VALUES:
238 * 0 - success
239 * -EIO - i/o error
241 int dbUnmount(struct inode *ipbmap, int mounterror)
243 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
245 if (!(mounterror || isReadOnly(ipbmap)))
246 dbSync(ipbmap);
249 * Invalidate the page cache buffers
251 truncate_inode_pages(ipbmap->i_mapping, 0);
253 /* free the memory for the in-memory bmap. */
254 kfree(bmp);
256 return (0);
260 * dbSync()
262 int dbSync(struct inode *ipbmap)
264 struct dbmap_disk *dbmp_le;
265 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
266 struct metapage *mp;
267 int i;
270 * write bmap global control page
272 /* get the buffer for the on-disk bmap descriptor. */
273 mp = read_metapage(ipbmap,
274 BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
275 PSIZE, 0);
276 if (mp == NULL) {
277 jfs_err("dbSync: read_metapage failed!");
278 return -EIO;
280 /* copy the in-memory version of the bmap to the on-disk version */
281 dbmp_le = (struct dbmap_disk *) mp->data;
282 dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
283 dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
284 dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
285 dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
286 dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
287 dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
288 dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
289 dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
290 dbmp_le->dn_agheigth = cpu_to_le32(bmp->db_agheigth);
291 dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
292 dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
293 dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
294 for (i = 0; i < MAXAG; i++)
295 dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
296 dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
297 dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;
299 /* write the buffer */
300 write_metapage(mp);
303 * write out dirty pages of bmap
305 filemap_write_and_wait(ipbmap->i_mapping);
307 diWriteSpecial(ipbmap, 0);
309 return (0);
314 * NAME: dbFree()
316 * FUNCTION: free the specified block range from the working block
317 * allocation map.
319 * the blocks will be freed from the working map one dmap
320 * at a time.
322 * PARAMETERS:
323 * ip - pointer to in-core inode;
324 * blkno - starting block number to be freed.
325 * nblocks - number of blocks to be freed.
327 * RETURN VALUES:
328 * 0 - success
329 * -EIO - i/o error
331 int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
333 struct metapage *mp;
334 struct dmap *dp;
335 int nb, rc;
336 s64 lblkno, rem;
337 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
338 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
340 IREAD_LOCK(ipbmap);
342 /* block to be freed better be within the mapsize. */
343 if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
344 IREAD_UNLOCK(ipbmap);
345 printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
346 (unsigned long long) blkno,
347 (unsigned long long) nblocks);
348 jfs_error(ip->i_sb,
349 "dbFree: block to be freed is outside the map");
350 return -EIO;
354 * free the blocks a dmap at a time.
356 mp = NULL;
357 for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
358 /* release previous dmap if any */
359 if (mp) {
360 write_metapage(mp);
363 /* get the buffer for the current dmap. */
364 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
365 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
366 if (mp == NULL) {
367 IREAD_UNLOCK(ipbmap);
368 return -EIO;
370 dp = (struct dmap *) mp->data;
372 /* determine the number of blocks to be freed from
373 * this dmap.
375 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
377 /* free the blocks. */
378 if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
379 jfs_error(ip->i_sb, "dbFree: error in block map\n");
380 release_metapage(mp);
381 IREAD_UNLOCK(ipbmap);
382 return (rc);
386 /* write the last buffer. */
387 write_metapage(mp);
389 IREAD_UNLOCK(ipbmap);
391 return (0);
396 * NAME: dbUpdatePMap()
398 * FUNCTION: update the allocation state (free or allocate) of the
399 * specified block range in the persistent block allocation map.
401 * the blocks will be updated in the persistent map one
402 * dmap at a time.
404 * PARAMETERS:
405 * ipbmap - pointer to in-core inode for the block map.
406 * free - TRUE if block range is to be freed from the persistent
407 * map; FALSE if it is to be allocated.
408 * blkno - starting block number of the range.
409 * nblocks - number of contiguous blocks in the range.
410 * tblk - transaction block;
412 * RETURN VALUES:
413 * 0 - success
414 * -EIO - i/o error
416 int
417 dbUpdatePMap(struct inode *ipbmap,
418 int free, s64 blkno, s64 nblocks, struct tblock * tblk)
420 int nblks, dbitno, wbitno, rbits;
421 int word, nbits, nwords;
422 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
423 s64 lblkno, rem, lastlblkno;
424 u32 mask;
425 struct dmap *dp;
426 struct metapage *mp;
427 struct jfs_log *log;
428 int lsn, difft, diffp;
429 unsigned long flags;
431 /* the blocks better be within the mapsize. */
432 if (blkno + nblocks > bmp->db_mapsize) {
433 printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
434 (unsigned long long) blkno,
435 (unsigned long long) nblocks);
436 jfs_error(ipbmap->i_sb,
437 "dbUpdatePMap: blocks are outside the map");
438 return -EIO;
441 /* compute delta of transaction lsn from log syncpt */
442 lsn = tblk->lsn;
443 log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
444 logdiff(difft, lsn, log);
447 * update the block state a dmap at a time.
449 mp = NULL;
450 lastlblkno = 0;
451 for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
452 /* get the buffer for the current dmap. */
453 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
454 if (lblkno != lastlblkno) {
455 if (mp) {
456 write_metapage(mp);
459 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE,
460 0);
461 if (mp == NULL)
462 return -EIO;
463 metapage_wait_for_io(mp);
465 dp = (struct dmap *) mp->data;
467 /* determine the bit number and word within the dmap of
468 * the starting block. also determine how many blocks
469 * are to be updated within this dmap.
471 dbitno = blkno & (BPERDMAP - 1);
472 word = dbitno >> L2DBWORD;
473 nblks = min(rem, (s64)BPERDMAP - dbitno);
475 /* update the bits of the dmap words. the first and last
476 * words may only have a subset of their bits updated. if
477 * this is the case, we'll work against that word (i.e.
478 * partial first and/or last) only in a single pass. a
479 * single pass will also be used to update all words that
480 * are to have all their bits updated.
482 for (rbits = nblks; rbits > 0;
483 rbits -= nbits, dbitno += nbits) {
484 /* determine the bit number within the word and
485 * the number of bits within the word.
487 wbitno = dbitno & (DBWORD - 1);
488 nbits = min(rbits, DBWORD - wbitno);
490 /* check if only part of the word is to be updated. */
491 if (nbits < DBWORD) {
492 /* update (free or allocate) the bits
493 * in this word.
495 mask =
496 (ONES << (DBWORD - nbits) >> wbitno);
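/* illustrative example (not in the original source): wbitno == 4, nbits == 3 gives mask == 0x0e000000 */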
497 if (free)
498 dp->pmap[word] &=
499 cpu_to_le32(~mask);
500 else
501 dp->pmap[word] |=
502 cpu_to_le32(mask);
504 word += 1;
505 } else {
506 /* one or more words are to have all
507 * their bits updated. determine how
508 * many words and how many bits.
510 nwords = rbits >> L2DBWORD;
511 nbits = nwords << L2DBWORD;
513 /* update (free or allocate) the bits
514 * in these words.
516 if (free)
517 memset(&dp->pmap[word], 0,
518 nwords * 4);
519 else
520 memset(&dp->pmap[word], (int) ONES,
521 nwords * 4);
523 word += nwords;
528 * update dmap lsn
530 if (lblkno == lastlblkno)
531 continue;
533 lastlblkno = lblkno;
535 if (mp->lsn != 0) {
536 /* inherit older/smaller lsn */
537 logdiff(diffp, mp->lsn, log);
538 LOGSYNC_LOCK(log, flags);
539 if (difft < diffp) {
540 mp->lsn = lsn;
542 /* move bp after tblock in logsync list */
543 list_move(&mp->synclist, &tblk->synclist);
546 /* inherit younger/larger clsn */
547 logdiff(difft, tblk->clsn, log);
548 logdiff(diffp, mp->clsn, log);
549 if (difft > diffp)
550 mp->clsn = tblk->clsn;
551 LOGSYNC_UNLOCK(log, flags);
552 } else {
553 mp->log = log;
554 mp->lsn = lsn;
556 /* insert bp after tblock in logsync list */
557 LOGSYNC_LOCK(log, flags);
559 log->count++;
560 list_add(&mp->synclist, &tblk->synclist);
562 mp->clsn = tblk->clsn;
563 LOGSYNC_UNLOCK(log, flags);
567 /* write the last buffer. */
568 if (mp) {
569 write_metapage(mp);
572 return (0);
577 * NAME: dbNextAG()
579 * FUNCTION: find the preferred allocation group for new allocations.
581 * Within the allocation groups, we maintain a preferred
582 * allocation group which consists of a group with at least
583 * average free space. It is the preferred group that we target
584 * new inode allocation towards. The tie-in between inode
585 * allocation and block allocation occurs as we allocate the
586 * first (data) block of an inode and specify the inode (block)
587 * as the allocation hint for this block.
589 * We try to avoid having more than one open file growing in
590 * an allocation group, as this will lead to fragmentation.
591 * This differs from the old OS/2 method of trying to keep
592 * empty ags around for large allocations.
594 * PARAMETERS:
595 * ipbmap - pointer to in-core inode for the block map.
597 * RETURN VALUES:
598 * the preferred allocation group number.
600 int dbNextAG(struct inode *ipbmap)
602 s64 avgfree;
603 int agpref;
604 s64 hwm = 0;
605 int i;
606 int next_best = -1;
607 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
609 BMAP_LOCK(bmp);
611 /* determine the average number of free blocks within the ags. */
612 avgfree = (u32)bmp->db_nfree / bmp->db_numag;
615 * if the current preferred ag does not have an active allocator
616 * and has at least average freespace, return it
618 agpref = bmp->db_agpref;
619 if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
620 (bmp->db_agfree[agpref] >= avgfree))
621 goto unlock;
623 /* From the last preferred ag, find the next one with at least
624 * average free space.
626 for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
627 if (agpref == bmp->db_numag)
628 agpref = 0;
630 if (atomic_read(&bmp->db_active[agpref]))
631 /* open file is currently growing in this ag */
632 continue;
633 if (bmp->db_agfree[agpref] >= avgfree) {
634 /* Return this one */
635 bmp->db_agpref = agpref;
636 goto unlock;
637 } else if (bmp->db_agfree[agpref] > hwm) {
638 /* Less than avg. freespace, but best so far */
639 hwm = bmp->db_agfree[agpref];
640 next_best = agpref;
645 * If no inactive ag was found with average freespace, use the
646 * next best
648 if (next_best != -1)
649 bmp->db_agpref = next_best;
650 /* else leave db_agpref unchanged */
651 unlock:
652 BMAP_UNLOCK(bmp);
654 /* return the preferred group.
656 return (bmp->db_agpref);
660 * NAME: dbAlloc()
662 * FUNCTION: attempt to allocate a specified number of contiguous free
663 * blocks from the working allocation block map.
665 * the block allocation policy uses hints and a multi-step
666 * approach.
668 * for allocation requests smaller than the number of blocks
669 * per dmap, we first try to allocate the new blocks
670 * immediately following the hint. if these blocks are not
671 * available, we try to allocate blocks near the hint. if
672 * no blocks near the hint are available, we next try to
673 * allocate within the same dmap as contains the hint.
675 * if no blocks are available in the dmap or the allocation
676 * request is larger than the dmap size, we try to allocate
677 * within the same allocation group as contains the hint. if
678 * this does not succeed, we finally try to allocate anywhere
679 * within the aggregate.
681 * we also try to allocate anywhere within the aggregate for
682 * allocation requests larger than the allocation group
683 * size or requests that specify no hint value.
685 * PARAMETERS:
686 * ip - pointer to in-core inode;
687 * hint - allocation hint.
688 * nblocks - number of contiguous blocks in the range.
689 * results - on successful return, set to the starting block number
690 * of the newly allocated contiguous range.
692 * RETURN VALUES:
693 * 0 - success
694 * -ENOSPC - insufficient disk resources
695 * -EIO - i/o error
697 int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
699 int rc, agno;
700 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
701 struct bmap *bmp;
702 struct metapage *mp;
703 s64 lblkno, blkno;
704 struct dmap *dp;
705 int l2nb;
706 s64 mapSize;
707 int writers;
709 /* assert that nblocks is valid */
710 assert(nblocks > 0);
712 #ifdef _STILL_TO_PORT
713 /* DASD limit check F226941 */
714 if (OVER_LIMIT(ip, nblocks))
715 return -ENOSPC;
716 #endif /* _STILL_TO_PORT */
718 /* get the log2 number of blocks to be allocated.
719 * if the number of blocks is not a log2 multiple,
720 * it will be rounded up to the next log2 multiple.
722 l2nb = BLKSTOL2(nblocks);
724 bmp = JFS_SBI(ip->i_sb)->bmap;
726 //retry: /* serialize w.r.t.extendfs() */
727 mapSize = bmp->db_mapsize;
729 /* the hint should be within the map */
730 if (hint >= mapSize) {
731 jfs_error(ip->i_sb, "dbAlloc: the hint is outside the map");
732 return -EIO;
735 /* if the number of blocks to be allocated is greater than the
736 * allocation group size, try to allocate anywhere.
738 if (l2nb > bmp->db_agl2size) {
739 IWRITE_LOCK(ipbmap);
741 rc = dbAllocAny(bmp, nblocks, l2nb, results);
743 goto write_unlock;
747 * If no hint, let dbNextAG recommend an allocation group
749 if (hint == 0)
750 goto pref_ag;
752 /* we would like to allocate close to the hint. adjust the
753 * hint to the block following the hint since the allocators
754 * will start looking for free space starting at this point.
756 blkno = hint + 1;
758 if (blkno >= bmp->db_mapsize)
759 goto pref_ag;
761 agno = blkno >> bmp->db_agl2size;
763 /* check if blkno crosses over into a new allocation group.
764 * if so, check if we should allow allocations within this
765 * allocation group.
767 if ((blkno & (bmp->db_agsize - 1)) == 0)
768 /* check if the AG is currently being written to.
769 * if so, call dbNextAG() to find a non-busy
770 * AG with sufficient free space.
772 if (atomic_read(&bmp->db_active[agno]))
773 goto pref_ag;
775 /* check if the allocation request size can be satisfied from a
776 * single dmap. if so, try to allocate from the dmap containing
777 * the hint using a tiered strategy.
779 if (nblocks <= BPERDMAP) {
780 IREAD_LOCK(ipbmap);
782 /* get the buffer for the dmap containing the hint.
784 rc = -EIO;
785 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
786 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
787 if (mp == NULL)
788 goto read_unlock;
790 dp = (struct dmap *) mp->data;
792 /* first, try to satisfy the allocation request with the
793 * blocks beginning at the hint.
795 if ((rc = dbAllocNext(bmp, dp, blkno, (int) nblocks))
796 != -ENOSPC) {
797 if (rc == 0) {
798 *results = blkno;
799 mark_metapage_dirty(mp);
802 release_metapage(mp);
803 goto read_unlock;
806 writers = atomic_read(&bmp->db_active[agno]);
807 if ((writers > 1) ||
808 ((writers == 1) && (JFS_IP(ip)->active_ag != agno))) {
810 * Someone else is writing in this allocation
811 * group. To avoid fragmenting, try another ag
813 release_metapage(mp);
814 IREAD_UNLOCK(ipbmap);
815 goto pref_ag;
818 /* next, try to satisfy the allocation request with blocks
819 * near the hint.
821 if ((rc =
822 dbAllocNear(bmp, dp, blkno, (int) nblocks, l2nb, results))
823 != -ENOSPC) {
824 if (rc == 0)
825 mark_metapage_dirty(mp);
827 release_metapage(mp);
828 goto read_unlock;
831 /* try to satisfy the allocation request with blocks within
832 * the same dmap as the hint.
834 if ((rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results))
835 != -ENOSPC) {
836 if (rc == 0)
837 mark_metapage_dirty(mp);
839 release_metapage(mp);
840 goto read_unlock;
843 release_metapage(mp);
844 IREAD_UNLOCK(ipbmap);
847 /* try to satisfy the allocation request with blocks within
848 * the same allocation group as the hint.
850 IWRITE_LOCK(ipbmap);
851 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) != -ENOSPC)
852 goto write_unlock;
854 IWRITE_UNLOCK(ipbmap);
857 pref_ag:
859 * Let dbNextAG recommend a preferred allocation group
861 agno = dbNextAG(ipbmap);
862 IWRITE_LOCK(ipbmap);
864 /* Try to allocate within this allocation group. if that fails, try to
865 * allocate anywhere in the map.
867 if ((rc = dbAllocAG(bmp, agno, nblocks, l2nb, results)) == -ENOSPC)
868 rc = dbAllocAny(bmp, nblocks, l2nb, results);
870 write_unlock:
871 IWRITE_UNLOCK(ipbmap);
873 return (rc);
875 read_unlock:
876 IREAD_UNLOCK(ipbmap);
878 return (rc);
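/*
 * Hypothetical caller sketch (not part of the original source): how an
 * extent-management layer might drive dbAlloc().  The hint is typically a
 * block adjacent to previously allocated data so that the tiered policy
 * above can try to extend the allocation contiguously.
 */
static int example_alloc_extent(struct inode *ip, s64 hint, s64 nblocks,
				s64 *extent_start)
{
	int rc;

	rc = dbAlloc(ip, hint, nblocks, extent_start);
	if (rc == -ENOSPC && nblocks > 1)
		/* e.g. fall back to a smaller request before giving up */
		rc = dbAlloc(ip, hint, nblocks >> 1, extent_start);

	return rc;
}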
881 #ifdef _NOTYET
883 * NAME: dbAllocExact()
885 * FUNCTION: try to allocate the requested extent;
887 * PARAMETERS:
888 * ip - pointer to in-core inode;
889 * blkno - extent address;
890 * nblocks - extent length;
892 * RETURN VALUES:
893 * 0 - success
894 * -ENOSPC - insufficient disk resources
895 * -EIO - i/o error
897 int dbAllocExact(struct inode *ip, s64 blkno, int nblocks)
899 int rc;
900 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
901 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
902 struct dmap *dp;
903 s64 lblkno;
904 struct metapage *mp;
906 IREAD_LOCK(ipbmap);
909 * validate extent request:
911 * note: defragfs policy:
912 * max 64 blocks will be moved.
913 * allocation request size must be satisfied from a single dmap.
915 if (nblocks <= 0 || nblocks > BPERDMAP || blkno >= bmp->db_mapsize) {
916 IREAD_UNLOCK(ipbmap);
917 return -EINVAL;
920 if (nblocks > ((s64) 1 << bmp->db_maxfreebud)) {
921 /* the free space is no longer available */
922 IREAD_UNLOCK(ipbmap);
923 return -ENOSPC;
926 /* read in the dmap covering the extent */
927 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
928 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
929 if (mp == NULL) {
930 IREAD_UNLOCK(ipbmap);
931 return -EIO;
933 dp = (struct dmap *) mp->data;
935 /* try to allocate the requested extent */
936 rc = dbAllocNext(bmp, dp, blkno, nblocks);
938 IREAD_UNLOCK(ipbmap);
940 if (rc == 0)
941 mark_metapage_dirty(mp);
943 release_metapage(mp);
945 return (rc);
947 #endif /* _NOTYET */
950 * NAME: dbReAlloc()
952 * FUNCTION: attempt to extend a current allocation by a specified
953 * number of blocks.
955 * this routine attempts to satisfy the allocation request
956 * by first trying to extend the existing allocation in
957 * place by allocating the additional blocks as the blocks
958 * immediately following the current allocation. if these
959 * blocks are not available, this routine will attempt to
960 * allocate a new set of contiguous blocks large enough
961 * to cover the existing allocation plus the additional
962 * number of blocks required.
964 * PARAMETERS:
965 * ip - pointer to in-core inode requiring allocation.
966 * blkno - starting block of the current allocation.
967 * nblocks - number of contiguous blocks within the current
968 * allocation.
969 * addnblocks - number of blocks to add to the allocation.
970 * results - on successful return, set to the starting block number
971 * of the existing allocation if the existing allocation
972 * was extended in place or to a newly allocated contiguous
973 * range if the existing allocation could not be extended
974 * in place.
976 * RETURN VALUES:
977 * 0 - success
978 * -ENOSPC - insufficient disk resources
979 * -EIO - i/o error
981 int
982 dbReAlloc(struct inode *ip,
983 s64 blkno, s64 nblocks, s64 addnblocks, s64 * results)
985 int rc;
987 /* try to extend the allocation in place.
989 if ((rc = dbExtend(ip, blkno, nblocks, addnblocks)) == 0) {
990 *results = blkno;
991 return (0);
992 } else {
993 if (rc != -ENOSPC)
994 return (rc);
997 /* could not extend the allocation in place, so allocate a
998 * new set of blocks for the entire request (i.e. try to get
999 * a range of contiguous blocks large enough to cover the
1000 * existing allocation plus the additional blocks.)
1002 return (dbAlloc
1003 (ip, blkno + nblocks - 1, addnblocks + nblocks, results));
1008 * NAME: dbExtend()
1010 * FUNCTION: attempt to extend a current allocation by a specified
1011 * number of blocks.
1013 * this routine attempts to satisfy the allocation request
1014 * by first trying to extend the existing allocation in
1015 * place by allocating the additional blocks as the blocks
1016 * immediately following the current allocation.
1018 * PARAMETERS:
1019 * ip - pointer to in-core inode requiring allocation.
1020 * blkno - starting block of the current allocation.
1021 * nblocks - number of contiguous blocks within the current
1022 * allocation.
1023 * addnblocks - number of blocks to add to the allocation.
1025 * RETURN VALUES:
1026 * 0 - success
1027 * -ENOSPC - insufficient disk resources
1028 * -EIO - i/o error
1030 static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks)
1032 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
1033 s64 lblkno, lastblkno, extblkno;
1034 uint rel_block;
1035 struct metapage *mp;
1036 struct dmap *dp;
1037 int rc;
1038 struct inode *ipbmap = sbi->ipbmap;
1039 struct bmap *bmp;
1042 * We don't want a non-aligned extent to cross a page boundary
1044 if (((rel_block = blkno & (sbi->nbperpage - 1))) &&
1045 (rel_block + nblocks + addnblocks > sbi->nbperpage))
1046 return -ENOSPC;
1048 /* get the last block of the current allocation */
1049 lastblkno = blkno + nblocks - 1;
1051 /* determine the block number of the block following
1052 * the existing allocation.
1054 extblkno = lastblkno + 1;
1056 IREAD_LOCK(ipbmap);
1058 /* better be within the file system */
1059 bmp = sbi->bmap;
1060 if (lastblkno < 0 || lastblkno >= bmp->db_mapsize) {
1061 IREAD_UNLOCK(ipbmap);
1062 jfs_error(ip->i_sb,
1063 "dbExtend: the block is outside the filesystem");
1064 return -EIO;
1067 /* we'll attempt to extend the current allocation in place by
1068 * allocating the additional blocks as the blocks immediately
1069 * following the current allocation. we only try to extend the
1070 * current allocation in place if the number of additional blocks
1071 * can fit into a dmap, the last block of the current allocation
1072 * is not the last block of the file system, and the start of the
1073 * inplace extension is not on an allocation group boundary.
1075 if (addnblocks > BPERDMAP || extblkno >= bmp->db_mapsize ||
1076 (extblkno & (bmp->db_agsize - 1)) == 0) {
1077 IREAD_UNLOCK(ipbmap);
1078 return -ENOSPC;
1081 /* get the buffer for the dmap containing the first block
1082 * of the extension.
1084 lblkno = BLKTODMAP(extblkno, bmp->db_l2nbperpage);
1085 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
1086 if (mp == NULL) {
1087 IREAD_UNLOCK(ipbmap);
1088 return -EIO;
1091 dp = (struct dmap *) mp->data;
1093 /* try to allocate the blocks immediately following the
1094 * current allocation.
1096 rc = dbAllocNext(bmp, dp, extblkno, (int) addnblocks);
1098 IREAD_UNLOCK(ipbmap);
1100 /* were we successful ? */
1101 if (rc == 0)
1102 write_metapage(mp);
1103 else
1104 /* we were not successful */
1105 release_metapage(mp);
1108 return (rc);
1113 * NAME: dbAllocNext()
1115 * FUNCTION: attempt to allocate the blocks of the specified block
1116 * range within a dmap.
1118 * PARAMETERS:
1119 * bmp - pointer to bmap descriptor
1120 * dp - pointer to dmap.
1121 * blkno - starting block number of the range.
1122 * nblocks - number of contiguous free blocks of the range.
1124 * RETURN VALUES:
1125 * 0 - success
1126 * -ENOSPC - insufficient disk resources
1127 * -EIO - i/o error
1129 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
1131 static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
1132 int nblocks)
1134 int dbitno, word, rembits, nb, nwords, wbitno, nw;
1135 int l2size;
1136 s8 *leaf;
1137 u32 mask;
1139 if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
1140 jfs_error(bmp->db_ipbmap->i_sb,
1141 "dbAllocNext: Corrupt dmap page");
1142 return -EIO;
1145 /* pick up a pointer to the leaves of the dmap tree.
1147 leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
1149 /* determine the bit number and word within the dmap of the
1150 * starting block.
1152 dbitno = blkno & (BPERDMAP - 1);
1153 word = dbitno >> L2DBWORD;
1155 /* check if the specified block range is contained within
1156 * this dmap.
1158 if (dbitno + nblocks > BPERDMAP)
1159 return -ENOSPC;
1161 /* check if the starting leaf indicates that anything
1162 * is free.
1164 if (leaf[word] == NOFREE)
1165 return -ENOSPC;
1167 /* check the dmaps words corresponding to block range to see
1168 * if the block range is free. not all bits of the first and
1169 * last words may be contained within the block range. if this
1170 * is the case, we'll work against those words (i.e. partial first
1171 * and/or last) on an individual basis (a single pass) and examine
1172 * the actual bits to determine if they are free. a single pass
1173 * will be used for all dmap words fully contained within the
1174 * specified range. within this pass, the leaves of the dmap
1175 * tree will be examined to determine if the blocks are free. a
1176 * single leaf may describe the free space of multiple dmap
1177 * words, so we may visit only a subset of the actual leaves
1178 * corresponding to the dmap words of the block range.
1180 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
1181 /* determine the bit number within the word and
1182 * the number of bits within the word.
1184 wbitno = dbitno & (DBWORD - 1);
1185 nb = min(rembits, DBWORD - wbitno);
1187 /* check if only part of the word is to be examined.
1189 if (nb < DBWORD) {
1190 /* check if the bits are free.
1192 mask = (ONES << (DBWORD - nb) >> wbitno);
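/* illustrative example (not in the original source): wbitno == 0, nb == 8 gives mask == 0xff000000, i.e. the first eight bits of the word */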
1193 if ((mask & ~le32_to_cpu(dp->wmap[word])) != mask)
1194 return -ENOSPC;
1196 word += 1;
1197 } else {
1198 /* one or more dmap words are fully contained
1199 * within the block range. determine how many
1200 * words and how many bits.
1202 nwords = rembits >> L2DBWORD;
1203 nb = nwords << L2DBWORD;
1205 /* now examine the appropriate leaves to determine
1206 * if the blocks are free.
1208 while (nwords > 0) {
1209 /* does the leaf describe any free space ?
1211 if (leaf[word] < BUDMIN)
1212 return -ENOSPC;
1214 /* determine the l2 number of bits provided
1215 * by this leaf.
1217 l2size =
1218 min((int)leaf[word], NLSTOL2BSZ(nwords));
1220 /* determine how many words were handled.
1222 nw = BUDSIZE(l2size, BUDMIN);
1224 nwords -= nw;
1225 word += nw;
1230 /* allocate the blocks.
1232 return (dbAllocDmap(bmp, dp, blkno, nblocks));
1237 * NAME: dbAllocNear()
1239 * FUNCTION: attempt to allocate a number of contiguous free blocks near
1240 * a specified block (hint) within a dmap.
1242 * starting with the dmap leaf that covers the hint, we'll
1243 * check the next four contiguous leaves for sufficient free
1244 * space. if sufficient free space is found, we'll allocate
1245 * the desired free space.
1247 * PARAMETERS:
1248 * bmp - pointer to bmap descriptor
1249 * dp - pointer to dmap.
1250 * blkno - block number to allocate near.
1251 * nblocks - actual number of contiguous free blocks desired.
1252 * l2nb - log2 number of contiguous free blocks desired.
1253 * results - on successful return, set to the starting block number
1254 * of the newly allocated range.
1256 * RETURN VALUES:
1257 * 0 - success
1258 * -ENOSPC - insufficient disk resources
1259 * -EIO - i/o error
1261 * serialization: IREAD_LOCK(ipbmap) held on entry/exit;
1263 static int
1264 dbAllocNear(struct bmap * bmp,
1265 struct dmap * dp, s64 blkno, int nblocks, int l2nb, s64 * results)
1267 int word, lword, rc;
1268 s8 *leaf;
1270 if (dp->tree.leafidx != cpu_to_le32(LEAFIND)) {
1271 jfs_error(bmp->db_ipbmap->i_sb,
1272 "dbAllocNear: Corrupt dmap page");
1273 return -EIO;
1276 leaf = dp->tree.stree + le32_to_cpu(dp->tree.leafidx);
1278 /* determine the word within the dmap that holds the hint
1279 * (i.e. blkno). also, determine the last word in the dmap
1280 * that we'll include in our examination.
1282 word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
1283 lword = min(word + 4, LPERDMAP);
1285 /* examine the leaves for sufficient free space.
1287 for (; word < lword; word++) {
1288 /* does the leaf describe sufficient free space ?
1290 if (leaf[word] < l2nb)
1291 continue;
1293 /* determine the block number within the file system
1294 * of the first block described by this dmap word.
1296 blkno = le64_to_cpu(dp->start) + (word << L2DBWORD);
1298 /* if not all bits of the dmap word are free, get the
1299 * starting bit number within the dmap word of the required
1300 * string of free bits and adjust the block number with the
1301 * value.
1303 if (leaf[word] < BUDMIN)
1304 blkno +=
1305 dbFindBits(le32_to_cpu(dp->wmap[word]), l2nb);
1307 /* allocate the blocks.
1309 if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
1310 *results = blkno;
1312 return (rc);
1315 return -ENOSPC;
1320 * NAME: dbAllocAG()
1322 * FUNCTION: attempt to allocate the specified number of contiguous
1323 * free blocks within the specified allocation group.
1325 * unless the allocation group size is equal to the number
1326 * of blocks per dmap, the dmap control pages will be used to
1327 * find the required free space, if available. we start the
1328 * search at the highest dmap control page level which
1329 * distinctly describes the allocation group's free space
1330 * (i.e. the highest level at which the allocation group's
1331 * free space is not mixed in with that of any other group).
1332 * in addition, we start the search within this level at a
1333 * height of the dmapctl dmtree at which the nodes distinctly
1334 * describe the allocation group's free space. at this height,
1335 * the allocation group's free space may be represented by one
1336 * or two sub-trees, depending on the allocation group size.
1337 * we search the top nodes of these subtrees left to right for
1338 * sufficient free space. if sufficient free space is found,
1339 * the subtree is searched to find the leftmost leaf that
1340 * has free space. once we have made it to the leaf, we
1341 * move the search to the next lower level dmap control page
1342 * corresponding to this leaf. we continue down the dmap control
1343 * pages until we find the dmap that contains or starts the
1344 * sufficient free space and we allocate at this dmap.
1346 * if the allocation group size is equal to the dmap size,
1347 * we'll start at the dmap corresponding to the allocation
1348 * group and attempt the allocation at this level.
1350 * the dmap control page search is also not performed if the
1351 * allocation group is completely free and we go to the first
1352 * dmap of the allocation group to do the allocation. this is
1353 * done because the allocation group may be part (not the first
1354 * part) of a larger binary buddy system, causing the dmap
1355 * control pages to indicate no free space (NOFREE) within
1356 * the allocation group.
1358 * PARAMETERS:
1359 * bmp - pointer to bmap descriptor
1360 * agno - allocation group number.
1361 * nblocks - actual number of contiguous free blocks desired.
1362 * l2nb - log2 number of contiguous free blocks desired.
1363 * results - on successful return, set to the starting block number
1364 * of the newly allocated range.
1366 * RETURN VALUES:
1367 * 0 - success
1368 * -ENOSPC - insufficient disk resources
1369 * -EIO - i/o error
1371 * note: IWRITE_LOCK(ipmap) held on entry/exit;
1373 static int
1374 dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb, s64 * results)
1376 struct metapage *mp;
1377 struct dmapctl *dcp;
1378 int rc, ti, i, k, m, n, agperlev;
1379 s64 blkno, lblkno;
1380 int budmin;
1382 /* allocation request should not be for more than the
1383 * allocation group size.
1385 if (l2nb > bmp->db_agl2size) {
1386 jfs_error(bmp->db_ipbmap->i_sb,
1387 "dbAllocAG: allocation request is larger than the "
1388 "allocation group size");
1389 return -EIO;
1392 /* determine the starting block number of the allocation
1393 * group.
1395 blkno = (s64) agno << bmp->db_agl2size;
1397 /* check if the allocation group size is the minimum allocation
1398 * group size or if the allocation group is completely free. if
1399 * the allocation group size is the minimum size of BPERDMAP (i.e.
1400 * 1 dmap), there is no need to search the dmap control page (below)
1401 * that fully describes the allocation group since the allocation
1402 * group is already fully described by a dmap. in this case, we
1403 * just call dbAllocCtl() to search the dmap tree and allocate the
1404 * required space if available.
1406 * if the allocation group is completely free, dbAllocCtl() is
1407 * also called to allocate the required space. this is done for
1408 * two reasons. first, it makes no sense searching the dmap control
1409 * pages for free space when we know that free space exists. second,
1410 * the dmap control pages may indicate that the allocation group
1411 * has no free space if the allocation group is part (not the first
1412 * part) of a larger binary buddy system.
1414 if (bmp->db_agsize == BPERDMAP
1415 || bmp->db_agfree[agno] == bmp->db_agsize) {
1416 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1417 if ((rc == -ENOSPC) &&
1418 (bmp->db_agfree[agno] == bmp->db_agsize)) {
1419 printk(KERN_ERR "blkno = %Lx, blocks = %Lx\n",
1420 (unsigned long long) blkno,
1421 (unsigned long long) nblocks);
1422 jfs_error(bmp->db_ipbmap->i_sb,
1423 "dbAllocAG: dbAllocCtl failed in free AG");
1425 return (rc);
1428 /* the buffer for the dmap control page that fully describes the
1429 * allocation group.
1431 lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, bmp->db_aglevel);
1432 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1433 if (mp == NULL)
1434 return -EIO;
1435 dcp = (struct dmapctl *) mp->data;
1436 budmin = dcp->budmin;
1438 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
1439 jfs_error(bmp->db_ipbmap->i_sb,
1440 "dbAllocAG: Corrupt dmapctl page");
1441 release_metapage(mp);
1442 return -EIO;
1445 /* search the subtree(s) of the dmap control page that describes
1446 * the allocation group, looking for sufficient free space. to begin,
1447 * determine how many allocation groups are represented in a dmap
1448 * control page at the control page level (i.e. L0, L1, L2) that
1449 * fully describes an allocation group. next, determine the starting
1450 * tree index of this allocation group within the control page.
1452 agperlev =
1453 (1 << (L2LPERCTL - (bmp->db_agheigth << 1))) / bmp->db_agwidth;
1454 ti = bmp->db_agstart + bmp->db_agwidth * (agno & (agperlev - 1));
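/* illustrative arithmetic (not in the original source): with L2LPERCTL == 10
 * and, say, db_agheigth == 1 and db_agwidth == 1, agperlev == 1 << (10 - 2) == 256,
 * i.e. one such control page distinctly describes 256 allocation groups.
 */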
1456 /* dmap control page trees fan-out by 4 and a single allocation
1457 * group may be described by 1 or 2 subtrees within the ag level
1458 * dmap control page, depending upon the ag size. examine the ag's
1459 * subtrees for sufficient free space, starting with the leftmost
1460 * subtree.
1462 for (i = 0; i < bmp->db_agwidth; i++, ti++) {
1463 /* is there sufficient free space ?
1465 if (l2nb > dcp->stree[ti])
1466 continue;
1468 /* sufficient free space found in a subtree. now search down
1469 * the subtree to find the leftmost leaf that describes this
1470 * free space.
1472 for (k = bmp->db_agheigth; k > 0; k--) {
1473 for (n = 0, m = (ti << 2) + 1; n < 4; n++) {
1474 if (l2nb <= dcp->stree[m + n]) {
1475 ti = m + n;
1476 break;
1479 if (n == 4) {
1480 jfs_error(bmp->db_ipbmap->i_sb,
1481 "dbAllocAG: failed descending stree");
1482 release_metapage(mp);
1483 return -EIO;
1487 /* determine the block number within the file system
1488 * that corresponds to this leaf.
1490 if (bmp->db_aglevel == 2)
1491 blkno = 0;
1492 else if (bmp->db_aglevel == 1)
1493 blkno &= ~(MAXL1SIZE - 1);
1494 else /* bmp->db_aglevel == 0 */
1495 blkno &= ~(MAXL0SIZE - 1);
1497 blkno +=
1498 ((s64) (ti - le32_to_cpu(dcp->leafidx))) << budmin;
1500 /* release the buffer in preparation for going down
1501 * the next level of dmap control pages.
1503 release_metapage(mp);
1505 /* check if we need to continue to search down the lower
1506 * level dmap control pages. we need to if the number of
1507 * blocks required is less than maximum number of blocks
1508 * described at the next lower level.
1510 if (l2nb < budmin) {
1512 /* search the lower level dmap control pages to get
1513 * the starting block number of the dmap that
1514 * contains or starts off the free space.
1516 if ((rc =
1517 dbFindCtl(bmp, l2nb, bmp->db_aglevel - 1,
1518 &blkno))) {
1519 if (rc == -ENOSPC) {
1520 jfs_error(bmp->db_ipbmap->i_sb,
1521 "dbAllocAG: control page "
1522 "inconsistent");
1523 return -EIO;
1525 return (rc);
1529 /* allocate the blocks.
1531 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1532 if (rc == -ENOSPC) {
1533 jfs_error(bmp->db_ipbmap->i_sb,
1534 "dbAllocAG: unable to allocate blocks");
1535 rc = -EIO;
1537 return (rc);
1540 /* no space in the allocation group. release the buffer and
1541 * return -ENOSPC.
1543 release_metapage(mp);
1545 return -ENOSPC;
1550 * NAME: dbAllocAny()
1552 * FUNCTION: attempt to allocate the specified number of contiguous
1553 * free blocks anywhere in the file system.
1555 * dbAllocAny() attempts to find the sufficient free space by
1556 * searching down the dmap control pages, starting with the
1557 * highest level (i.e. L0, L1, L2) control page. if free space
1558 * large enough to satisfy the desired free space is found, the
1559 * desired free space is allocated.
1561 * PARAMETERS:
1562 * bmp - pointer to bmap descriptor
1563 * nblocks - actual number of contiguous free blocks desired.
1564 * l2nb - log2 number of contiguous free blocks desired.
1565 * results - on successful return, set to the starting block number
1566 * of the newly allocated range.
1568 * RETURN VALUES:
1569 * 0 - success
1570 * -ENOSPC - insufficient disk resources
1571 * -EIO - i/o error
1573 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1575 static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results)
1577 int rc;
1578 s64 blkno = 0;
1580 /* starting with the top level dmap control page, search
1581 * down the dmap control levels for sufficient free space.
1582 * if free space is found, dbFindCtl() returns the starting
1583 * block number of the dmap that contains or starts off the
1584 * range of free space.
1586 if ((rc = dbFindCtl(bmp, l2nb, bmp->db_maxlevel, &blkno)))
1587 return (rc);
1589 /* allocate the blocks.
1591 rc = dbAllocCtl(bmp, nblocks, l2nb, blkno, results);
1592 if (rc == -ENOSPC) {
1593 jfs_error(bmp->db_ipbmap->i_sb,
1594 "dbAllocAny: unable to allocate blocks");
1595 return -EIO;
1597 return (rc);
1602 * NAME: dbFindCtl()
1604 * FUNCTION: starting at a specified dmap control page level and block
1605 * number, search down the dmap control levels for a range of
1606 * contiguous free blocks large enough to satisfy an allocation
1607 * request for the specified number of free blocks.
1609 * if sufficient contiguous free blocks are found, this routine
1610 * returns the starting block number within a dmap page that
1611 * contains or starts a range of contiguous free blocks that
1612 * is sufficient in size.
1614 * PARAMETERS:
1615 * bmp - pointer to bmap descriptor
1616 * level - starting dmap control page level.
1617 * l2nb - log2 number of contiguous free blocks desired.
1618 * *blkno - on entry, starting block number for conducting the search.
1619 * on successful return, the first block within a dmap page
1620 * that contains or starts a range of contiguous free blocks.
1622 * RETURN VALUES:
1623 * 0 - success
1624 * -ENOSPC - insufficient disk resources
1625 * -EIO - i/o error
1627 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1629 static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno)
1631 int rc, leafidx, lev;
1632 s64 b, lblkno;
1633 struct dmapctl *dcp;
1634 int budmin;
1635 struct metapage *mp;
1637 /* starting at the specified dmap control page level and block
1638 * number, search down the dmap control levels for the starting
1639 * block number of a dmap page that contains or starts off
1640 * sufficient free blocks.
1642 for (lev = level, b = *blkno; lev >= 0; lev--) {
1643 /* get the buffer of the dmap control page for the block
1644 * number and level (i.e. L0, L1, L2).
1646 lblkno = BLKTOCTL(b, bmp->db_l2nbperpage, lev);
1647 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1648 if (mp == NULL)
1649 return -EIO;
1650 dcp = (struct dmapctl *) mp->data;
1651 budmin = dcp->budmin;
1653 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
1654 jfs_error(bmp->db_ipbmap->i_sb,
1655 "dbFindCtl: Corrupt dmapctl page");
1656 release_metapage(mp);
1657 return -EIO;
1660 /* search the tree within the dmap control page for
1661 * sufficient free space. if sufficient free space is found,
1662 * dbFindLeaf() returns the index of the leaf at which
1663 * free space was found.
1665 rc = dbFindLeaf((dmtree_t *) dcp, l2nb, &leafidx);
1667 /* release the buffer.
1669 release_metapage(mp);
1671 /* space found ?
1673 if (rc) {
1674 if (lev != level) {
1675 jfs_error(bmp->db_ipbmap->i_sb,
1676 "dbFindCtl: dmap inconsistent");
1677 return -EIO;
1679 return -ENOSPC;
1682 /* adjust the block number to reflect the location within
1683 * the dmap control page (i.e. the leaf) at which free
1684 * space was found.
1686 b += (((s64) leafidx) << budmin);
1688 /* we stop the search at this dmap control page level if
1689 * the number of blocks required is greater than or equal
1690 * to the maximum number of blocks described at the next
1691 * (lower) level.
1693 if (l2nb >= budmin)
1694 break;
1697 *blkno = b;
1698 return (0);
1703 * NAME: dbAllocCtl()
1705 * FUNCTION: attempt to allocate a specified number of contiguous
1706 * blocks starting within a specific dmap.
1708 * this routine is called by higher level routines that search
1709 * the dmap control pages above the actual dmaps for contiguous
1710 * free space. the result of successful searches by these
1711 * routines are the starting block numbers within dmaps, with
1712 * the dmaps themselves containing the desired contiguous free
1713 * space or starting a contiguous free space of desired size
1714 * that is made up of the blocks of one or more dmaps. these
1715 * calls should not fail due to insufficient resources.
1717 * this routine is called in some cases where it is not known
1718 * whether it will fail due to insufficient resources. more
1719 * specifically, this occurs when allocating from an allocation
1720 * group whose size is equal to the number of blocks per dmap.
1721 * in this case, the dmap control pages are not examined prior
1722 * to calling this routine (to save pathlength) and the call
1723 * might fail.
1725 * for a request size that fits within a dmap, this routine relies
1726 * upon the dmap's dmtree to find the requested contiguous free
1727 * space. for request sizes that are larger than a dmap, the
1728 * requested free space will start at the first block of the
1729 * first dmap (i.e. blkno).
1731 * PARAMETERS:
1732 * bmp - pointer to bmap descriptor
1733 * nblocks - actual number of contiguous free blocks to allocate.
1734 * l2nb - log2 number of contiguous free blocks to allocate.
1735 * blkno - starting block number of the dmap to start the allocation
1736 * from.
1737 * results - on successful return, set to the starting block number
1738 * of the newly allocated range.
1740 * RETURN VALUES:
1741 * 0 - success
1742 * -ENOSPC - insufficient disk resources
1743 * -EIO - i/o error
1745 * serialization: IWRITE_LOCK(ipbmap) held on entry/exit;
1747 static int
1748 dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results)
1750 int rc, nb;
1751 s64 b, lblkno, n;
1752 struct metapage *mp;
1753 struct dmap *dp;
1755 /* check if the allocation request is confined to a single dmap.
1757 if (l2nb <= L2BPERDMAP) {
1758 /* get the buffer for the dmap.
1760 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
1761 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1762 if (mp == NULL)
1763 return -EIO;
1764 dp = (struct dmap *) mp->data;
1766 /* try to allocate the blocks.
1768 rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results);
1769 if (rc == 0)
1770 mark_metapage_dirty(mp);
1772 release_metapage(mp);
1774 return (rc);
1777 /* allocation request involving multiple dmaps. it must start on
1778 * a dmap boundary.
1780 assert((blkno & (BPERDMAP - 1)) == 0);
1782 /* allocate the blocks dmap by dmap.
1784 for (n = nblocks, b = blkno; n > 0; n -= nb, b += nb) {
1785 /* get the buffer for the dmap.
1787 lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
1788 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1789 if (mp == NULL) {
1790 rc = -EIO;
1791 goto backout;
1793 dp = (struct dmap *) mp->data;
1795 /* the dmap better be all free.
1797 if (dp->tree.stree[ROOT] != L2BPERDMAP) {
1798 release_metapage(mp);
1799 jfs_error(bmp->db_ipbmap->i_sb,
1800 "dbAllocCtl: the dmap is not all free");
1801 rc = -EIO;
1802 goto backout;
1805 /* determine how many blocks to allocate from this dmap.
1807 nb = min(n, (s64)BPERDMAP);
1809 /* allocate the blocks from the dmap.
1811 if ((rc = dbAllocDmap(bmp, dp, b, nb))) {
1812 release_metapage(mp);
1813 goto backout;
1816 /* write the buffer.
1818 write_metapage(mp);
1821 /* set the results (starting block number) and return.
1823 *results = blkno;
1824 return (0);
1826 /* something failed in handling an allocation request involving
1827 * multiple dmaps. we'll try to clean up by backing out any
1828 * allocation that has already happened for this request. if
1829 * we fail in backing out the allocation, we'll mark the file
1830 * system to indicate that blocks have been leaked.
1832 backout:
1834 /* try to backout the allocations dmap by dmap.
1836 for (n = nblocks - n, b = blkno; n > 0;
1837 n -= BPERDMAP, b += BPERDMAP) {
1838 /* get the buffer for this dmap.
1840 lblkno = BLKTODMAP(b, bmp->db_l2nbperpage);
1841 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
1842 if (mp == NULL) {
1843 /* could not back out. mark the file system
1844 * to indicate that we have leaked blocks.
1846 jfs_error(bmp->db_ipbmap->i_sb,
1847 "dbAllocCtl: I/O Error: Block Leakage.");
1848 continue;
1850 dp = (struct dmap *) mp->data;
1852 /* free the blocks in this dmap.
1854 if (dbFreeDmap(bmp, dp, b, BPERDMAP)) {
1855 /* could not back out. mark the file system
1856 * to indicate that we have leaked blocks.
1858 release_metapage(mp);
1859 jfs_error(bmp->db_ipbmap->i_sb,
1860 "dbAllocCtl: Block Leakage.");
1861 continue;
1864 /* write the buffer.
1866 write_metapage(mp);
1869 return (rc);
1874 * NAME: dbAllocDmapLev()
1876 * FUNCTION: attempt to allocate a specified number of contiguous blocks
1877 * from a specified dmap.
1879 * this routine checks if the contiguous blocks are available.
1880 * if so, nblocks of blocks are allocated; otherwise, ENOSPC is
1881 * returned.
1883 * PARAMETERS:
1884 * bmp - pointer to bmap descriptor
1885 * dp - pointer to dmap to attempt to allocate blocks from.
1886 * l2nb - log2 number of contiguous block desired.
1887 * nblocks - actual number of contiguous block desired.
1888 * results - on successful return, set to the starting block number
1889 * of the newly allocated range.
1891 * RETURN VALUES:
1892 * 0 - success
1893 * -ENOSPC - insufficient disk resources
1894 * -EIO - i/o error
1896 * serialization: IREAD_LOCK(ipbmap), e.g., from dbAlloc(), or
1897 * IWRITE_LOCK(ipbmap), e.g., dbAllocCtl(), held on entry/exit;
1899 static int
1900 dbAllocDmapLev(struct bmap * bmp,
1901 struct dmap * dp, int nblocks, int l2nb, s64 * results)
1903 s64 blkno;
1904 int leafidx, rc;
1906 /* can't be more than a dmaps worth of blocks */
1907 assert(l2nb <= L2BPERDMAP);
1909 /* search the tree within the dmap page for sufficient
1910 * free space. if sufficient free space is found, dbFindLeaf()
1911 * returns the index of the leaf at which free space was found.
1913 if (dbFindLeaf((dmtree_t *) & dp->tree, l2nb, &leafidx))
1914 return -ENOSPC;
1916 /* determine the block number within the file system corresponding
1917 * to the leaf at which free space was found.
1919 blkno = le64_to_cpu(dp->start) + (leafidx << L2DBWORD);
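/* illustrative (not in the original source): with L2DBWORD == 5 (32-bit dmap
 * words), leafidx == 3 maps to a block offset of 3 << 5 == 96 into this dmap.
 */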
1921 /* if not all bits of the dmap word are free, get the starting
1922 * bit number within the dmap word of the required string of free
1923 * bits and adjust the block number with this value.
1925 if (dp->tree.stree[leafidx + LEAFIND] < BUDMIN)
1926 blkno += dbFindBits(le32_to_cpu(dp->wmap[leafidx]), l2nb);
1928 /* allocate the blocks */
1929 if ((rc = dbAllocDmap(bmp, dp, blkno, nblocks)) == 0)
1930 *results = blkno;
1932 return (rc);
1937 * NAME: dbAllocDmap()
1939 * FUNCTION: adjust the disk allocation map to reflect the allocation
1940 * of a specified block range within a dmap.
1942 * this routine allocates the specified blocks from the dmap
1943 * through a call to dbAllocBits(). if the allocation of the
1944 * block range causes the maximum string of free blocks within
1945 * the dmap to change (i.e. the value of the root of the dmap's
1946 * dmtree), this routine will cause this change to be reflected
1947 * up through the appropriate levels of the dmap control pages
1948 * by a call to dbAdjCtl() for the L0 dmap control page that
1949 * covers this dmap.
1951 * PARAMETERS:
1952 * bmp - pointer to bmap descriptor
1953 * dp - pointer to dmap to allocate the block range from.
1954 * blkno - starting block number of the block to be allocated.
1955 * nblocks - number of blocks to be allocated.
1957 * RETURN VALUES:
1958 * 0 - success
1959 * -EIO - i/o error
1961 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
1963 static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
1964 int nblocks)
1966 s8 oldroot;
1967 int rc;
1969 /* save the current value of the root (i.e. maximum free string)
1970 * of the dmap tree.
1972 oldroot = dp->tree.stree[ROOT];
1974 /* allocate the specified (blocks) bits */
1975 dbAllocBits(bmp, dp, blkno, nblocks);
1977 /* if the root has not changed, done. */
1978 if (dp->tree.stree[ROOT] == oldroot)
1979 return (0);
1981 /* root changed. bubble the change up to the dmap control pages.
1982 * if the adjustment of the upper level control pages fails,
1983 * backout the bit allocation (thus making everything consistent).
1985 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 1, 0)))
1986 dbFreeBits(bmp, dp, blkno, nblocks);
1988 return (rc);
1993 * NAME: dbFreeDmap()
1995 * FUNCTION: adjust the disk allocation map to reflect the freeing
1996 * of a specified block range within a dmap.
1998 * this routine frees the specified blocks from the dmap through
1999 * a call to dbFreeBits(). if the deallocation of the block range
2000 * causes the maximum string of free blocks within the dmap to
2001 * change (i.e. the value of the root of the dmap's dmtree), this
2002 * routine will cause this change to be reflected up through the
2003 * appropriate levels of the dmap control pages by a call to
2004 * dbAdjCtl() for the L0 dmap control page that covers this dmap.
2006 * PARAMETERS:
2007 * bmp - pointer to bmap descriptor
2008 * dp - pointer to dmap to free the block range from.
2009 * blkno - starting block number of the block to be freed.
2010 * nblocks - number of blocks to be freed.
2012 * RETURN VALUES:
2013 * 0 - success
2014 * -EIO - i/o error
2016 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2018 static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
2019 int nblocks)
2021 s8 oldroot;
2022 int rc = 0, word;
2024 /* save the current value of the root (i.e. maximum free string)
2025 * of the dmap tree.
2027 oldroot = dp->tree.stree[ROOT];
2029 /* free the specified (blocks) bits */
2030 rc = dbFreeBits(bmp, dp, blkno, nblocks);
2032 /* if error or the root has not changed, done. */
2033 if (rc || (dp->tree.stree[ROOT] == oldroot))
2034 return (rc);
2036 /* root changed. bubble the change up to the dmap control pages.
2037 * if the adjustment of the upper level control pages fails,
2038 * backout the deallocation.
2040 if ((rc = dbAdjCtl(bmp, blkno, dp->tree.stree[ROOT], 0, 0))) {
2041 word = (blkno & (BPERDMAP - 1)) >> L2DBWORD;
2043 /* as part of backing out the deallocation, we will have
2044 * to back split the dmap tree if the deallocation caused
2045 * the freed blocks to become part of a larger binary buddy
2046 * system.
2048 if (dp->tree.stree[word] == NOFREE)
2049 dbBackSplit((dmtree_t *) & dp->tree, word);
2051 dbAllocBits(bmp, dp, blkno, nblocks);
2054 return (rc);
2059 * NAME: dbAllocBits()
2061 * FUNCTION: allocate a specified block range from a dmap.
2063 * this routine updates the dmap to reflect the working
2064 * state allocation of the specified block range. it directly
2065 * updates the bits of the working map and causes the adjustment
2066 * of the binary buddy system described by the dmap's dmtree
2067 * leaves to reflect the bits allocated. it also causes the
2068 * dmap's dmtree, as a whole, to reflect the allocated range.
2070 * PARAMETERS:
2071 * bmp - pointer to bmap descriptor
2072 * dp - pointer to dmap to allocate bits from.
2073 * blkno - starting block number of the bits to be allocated.
2074 * nblocks - number of bits to be allocated.
2076 * RETURN VALUES: none
2078 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2080 static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2081 int nblocks)
2083 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
2084 dmtree_t *tp = (dmtree_t *) & dp->tree;
2085 int size;
2086 s8 *leaf;
2088 /* pick up a pointer to the leaves of the dmap tree */
2089 leaf = dp->tree.stree + LEAFIND;
2091 /* determine the bit number and word within the dmap of the
2092 * starting block.
2094 dbitno = blkno & (BPERDMAP - 1);
2095 word = dbitno >> L2DBWORD;
2097 /* block range better be within the dmap */
2098 assert(dbitno + nblocks <= BPERDMAP);
2100 /* allocate the bits of the dmap's words corresponding to the block
2101 * range. not all bits of the first and last words may be contained
2102 * within the block range. if this is the case, we'll work against
2103 * those words (i.e. partial first and/or last) on an individual basis
2104 * (a single pass), allocating the bits of interest by hand and
2105 * updating the leaf corresponding to the dmap word. a single pass
2106 * will be used for all dmap words fully contained within the
2107 * specified range. within this pass, the bits of all fully contained
2108 * dmap words will be marked as allocated in a single shot and the leaves
2109 * will be updated. a single leaf may describe the free space of
2110 * multiple dmap words, so we may update only a subset of the actual
2111 * leaves corresponding to the dmap words of the block range.
2113 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
2114 /* determine the bit number within the word and
2115 * the number of bits within the word.
2117 wbitno = dbitno & (DBWORD - 1);
2118 nb = min(rembits, DBWORD - wbitno);
2120 /* check if only part of a word is to be allocated.
2122 if (nb < DBWORD) {
2123 /* allocate (set to 1) the appropriate bits within
2124 * this dmap word.
2126 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
2127 >> wbitno);
2129 /* update the leaf for this dmap word. in addition
2130 * to setting the leaf value to the binary buddy max
2131 * of the updated dmap word, dbSplit() will split
2132 * the binary system of the leaves if need be.
2134 dbSplit(tp, word, BUDMIN,
2135 dbMaxBud((u8 *) & dp->wmap[word]));
2137 word += 1;
2138 } else {
2139 /* one or more dmap words are fully contained
2140 * within the block range. determine how many
2141 * words and allocate (set to 1) the bits of these
2142 * words.
2144 nwords = rembits >> L2DBWORD;
2145 memset(&dp->wmap[word], (int) ONES, nwords * 4);
2147 /* determine how many bits.
2149 nb = nwords << L2DBWORD;
2151 /* now update the appropriate leaves to reflect
2152 * the allocated words.
2154 for (; nwords > 0; nwords -= nw) {
2155 if (leaf[word] < BUDMIN) {
2156 jfs_error(bmp->db_ipbmap->i_sb,
2157 "dbAllocBits: leaf page "
2158 "corrupt");
2159 break;
2162 /* determine what the leaf value should be
2163 * updated to as the minimum of the l2 number
2164 * of bits being allocated and the l2 number
2165 * of bits currently described by this leaf.
2167 size = min((int)leaf[word], NLSTOL2BSZ(nwords));
2169 /* update the leaf to reflect the allocation.
2170 * in addition to setting the leaf value to
2171 * NOFREE, dbSplit() will split the binary
2172 * system of the leaves to reflect the current
2173 * allocation (size).
2175 dbSplit(tp, word, size, NOFREE);
2177 /* get the number of dmap words handled */
2178 nw = BUDSIZE(size, BUDMIN);
2179 word += nw;
2184 /* update the free count for this dmap */
2185 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks);
2187 BMAP_LOCK(bmp);
2189 /* if this allocation group is completely free,
2190 * update the maximum allocation group number if this allocation
2191 * group is the new max.
2193 agno = blkno >> bmp->db_agl2size;
2194 if (agno > bmp->db_maxag)
2195 bmp->db_maxag = agno;
2197 /* update the free count for the allocation group and map */
2198 bmp->db_agfree[agno] -= nblocks;
2199 bmp->db_nfree -= nblocks;
2201 BMAP_UNLOCK(bmp);
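/* Illustrative sketch (not from the JFS sources): how the partial-word
 * mask used above in dbAllocBits() and below in dbFreeBits() is built.
 * dmap words number their bits from the most significant bit down, so
 * the nb bits starting at bit wbitno of a word are selected by shifting
 * ONES as shown.  Assumes DBWORD == 32 and ONES == 0xffffffff as defined
 * in jfs_dmap.h; the function name is ours.
 */
#if 0
static u32 example_partial_word_mask(int wbitno, int nb)
{
	/* e.g. wbitno = 3, nb = 5 yields 0x1f000000 (bits 3..7, MSB first) */
	return (0xffffffffU << (32 - nb)) >> wbitno;
}
#endif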
2206 * NAME: dbFreeBits()
2208 * FUNCTION: free a specified block range from a dmap.
2210 * this routine updates the dmap to reflect the working
2211 * state allocation of the specified block range. it directly
2212 * updates the bits of the working map and causes the adjustment
2213 * of the binary buddy system described by the dmap's dmtree
2214 * leaves to reflect the bits freed. it also causes the dmap's
2215 * dmtree, as a whole, to reflect the deallocated range.
2217 * PARAMETERS:
2218 * bmp - pointer to bmap descriptor
2219 * dp - pointer to dmap to free bits from.
2220 * blkno - starting block number of the bits to be freed.
2221 * nblocks - number of bits to be freed.
2223 * RETURN VALUES: 0 - success; -EIO - i/o error
2225 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2227 static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
2228 int nblocks)
2230 int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
2231 dmtree_t *tp = (dmtree_t *) & dp->tree;
2232 int rc = 0;
2233 int size;
2235 /* determine the bit number and word within the dmap of the
2236 * starting block.
2238 dbitno = blkno & (BPERDMAP - 1);
2239 word = dbitno >> L2DBWORD;
2241 /* block range better be within the dmap.
2243 assert(dbitno + nblocks <= BPERDMAP);
2245 /* free the bits of the dmap's words corresponding to the block range.
2246 * not all bits of the first and last words may be contained within
2247 * the block range. if this is the case, we'll work against those
2248 * words (i.e. partial first and/or last) on an individual basis
2249 * (a single pass), freeing the bits of interest by hand and updating
2250 * the leaf corresponding to the dmap word. a single pass will be used
2251 * for all dmap words fully contained within the specified range.
2252 * within this pass, the bits of all fully contained dmap words will
2253 * be marked as free in a single shot and the leaves will be updated. a
2254 * single leaf may describe the free space of multiple dmap words,
2255 * so we may update only a subset of the actual leaves corresponding
2256 * to the dmap words of the block range.
2258 * dbJoin() is used to update leaf values and will join the binary
2259 * buddy system of the leaves if the new leaf values indicate this
2260 * should be done.
2262 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
2263 /* determine the bit number within the word and
2264 * the number of bits within the word.
2266 wbitno = dbitno & (DBWORD - 1);
2267 nb = min(rembits, DBWORD - wbitno);
2269 /* check if only part of a word is to be freed.
2271 if (nb < DBWORD) {
2272 /* free (zero) the appropriate bits within this
2273 * dmap word.
2275 dp->wmap[word] &=
2276 cpu_to_le32(~(ONES << (DBWORD - nb)
2277 >> wbitno));
2279 /* update the leaf for this dmap word.
2281 rc = dbJoin(tp, word,
2282 dbMaxBud((u8 *) & dp->wmap[word]));
2283 if (rc)
2284 return rc;
2286 word += 1;
2287 } else {
2288 /* one or more dmap words are fully contained
2289 * within the block range. determine how many
2290 * words and free (zero) the bits of these words.
2292 nwords = rembits >> L2DBWORD;
2293 memset(&dp->wmap[word], 0, nwords * 4);
2295 /* determine how many bits.
2297 nb = nwords << L2DBWORD;
2299 /* now update the appropriate leaves to reflect
2300 * the freed words.
2302 for (; nwords > 0; nwords -= nw) {
2303 /* determine what the leaf value should be
2304 * updated to as the minimum of the l2 number
2305 * of bits being freed and the l2 (max) number
2306 * of bits that can be described by this leaf.
2308 size =
2309 min(LITOL2BSZ
2310 (word, L2LPERDMAP, BUDMIN),
2311 NLSTOL2BSZ(nwords));
2313 /* update the leaf.
2315 rc = dbJoin(tp, word, size);
2316 if (rc)
2317 return rc;
2319 /* get the number of dmap words handled.
2321 nw = BUDSIZE(size, BUDMIN);
2322 word += nw;
2327 /* update the free count for this dmap.
2329 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks);
2331 BMAP_LOCK(bmp);
2333 /* update the free count for the allocation group and
2334 * map.
2336 agno = blkno >> bmp->db_agl2size;
2337 bmp->db_nfree += nblocks;
2338 bmp->db_agfree[agno] += nblocks;
2340 /* check if this allocation group is now completely free and
2341 * if it is currently the maximum (rightmost) allocation group.
2342 * if so, establish the new maximum allocation group number by
2343 * searching left for the first allocation group with allocation.
2345 if ((bmp->db_agfree[agno] == bmp->db_agsize && agno == bmp->db_maxag) ||
2346 (agno == bmp->db_numag - 1 &&
2347 bmp->db_agfree[agno] == (bmp->db_mapsize & (BPERDMAP - 1)))) {
2348 while (bmp->db_maxag > 0) {
2349 bmp->db_maxag -= 1;
2350 if (bmp->db_agfree[bmp->db_maxag] !=
2351 bmp->db_agsize)
2352 break;
2355 /* re-establish the allocation group preference if the
2356 * current preference is right of the maximum allocation
2357 * group.
2359 if (bmp->db_agpref > bmp->db_maxag)
2360 bmp->db_agpref = bmp->db_maxag;
2363 BMAP_UNLOCK(bmp);
2365 return 0;
2370 * NAME: dbAdjCtl()
2372 * FUNCTION: adjust a dmap control page at a specified level to reflect
2373 * the change in a lower level dmap or dmap control page's
2374 * maximum string of free blocks (i.e. a change in the root
2375 * of the lower level object's dmtree) due to the allocation
2376 * or deallocation of a range of blocks within a single dmap.
2378 * on entry, this routine is provided with the new value of
2379 * the lower level dmap or dmap control page root and the
2380 * starting block number of the block range whose allocation
2381 * or deallocation resulted in the root change. this range
2382 * is represented by a single leaf of the current dmapctl
2383 * and the leaf will be updated with this value, possibly
2384 * causing a binary buddy system within the leaves to be
2385 * split or joined. the update may also cause the dmapctl's
2386 * dmtree to be updated.
2388 * if the adjustment of the dmap control page, itself, causes its
2389 * root to change, this change will be bubbled up to the next dmap
2390 * control level by a recursive call to this routine, specifying
2391 * the new root value and the next dmap control page level to
2392 * be adjusted.
2393 * PARAMETERS:
2394 * bmp - pointer to bmap descriptor
2395 * blkno - the first block of a block range within a dmap. it is
2396 * the allocation or deallocation of this block range that
2397 * requires the dmap control page to be adjusted.
2398 * newval - the new value of the lower level dmap or dmap control
2399 * page root.
2400 * alloc - TRUE if adjustment is due to an allocation.
2401 * level - current level of dmap control page (i.e. L0, L1, L2) to
2402 * be adjusted.
2404 * RETURN VALUES:
2405 * 0 - success
2406 * -EIO - i/o error
2408 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2410 static int
2411 dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
2413 struct metapage *mp;
2414 s8 oldroot;
2415 int oldval;
2416 s64 lblkno;
2417 struct dmapctl *dcp;
2418 int rc, leafno, ti;
2420 /* get the buffer for the dmap control page for the specified
2421 * block number and control page level.
2423 lblkno = BLKTOCTL(blkno, bmp->db_l2nbperpage, level);
2424 mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
2425 if (mp == NULL)
2426 return -EIO;
2427 dcp = (struct dmapctl *) mp->data;
2429 if (dcp->leafidx != cpu_to_le32(CTLLEAFIND)) {
2430 jfs_error(bmp->db_ipbmap->i_sb,
2431 "dbAdjCtl: Corrupt dmapctl page");
2432 release_metapage(mp);
2433 return -EIO;
2436 /* determine the leaf number corresponding to the block and
2437 * the index within the dmap control tree.
2439 leafno = BLKTOCTLLEAF(blkno, dcp->budmin);
2440 ti = leafno + le32_to_cpu(dcp->leafidx);
2442 /* save the current leaf value and the current root level (i.e.
2443 * maximum l2 free string described by this dmapctl).
2445 oldval = dcp->stree[ti];
2446 oldroot = dcp->stree[ROOT];
2448 /* check if this is a control page update for an allocation.
2449 * if so, update the leaf to reflect the new leaf value using
2450 * dbSplit(); otherwise (deallocation), use dbJoin() to update
2451 * the leaf with the new value. in addition to updating the
2452 * leaf, dbSplit() will also split the binary buddy system of
2453 * the leaves, if required, and bubble new values within the
2454 * dmapctl tree, if required. similarly, dbJoin() will join
2455 * the binary buddy system of leaves and bubble new values up
2456 * the dmapctl tree as required by the new leaf value.
2458 if (alloc) {
2459 /* check if we are in the middle of a binary buddy
2460 * system. this happens when we are performing the
2461 * first allocation out of an allocation group that
2462 * is part (not the first part) of a larger binary
2463 * buddy system. if we are in the middle, back split
2464 * the system prior to calling dbSplit() which assumes
2465 * that it is at the front of a binary buddy system.
2467 if (oldval == NOFREE) {
2468 rc = dbBackSplit((dmtree_t *) dcp, leafno);
2469 if (rc) {
2470 release_metapage(mp);
2470 return rc;
2470 }
2471 oldval = dcp->stree[ti];
2473 dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
2474 } else {
2475 rc = dbJoin((dmtree_t *) dcp, leafno, newval);
2476 if (rc) {
2477 release_metapage(mp);
2477 return rc;
2477 }
2480 /* check if the root of the current dmap control page changed due
2481 * to the update and if the current dmap control page is not at
2482 * the current top level (i.e. L0, L1, L2) of the map. if so (i.e.
2483 * root changed and this is not the top level), call this routine
2484 * again (recursion) for the next higher level of the mapping to
2485 * reflect the change in root for the current dmap control page.
2487 if (dcp->stree[ROOT] != oldroot) {
2488 /* are we below the top level of the map. if so,
2489 * bubble the root up to the next higher level.
2491 if (level < bmp->db_maxlevel) {
2492 /* bubble up the new root of this dmap control page to
2493 * the next level.
2495 if ((rc =
2496 dbAdjCtl(bmp, blkno, dcp->stree[ROOT], alloc,
2497 level + 1))) {
2498 /* something went wrong in bubbling up the new
2499 * root value, so backout the changes to the
2500 * current dmap control page.
2502 if (alloc) {
2503 dbJoin((dmtree_t *) dcp, leafno,
2504 oldval);
2505 } else {
2506 /* the dbJoin() above might have
2507 * caused a larger binary buddy system
2508 * to form and we may now be in the
2509 * middle of it. if this is the case,
2510 * back split the buddies.
2512 if (dcp->stree[ti] == NOFREE)
2513 dbBackSplit((dmtree_t *)
2514 dcp, leafno);
2515 dbSplit((dmtree_t *) dcp, leafno,
2516 dcp->budmin, oldval);
2519 /* release the buffer and return the error.
2521 release_metapage(mp);
2522 return (rc);
2524 } else {
2525 /* we're at the top level of the map. update
2526 * the bmap control page to reflect the size
2527 * of the maximum free buddy system.
2529 assert(level == bmp->db_maxlevel);
2530 if (bmp->db_maxfreebud != oldroot) {
2531 jfs_error(bmp->db_ipbmap->i_sb,
2532 "dbAdjCtl: the maximum free buddy is "
2533 "not the old root");
2535 bmp->db_maxfreebud = dcp->stree[ROOT];
2539 /* write the buffer.
2541 write_metapage(mp);
2543 return (0);
2548 * NAME: dbSplit()
2550 * FUNCTION: update the leaf of a dmtree with a new value, splitting
2551 * the leaf from the binary buddy system of the dmtree's
2552 * leaves, as required.
2554 * PARAMETERS:
2555 * tp - pointer to the tree containing the leaf.
2556 * leafno - the number of the leaf to be updated.
2557 * splitsz - the size the binary buddy system starting at the leaf
2558 * must be split to, specified as the log2 number of blocks.
2559 * newval - the new value for the leaf.
2561 * RETURN VALUES: none
2563 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2565 static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval)
2567 int budsz;
2568 int cursz;
2569 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2571 /* check if the leaf needs to be split.
2573 if (leaf[leafno] > tp->dmt_budmin) {
2574 /* the split occurs by cutting the buddy system in half
2575 * at the specified leaf until we reach the specified
2576 * size. pick up the starting split size (current size
2577 * - 1 in l2) and the corresponding buddy size.
2579 cursz = leaf[leafno] - 1;
2580 budsz = BUDSIZE(cursz, tp->dmt_budmin);
2582 /* split until we reach the specified size.
2584 while (cursz >= splitsz) {
2585 /* update the buddy's leaf with its new value.
2587 dbAdjTree(tp, leafno ^ budsz, cursz);
2589 /* on to the next size and buddy.
2591 cursz -= 1;
2592 budsz >>= 1;
2596 /* adjust the dmap tree to reflect the specified leaf's new
2597 * value.
2599 dbAdjTree(tp, leafno, newval);
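/* Illustrative sketch (not from the JFS sources) of the buddy arithmetic
 * dbSplit() relies on: BUDSIZE(s, budmin) is the number of leaves spanned
 * by a buddy of l2 size s, and leafno ^ budsz is that buddy's partner.
 * With budmin == BUDMIN, splitting leaf 0 from value 7 down to splitsz 5
 * leaves value 6 at leaf 2 and value 5 at leaf 1; dbSplit() then sets
 * leaf 0 itself via dbAdjTree().  The helper name is ours and the dmtree
 * bookkeeping is folded into direct leaf writes.
 */
#if 0
static void example_split(s8 *leaf, int leafno, int splitsz, int budmin)
{
	int cursz = leaf[leafno] - 1;
	int budsz = 1 << (cursz - budmin);	/* BUDSIZE(cursz, budmin) */

	while (cursz >= splitsz) {
		leaf[leafno ^ budsz] = cursz;	/* detached buddy keeps cursz */
		cursz--;
		budsz >>= 1;
	}
}
#endif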
2604 * NAME: dbBackSplit()
2606 * FUNCTION: back split the binary buddy system of dmtree leaves
2607 * that hold a specified leaf until the specified leaf
2608 * starts its own binary buddy system.
2610 * the allocators typically perform allocations at the start
2611 * of binary buddy systems and dbSplit() is used to accomplish
2612 * any required splits. in some cases, however, allocation
2613 * may occur in the middle of a binary system and requires a
2614 * back split, with the split proceeding out from the middle of
2615 * the system (less efficient) rather than the start of the
2616 * system (more efficient). the cases in which a back split
2617 * is required are rare and are limited to the first allocation
2618 * within an allocation group which is a part (not first part)
2619 * of a larger binary buddy system and a few exception cases
2620 * in which a previous join operation must be backed out.
2622 * PARAMETERS:
2623 * tp - pointer to the tree containing the leaf.
2624 * leafno - the number of the leaf to be updated.
2626 * RETURN VALUES: 0 - success; -EIO - i/o error
2628 * serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
2630 static int dbBackSplit(dmtree_t * tp, int leafno)
2632 int budsz, bud, w, bsz, size;
2633 int cursz;
2634 s8 *leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2636 /* leaf should be part (not first part) of a binary
2637 * buddy system.
2639 assert(leaf[leafno] == NOFREE);
2641 /* the back split is accomplished by iteratively finding the leaf
2642 * that starts the buddy system that contains the specified leaf and
2643 * splitting that system in two. this iteration continues until
2644 * the specified leaf becomes the start of a buddy system.
2646 * determine maximum possible l2 size for the specified leaf.
2648 size =
2649 LITOL2BSZ(leafno, le32_to_cpu(tp->dmt_l2nleafs),
2650 tp->dmt_budmin);
2652 /* determine the number of leaves covered by this size. this
2653 * is the buddy size that we will start with as we search for
2654 * the buddy system that contains the specified leaf.
2656 budsz = BUDSIZE(size, tp->dmt_budmin);
2658 /* back split.
2660 while (leaf[leafno] == NOFREE) {
2661 /* find the leftmost buddy leaf.
2663 for (w = leafno, bsz = budsz;; bsz <<= 1,
2664 w = (w < bud) ? w : bud) {
2665 if (bsz >= le32_to_cpu(tp->dmt_nleafs)) {
2666 jfs_err("JFS: block map error in dbBackSplit");
2667 return -EIO;
2670 /* determine the buddy.
2672 bud = w ^ bsz;
2674 /* check if this buddy is the start of the system.
2676 if (leaf[bud] != NOFREE) {
2677 /* split the leaf at the start of the
2678 * system in two.
2680 cursz = leaf[bud] - 1;
2681 dbSplit(tp, bud, cursz, cursz);
2682 break;
2687 if (leaf[leafno] != size) {
2688 jfs_err("JFS: wrong leaf value in dbBackSplit");
2689 return -EIO;
2691 return 0;
2696 * NAME: dbJoin()
2698 * FUNCTION: update the leaf of a dmtree with a new value, joining
2699 * the leaf with other leaves of the dmtree into a multi-leaf
2700 * binary buddy system, as required.
2702 * PARAMETERS:
2703 * tp - pointer to the tree containing the leaf.
2704 * leafno - the number of the leaf to be updated.
2705 * newval - the new value for the leaf.
2707 * RETURN VALUES: 0 - success; -EIO - i/o error
2709 static int dbJoin(dmtree_t * tp, int leafno, int newval)
2711 int budsz, buddy;
2712 s8 *leaf;
2714 /* can the new leaf value require a join with other leaves ?
2716 if (newval >= tp->dmt_budmin) {
2717 /* pickup a pointer to the leaves of the tree.
2719 leaf = tp->dmt_stree + le32_to_cpu(tp->dmt_leafidx);
2721 /* try to join the specified leaf into a large binary
2722 * buddy system. the join proceeds by attempting to join
2723 * the specified leafno with its buddy (leaf) at new value.
2724 * if the join occurs, we attempt to join the left leaf
2725 * of the joined buddies with its buddy at new value + 1.
2726 * we continue to join until we find a buddy that cannot be
2727 * joined (does not have a value equal to the size of the
2728 * last join) or until all leaves have been joined into a
2729 * single system.
2731 * get the buddy size (number of words covered) of
2732 * the new value.
2734 budsz = BUDSIZE(newval, tp->dmt_budmin);
2736 /* try to join.
2738 while (budsz < le32_to_cpu(tp->dmt_nleafs)) {
2739 /* get the buddy leaf.
2741 buddy = leafno ^ budsz;
2743 /* if the leaf's new value is greater than its
2744 * buddy's value, we join no more.
2746 if (newval > leaf[buddy])
2747 break;
2749 /* It shouldn't be less */
2750 if (newval < leaf[buddy])
2751 return -EIO;
2753 /* check which (leafno or buddy) is the left buddy.
2754 * the left buddy gets to claim the blocks resulting
2755 * from the join while the right gets to claim none.
2756 * the left buddy is also eligible to participate in
2757 * a join at the next higher level while the right
2758 * is not.
2761 if (leafno < buddy) {
2762 /* leafno is the left buddy.
2764 dbAdjTree(tp, buddy, NOFREE);
2765 } else {
2766 /* buddy is the left buddy and becomes
2767 * leafno.
2769 dbAdjTree(tp, leafno, NOFREE);
2770 leafno = buddy;
2773 /* on to try the next join.
2775 newval += 1;
2776 budsz <<= 1;
2780 /* update the leaf value.
2782 dbAdjTree(tp, leafno, newval);
2784 return 0;
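/* Illustrative sketch (not from the JFS sources) of one step of the join
 * walk in dbJoin(): the partner of leafno at buddy size budsz is
 * leafno ^ budsz; if the partner holds the same value, the lower-numbered
 * (left) leaf of the pair absorbs the blocks and goes on to try the next
 * size, while the right leaf is marked NOFREE.  The helper name is ours
 * and dbAdjTree()'s bubbling is folded into direct leaf writes.
 */
#if 0
static int example_join_step(s8 *leaf, int leafno, int newval, int budsz)
{
	int buddy = leafno ^ budsz;

	if (leaf[buddy] != newval)
		return leafno;			/* partner not free at this size */

	leaf[leafno < buddy ? buddy : leafno] = NOFREE;	/* right gives */
	leafno = leafno < buddy ? leafno : buddy;	/* left takes */
	leaf[leafno] = newval + 1;
	return leafno;
}
#endif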
2789 * NAME: dbAdjTree()
2791 * FUNCTION: update a leaf of a dmtree with a new value, adjusting
2792 * the dmtree, as required, to reflect the new leaf value.
2793 * the combination of any buddies must already be done before
2794 * this is called.
2796 * PARAMETERS:
2797 * tp - pointer to the tree to be adjusted.
2798 * leafno - the number of the leaf to be updated.
2799 * newval - the new value for the leaf.
2801 * RETURN VALUES: none
2803 static void dbAdjTree(dmtree_t * tp, int leafno, int newval)
2805 int lp, pp, k;
2806 int max;
2808 /* pick up the index of the leaf for this leafno.
2810 lp = leafno + le32_to_cpu(tp->dmt_leafidx);
2812 /* is the current value the same as the old value ? if so,
2813 * there is nothing to do.
2815 if (tp->dmt_stree[lp] == newval)
2816 return;
2818 /* set the new value.
2820 tp->dmt_stree[lp] = newval;
2822 /* bubble the new value up the tree as required.
2824 for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) {
2825 /* get the index of the first leaf of the 4 leaf
2826 * group containing the specified leaf (leafno).
2828 lp = ((lp - 1) & ~0x03) + 1;
2830 /* get the index of the parent of this 4 leaf group.
2832 pp = (lp - 1) >> 2;
2834 /* determine the maximum of the 4 leaves.
2836 max = TREEMAX(&tp->dmt_stree[lp]);
2838 /* if the maximum of the 4 is the same as the
2839 * parent's value, we're done.
2841 if (tp->dmt_stree[pp] == max)
2842 break;
2844 /* parent gets new value.
2846 tp->dmt_stree[pp] = max;
2848 /* parent becomes leaf for next go-round.
2850 lp = pp;
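/* Illustrative sketch (not from the JFS sources) of the 4-ary index
 * arithmetic dbAdjTree() bubbles with: the stree array stores the root at
 * index 0 and the four children of node x at 4x+1..4x+4, so the 4-node
 * group containing index lp starts at ((lp - 1) & ~0x03) + 1 and its
 * parent sits at (lp - 1) >> 2.  The helper name is ours.
 */
#if 0
static int example_tree_parent(int lp)
{
	/* e.g. indices 1..4 share parent 0 and indices 5..8 share parent 1 */
	return (lp - 1) >> 2;
}
#endif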
2856 * NAME: dbFindLeaf()
2858 * FUNCTION: search a dmtree_t for sufficient free blocks, returning
2859 * the index of a leaf describing the free blocks if
2860 * sufficient free blocks are found.
2862 * the search starts at the top of the dmtree_t tree and
2863 * proceeds down the tree to the leftmost leaf with sufficient
2864 * free space.
2866 * PARAMETERS:
2867 * tp - pointer to the tree to be searched.
2868 * l2nb - log2 number of free blocks to search for.
2869 * leafidx - return pointer to be set to the index of the leaf
2870 * describing at least l2nb free blocks if sufficient
2871 * free blocks are found.
2873 * RETURN VALUES:
2874 * 0 - success
2875 * -ENOSPC - insufficient free blocks.
2877 static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx)
2879 int ti, n = 0, k, x = 0;
2881 /* first check the root of the tree to see if there is
2882 * sufficient free space.
2884 if (l2nb > tp->dmt_stree[ROOT])
2885 return -ENOSPC;
2887 /* sufficient free space available. now search down the tree
2888 * starting at the next level for the leftmost leaf that
2889 * describes sufficient free space.
2891 for (k = le32_to_cpu(tp->dmt_height), ti = 1;
2892 k > 0; k--, ti = ((ti + n) << 2) + 1) {
2893 /* search the four nodes at this level, starting from
2894 * the left.
2896 for (x = ti, n = 0; n < 4; n++) {
2897 /* sufficient free space found. move to the next
2898 * level (or quit if this is the last level).
2900 if (l2nb <= tp->dmt_stree[x + n])
2901 break;
2904 /* better have found something since the higher
2905 * levels of the tree said it was here.
2907 assert(n < 4);
2910 /* set the return to the leftmost leaf describing sufficient
2911 * free space.
2913 *leafidx = x + n - le32_to_cpu(tp->dmt_leafidx);
2915 return (0);
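/* Illustrative sketch (not from the JFS sources) of the downward step in
 * dbFindLeaf(): the children of stree node x live at 4x+1..4x+4, so once
 * the n-th node (0..3) of the current group at ti still shows l2nb free,
 * the next group to scan starts at ((ti + n) << 2) + 1.  The helper name
 * is ours.
 */
#if 0
static int example_first_child(int ti, int n)
{
	return ((ti + n) << 2) + 1;
}
#endif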
2920 * NAME: dbFindBits()
2922 * FUNCTION: find a specified number of binary buddy free bits within a
2923 * dmap bitmap word value.
2925 * this routine searches the bitmap value for (1 << l2nb) free
2926 * bits at (1 << l2nb) alignments within the value.
2928 * PARAMETERS:
2929 * word - dmap bitmap word value.
2930 * l2nb - number of free bits specified as a log2 number.
2932 * RETURN VALUES:
2933 * starting bit number of free bits.
2935 static int dbFindBits(u32 word, int l2nb)
2937 int bitno, nb;
2938 u32 mask;
2940 /* get the number of bits.
2942 nb = 1 << l2nb;
2943 assert(nb <= DBWORD);
2945 /* complement the word so we can use a mask (i.e. 0s represent
2946 * free bits) and compute the mask.
2948 word = ~word;
2949 mask = ONES << (DBWORD - nb);
2951 /* scan the word for nb free bits at nb alignments.
2953 for (bitno = 0; mask != 0; bitno += nb, mask >>= nb) {
2954 if ((mask & word) == mask)
2955 break;
2958 ASSERT(bitno < 32);
2960 /* return the bit number.
2962 return (bitno);
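/* Worked example (not from the JFS sources) of the scan in dbFindBits():
 * free bits are 0 in a dmap word, so the word is complemented and then
 * searched for nb = 1 << l2nb set bits at nb-aligned positions, most
 * significant bits first.  For word 0xf0ffffff and l2nb == 2 the only
 * free nibble is bits 4..7, so the scan stops at bit number 4.  The
 * helper name is ours.
 */
#if 0
static int example_find_bits(void)
{
	u32 word = ~0xf0ffffffU;		/* 1s now mean free */
	u32 mask = 0xffffffffU << (32 - 4);	/* nb == 4 */
	int bitno;

	for (bitno = 0; mask != 0; bitno += 4, mask >>= 4)
		if ((mask & word) == mask)
			break;
	return bitno;				/* 4 */
}
#endif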
2967 * NAME: dbMaxBud(u8 *cp)
2969 * FUNCTION: determine the largest binary buddy string of free
2970 * bits within 32-bits of the map.
2972 * PARAMETERS:
2973 * cp - pointer to the 32-bit value.
2975 * RETURN VALUES:
2976 * largest binary buddy of free bits within a dmap word.
2978 static int dbMaxBud(u8 * cp)
2980 signed char tmp1, tmp2;
2982 /* check if the wmap word is all free. if so, the
2983 * free buddy size is BUDMIN.
2985 if (*((uint *) cp) == 0)
2986 return (BUDMIN);
2988 /* check if the wmap word is half free. if so, the
2989 * free buddy size is BUDMIN-1.
2991 if (*((u16 *) cp) == 0 || *((u16 *) cp + 1) == 0)
2992 return (BUDMIN - 1);
2994 /* not all free or half free. determine the free buddy
2995 * size through table lookup using quarters of the wmap word.
2997 tmp1 = max(budtab[cp[2]], budtab[cp[3]]);
2998 tmp2 = max(budtab[cp[0]], budtab[cp[1]]);
2999 return (max(tmp1, tmp2));
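/* Illustrative sketch (not from the JFS sources) of what the budtab[]
 * lookup used by dbMaxBud() appears to encode for one byte (one quarter)
 * of a dmap word: the l2 size of the largest aligned run of free (0)
 * bits, or NOFREE if every bit is allocated.  The helper name is ours
 * and it is a re-derivation, not the table itself.
 */
#if 0
static s8 example_budtab_entry(u8 b)
{
	if (b == 0x00)
		return 3;			/* whole byte free */
	if ((b & 0xf0) == 0 || (b & 0x0f) == 0)
		return 2;			/* a free nibble */
	if ((b & 0xc0) == 0 || (b & 0x30) == 0 ||
	    (b & 0x0c) == 0 || (b & 0x03) == 0)
		return 1;			/* an aligned free bit pair */
	if (b != 0xff)
		return 0;			/* at least one free bit */
	return NOFREE;
}
#endif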
3004 * NAME: cnttz(uint word)
3006 * FUNCTION: determine the number of trailing zeros within a 32-bit
3007 * value.
3009 * PARAMETERS:
3010 * word - 32-bit value to be examined.
3012 * RETURN VALUES:
3013 * count of trailing zeros
3015 static int cnttz(u32 word)
3017 int n;
3019 for (n = 0; n < 32; n++, word >>= 1) {
3020 if (word & 0x01)
3021 break;
3024 return (n);
3029 * NAME: cntlz(u32 value)
3031 * FUNCTION: determine the number of leading zeros within a 32-bit
3032 * value.
3034 * PARAMETERS:
3035 * value - 32-bit value to be examined.
3037 * RETURN VALUES:
3038 * count of leading zeros
3040 static int cntlz(u32 value)
3042 int n;
3044 for (n = 0; n < 32; n++, value <<= 1) {
3045 if (value & HIGHORDER)
3046 break;
3048 return (n);
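/* Quick sanity examples (not from the JFS sources) for the two bit
 * counters above: cnttz() counts clear bits below the lowest set bit,
 * cntlz() counts clear bits above the highest set bit.
 */
#if 0
static void example_zero_counts(void)
{
	int tz = cnttz(0x00000008);	/* 3: bits 0..2 are clear */
	int lz = cntlz(0x00000008);	/* 28: bits 31..4 are clear */
	(void) tz;
	(void) lz;
}
#endif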
3053 * NAME: blkstol2(s64 nb)
3055 * FUNCTION: convert a block count to its log2 value. if the block
3056 * count is not a power of 2, the result is rounded up to the
3057 * log2 of the next larger power of 2.
3059 * PARAMETERS:
3060 * nb - number of blocks
3062 * RETURN VALUES:
3063 * log2 number of blocks
3065 static int blkstol2(s64 nb)
3067 int l2nb;
3068 s64 mask; /* meant to be signed */
3070 mask = (s64) 1 << (64 - 1);
3072 /* count the leading bits.
3074 for (l2nb = 0; l2nb < 64; l2nb++, mask >>= 1) {
3075 /* leading bit found.
3077 if (nb & mask) {
3078 /* determine the l2 value.
3080 l2nb = (64 - 1) - l2nb;
3082 /* check if we need to round up.
3084 if (~mask & nb)
3085 l2nb++;
3087 return (l2nb);
3090 assert(0);
3091 return 0; /* fix compiler warning */
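/* Worked example (not from the JFS sources) of the rounding behaviour of
 * blkstol2(): an exact power of 2 keeps its log2, anything in between is
 * bumped to the next one.  The helper name is ours.
 */
#if 0
static void example_blkstol2(void)
{
	int a = blkstol2(64);		/* 6 */
	int b = blkstol2(65);		/* 7 */
	(void) a;
	(void) b;
}
#endif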
3096 * NAME: dbAllocBottomUp()
3098 * FUNCTION: allocate the specified block range from the working block
3099 * allocation map.
3101 * the blocks will be allocated from the working map one dmap
3102 * at a time.
3104 * PARAMETERS:
3105 * ip - pointer to in-core inode;
3106 * blkno - starting block number to be allocated.
3107 * nblocks - number of blocks to be allocated.
3109 * RETURN VALUES:
3110 * 0 - success
3111 * -EIO - i/o error
3113 int dbAllocBottomUp(struct inode *ip, s64 blkno, s64 nblocks)
3115 struct metapage *mp;
3116 struct dmap *dp;
3117 int nb, rc;
3118 s64 lblkno, rem;
3119 struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
3120 struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
3122 IREAD_LOCK(ipbmap);
3124 /* block to be allocated better be within the mapsize. */
3125 ASSERT(nblocks <= bmp->db_mapsize - blkno);
3128 * allocate the blocks a dmap at a time.
3130 mp = NULL;
3131 for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
3132 /* release previous dmap if any */
3133 if (mp) {
3134 write_metapage(mp);
3137 /* get the buffer for the current dmap. */
3138 lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
3139 mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
3140 if (mp == NULL) {
3141 IREAD_UNLOCK(ipbmap);
3142 return -EIO;
3144 dp = (struct dmap *) mp->data;
3146 /* determine the number of blocks to be allocated from
3147 * this dmap.
3149 nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));
3151 /* allocate the blocks. */
3152 if ((rc = dbAllocDmapBU(bmp, dp, blkno, nb))) {
3153 release_metapage(mp);
3154 IREAD_UNLOCK(ipbmap);
3155 return (rc);
3159 /* write the last buffer. */
3160 write_metapage(mp);
3162 IREAD_UNLOCK(ipbmap);
3164 return (0);
3168 static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
3169 int nblocks)
3171 int rc;
3172 int dbitno, word, rembits, nb, nwords, wbitno, agno;
3173 s8 oldroot, *leaf;
3174 struct dmaptree *tp = (struct dmaptree *) & dp->tree;
3176 /* save the current value of the root (i.e. maximum free string)
3177 * of the dmap tree.
3179 oldroot = tp->stree[ROOT];
3181 /* pick up a pointer to the leaves of the dmap tree */
3182 leaf = tp->stree + LEAFIND;
3184 /* determine the bit number and word within the dmap of the
3185 * starting block.
3187 dbitno = blkno & (BPERDMAP - 1);
3188 word = dbitno >> L2DBWORD;
3190 /* block range better be within the dmap */
3191 assert(dbitno + nblocks <= BPERDMAP);
3193 /* allocate the bits of the dmap's words corresponding to the block
3194 * range. not all bits of the first and last words may be contained
3195 * within the block range. if this is the case, we'll work against
3196 * those words (i.e. partial first and/or last) on an individual basis
3197 * (a single pass), allocating the bits of interest by hand and
3198 * updating the leaf corresponding to the dmap word. a single pass
3199 * will be used for all dmap words fully contained within the
3200 * specified range. within this pass, the bits of all fully contained
3201 * dmap words will be marked as allocated in a single shot and the leaves
3202 * will be updated. a single leaf may describe the free space of
3203 * multiple dmap words, so we may update only a subset of the actual
3204 * leaves corresponding to the dmap words of the block range.
3206 for (rembits = nblocks; rembits > 0; rembits -= nb, dbitno += nb) {
3207 /* determine the bit number within the word and
3208 * the number of bits within the word.
3210 wbitno = dbitno & (DBWORD - 1);
3211 nb = min(rembits, DBWORD - wbitno);
3213 /* check if only part of a word is to be allocated.
3215 if (nb < DBWORD) {
3216 /* allocate (set to 1) the appropriate bits within
3217 * this dmap word.
3219 dp->wmap[word] |= cpu_to_le32(ONES << (DBWORD - nb)
3220 >> wbitno);
3222 word++;
3223 } else {
3224 /* one or more dmap words are fully contained
3225 * within the block range. determine how many
3226 * words and allocate (set to 1) the bits of these
3227 * words.
3229 nwords = rembits >> L2DBWORD;
3230 memset(&dp->wmap[word], (int) ONES, nwords * 4);
3232 /* determine how many bits */
3233 nb = nwords << L2DBWORD;
3234 word += nwords;
3238 /* update the free count for this dmap */
3239 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) - nblocks);
3241 /* reconstruct summary tree */
3242 dbInitDmapTree(dp);
3244 BMAP_LOCK(bmp);
3246 /* if this allocation group is completely free,
3247 * update the highest active allocation group number
3248 * if this allocation group is the new max.
3250 agno = blkno >> bmp->db_agl2size;
3251 if (agno > bmp->db_maxag)
3252 bmp->db_maxag = agno;
3254 /* update the free count for the allocation group and map */
3255 bmp->db_agfree[agno] -= nblocks;
3256 bmp->db_nfree -= nblocks;
3258 BMAP_UNLOCK(bmp);
3260 /* if the root has not changed, done. */
3261 if (tp->stree[ROOT] == oldroot)
3262 return (0);
3264 /* root changed. bubble the change up to the dmap control pages.
3265 * if the adjustment of the upper level control pages fails,
3266 * backout the bit allocation (thus making everything consistent).
3268 if ((rc = dbAdjCtl(bmp, blkno, tp->stree[ROOT], 1, 0)))
3269 dbFreeBits(bmp, dp, blkno, nblocks);
3271 return (rc);
3276 * NAME: dbExtendFS()
3278 * FUNCTION: extend bmap from blkno for nblocks;
3279 * dbExtendFS() updates bmap ready for dbAllocBottomUp();
3281 * L2
3283 * L1---------------------------------L1
3284 * | |
3285 * L0---------L0---------L0 L0---------L0---------L0
3286 * | | | | | |
3287 * d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,...,dn d0,.,dm;
3288 * L2L1L0d0,...,dnL0d0,...,dnL0d0,...,dnL1L0d0,...,dnL0d0,...,dnL0d0,..dm
3290 * <---old---><----------------------------extend----------------------->
3292 int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks)
3294 struct jfs_sb_info *sbi = JFS_SBI(ipbmap->i_sb);
3295 int nbperpage = sbi->nbperpage;
3296 int i, i0 = TRUE, j, j0 = TRUE, k, n;
3297 s64 newsize;
3298 s64 p;
3299 struct metapage *mp, *l2mp, *l1mp = NULL, *l0mp = NULL;
3300 struct dmapctl *l2dcp, *l1dcp, *l0dcp;
3301 struct dmap *dp;
3302 s8 *l0leaf, *l1leaf, *l2leaf;
3303 struct bmap *bmp = sbi->bmap;
3304 int agno, l2agsize, oldl2agsize;
3305 s64 ag_rem;
3307 newsize = blkno + nblocks;
3309 jfs_info("dbExtendFS: blkno:%Ld nblocks:%Ld newsize:%Ld",
3310 (long long) blkno, (long long) nblocks, (long long) newsize);
3313 * initialize bmap control page.
3315 * all the data in bmap control page should exclude
3316 * the mkfs hidden dmap page.
3319 /* update mapsize */
3320 bmp->db_mapsize = newsize;
3321 bmp->db_maxlevel = BMAPSZTOLEV(bmp->db_mapsize);
3323 /* compute new AG size */
3324 l2agsize = dbGetL2AGSize(newsize);
3325 oldl2agsize = bmp->db_agl2size;
3327 bmp->db_agl2size = l2agsize;
3328 bmp->db_agsize = 1 << l2agsize;
3330 /* compute new number of AG */
3331 agno = bmp->db_numag;
3332 bmp->db_numag = newsize >> l2agsize;
3333 bmp->db_numag += ((u32) newsize % (u32) bmp->db_agsize) ? 1 : 0;
3336 * reconfigure db_agfree[]
3337 * from old AG configuration to new AG configuration;
3339 * coalesce contiguous k (newAGSize/oldAGSize) AGs;
3340 * i.e., (AGi, ..., AGj) where i = k*n and j = k*(n+1) - 1 to AGn;
3341 * note: new AG size = old AG size * (2**x).
3343 if (l2agsize == oldl2agsize)
3344 goto extend;
3345 k = 1 << (l2agsize - oldl2agsize);
3346 ag_rem = bmp->db_agfree[0]; /* save agfree[0] */
3347 for (i = 0, n = 0; i < agno; n++) {
3348 bmp->db_agfree[n] = 0; /* init collection point */
3350 /* coalesce contiguous k AGs; */
3351 for (j = 0; j < k && i < agno; j++, i++) {
3352 /* merge AGi to AGn */
3353 bmp->db_agfree[n] += bmp->db_agfree[i];
3356 bmp->db_agfree[0] += ag_rem; /* restore agfree[0] */
3358 for (; n < MAXAG; n++)
3359 bmp->db_agfree[n] = 0;
3362 * update highest active ag number
3365 bmp->db_maxag = bmp->db_maxag / k;
3368 * extend bmap
3370 * update bit maps and corresponding level control pages;
3371 * global control page db_nfree, db_agfree[agno], db_maxfreebud;
3373 extend:
3374 /* get L2 page */
3375 p = BMAPBLKNO + nbperpage; /* L2 page */
3376 l2mp = read_metapage(ipbmap, p, PSIZE, 0);
3377 if (!l2mp) {
3378 jfs_error(ipbmap->i_sb, "dbExtendFS: L2 page could not be read");
3379 return -EIO;
3381 l2dcp = (struct dmapctl *) l2mp->data;
3383 /* compute start L1 */
3384 k = blkno >> L2MAXL1SIZE;
3385 l2leaf = l2dcp->stree + CTLLEAFIND + k;
3386 p = BLKTOL1(blkno, sbi->l2nbperpage); /* L1 page */
3389 * extend each L1 in L2
3391 for (; k < LPERCTL; k++, p += nbperpage) {
3392 /* get L1 page */
3393 if (j0) {
3394 /* read in L1 page: (blkno & (MAXL1SIZE - 1)) */
3395 l1mp = read_metapage(ipbmap, p, PSIZE, 0);
3396 if (l1mp == NULL)
3397 goto errout;
3398 l1dcp = (struct dmapctl *) l1mp->data;
3400 /* compute start L0 */
3401 j = (blkno & (MAXL1SIZE - 1)) >> L2MAXL0SIZE;
3402 l1leaf = l1dcp->stree + CTLLEAFIND + j;
3403 p = BLKTOL0(blkno, sbi->l2nbperpage);
3404 j0 = FALSE;
3405 } else {
3406 /* assign/init L1 page */
3407 l1mp = get_metapage(ipbmap, p, PSIZE, 0);
3408 if (l1mp == NULL)
3409 goto errout;
3411 l1dcp = (struct dmapctl *) l1mp->data;
3413 /* compute start L0 */
3414 j = 0;
3415 l1leaf = l1dcp->stree + CTLLEAFIND;
3416 p += nbperpage; /* 1st L0 of L1.k */
3420 * extend each L0 in L1
3422 for (; j < LPERCTL; j++) {
3423 /* get L0 page */
3424 if (i0) {
3425 /* read in L0 page: (blkno & (MAXL0SIZE - 1)) */
3427 l0mp = read_metapage(ipbmap, p, PSIZE, 0);
3428 if (l0mp == NULL)
3429 goto errout;
3430 l0dcp = (struct dmapctl *) l0mp->data;
3432 /* compute start dmap */
3433 i = (blkno & (MAXL0SIZE - 1)) >>
3434 L2BPERDMAP;
3435 l0leaf = l0dcp->stree + CTLLEAFIND + i;
3436 p = BLKTODMAP(blkno,
3437 sbi->l2nbperpage);
3438 i0 = FALSE;
3439 } else {
3440 /* assign/init L0 page */
3441 l0mp = get_metapage(ipbmap, p, PSIZE, 0);
3442 if (l0mp == NULL)
3443 goto errout;
3445 l0dcp = (struct dmapctl *) l0mp->data;
3447 /* compute start dmap */
3448 i = 0;
3449 l0leaf = l0dcp->stree + CTLLEAFIND;
3450 p += nbperpage; /* 1st dmap of L0.j */
3454 * extend each dmap in L0
3456 for (; i < LPERCTL; i++) {
3458 * reconstruct the dmap page, and
3459 * initialize corresponding parent L0 leaf
3461 if ((n = blkno & (BPERDMAP - 1))) {
3462 /* read in dmap page: */
3463 mp = read_metapage(ipbmap, p,
3464 PSIZE, 0);
3465 if (mp == NULL)
3466 goto errout;
3467 n = min(nblocks, (s64)BPERDMAP - n);
3468 } else {
3469 /* assign/init dmap page */
3470 mp = read_metapage(ipbmap, p,
3471 PSIZE, 0);
3472 if (mp == NULL)
3473 goto errout;
3475 n = min(nblocks, (s64)BPERDMAP);
3478 dp = (struct dmap *) mp->data;
3479 *l0leaf = dbInitDmap(dp, blkno, n);
3481 bmp->db_nfree += n;
3482 agno = le64_to_cpu(dp->start) >> l2agsize;
3483 bmp->db_agfree[agno] += n;
3485 write_metapage(mp);
3487 l0leaf++;
3488 p += nbperpage;
3490 blkno += n;
3491 nblocks -= n;
3492 if (nblocks == 0)
3493 break;
3494 } /* for each dmap in a L0 */
3497 * build current L0 page from its leaves, and
3498 * initialize corresponding parent L1 leaf
3500 *l1leaf = dbInitDmapCtl(l0dcp, 0, ++i);
3501 write_metapage(l0mp);
3502 l0mp = NULL;
3504 if (nblocks)
3505 l1leaf++; /* continue for next L0 */
3506 else {
3507 /* more than 1 L0 ? */
3508 if (j > 0)
3509 break; /* build L1 page */
3510 else {
3511 /* summarize in global bmap page */
3512 bmp->db_maxfreebud = *l1leaf;
3513 release_metapage(l1mp);
3514 release_metapage(l2mp);
3515 goto finalize;
3518 } /* for each L0 in a L1 */
3521 * build current L1 page from its leaves, and
3522 * initialize corresponding parent L2 leaf
3524 *l2leaf = dbInitDmapCtl(l1dcp, 1, ++j);
3525 write_metapage(l1mp);
3526 l1mp = NULL;
3528 if (nblocks)
3529 l2leaf++; /* continue for next L1 */
3530 else {
3531 /* more than 1 L1 ? */
3532 if (k > 0)
3533 break; /* build L2 page */
3534 else {
3535 /* summarize in global bmap page */
3536 bmp->db_maxfreebud = *l2leaf;
3537 release_metapage(l2mp);
3538 goto finalize;
3541 } /* for each L1 in a L2 */
3543 jfs_error(ipbmap->i_sb,
3544 "dbExtendFS: function has not returned as expected");
3545 errout:
3546 if (l0mp)
3547 release_metapage(l0mp);
3548 if (l1mp)
3549 release_metapage(l1mp);
3550 release_metapage(l2mp);
3551 return -EIO;
3554 * finalize bmap control page
3556 finalize:
3558 return 0;
3563 * dbFinalizeBmap()
3565 void dbFinalizeBmap(struct inode *ipbmap)
3567 struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
3568 int actags, inactags, l2nl;
3569 s64 ag_rem, actfree, inactfree, avgfree;
3570 int i, n;
3573 * finalize bmap control page
3575 //finalize:
3577 * compute db_agpref: preferred ag to allocate from
3578 * (the leftmost ag with average free space in it);
3580 //agpref:
3581 /* get the number of active ags and inactive ags */
3582 actags = bmp->db_maxag + 1;
3583 inactags = bmp->db_numag - actags;
3584 ag_rem = bmp->db_mapsize & (bmp->db_agsize - 1); /* ??? */
3586 /* determine how many blocks are in the inactive allocation
3587 * groups. in doing this, we must account for the fact that
3588 * the rightmost group might be a partial group (i.e. file
3589 * system size is not a multiple of the group size).
3591 inactfree = (inactags && ag_rem) ?
3592 ((inactags - 1) << bmp->db_agl2size) + ag_rem
3593 : inactags << bmp->db_agl2size;
3595 /* determine how many free blocks are in the active
3596 * allocation groups plus the average number of free blocks
3597 * within the active ags.
3599 actfree = bmp->db_nfree - inactfree;
3600 avgfree = (u32) actfree / (u32) actags;
3602 /* if the preferred allocation group does not have average free space,
3603 * re-establish the preferred group as the leftmost
3604 * group with average free space.
3606 if (bmp->db_agfree[bmp->db_agpref] < avgfree) {
3607 for (bmp->db_agpref = 0; bmp->db_agpref < actags;
3608 bmp->db_agpref++) {
3609 if (bmp->db_agfree[bmp->db_agpref] >= avgfree)
3610 break;
3612 if (bmp->db_agpref >= bmp->db_numag) {
3613 jfs_error(ipbmap->i_sb,
3614 "cannot find ag with average freespace");
3619 * compute db_aglevel, db_agheigth, db_agwidth, db_agstart:
3620 * an ag is covered by the aglevel dmapctl summary tree,
3621 * at agheight level height (from leaf) with agwidth number of nodes
3622 * each, which starts at agstart index node of the summary tree node
3623 * array;
3625 bmp->db_aglevel = BMAPSZTOLEV(bmp->db_agsize);
3626 l2nl =
3627 bmp->db_agl2size - (L2BPERDMAP + bmp->db_aglevel * L2LPERCTL);
3628 bmp->db_agheigth = l2nl >> 1;
3629 bmp->db_agwidth = 1 << (l2nl - (bmp->db_agheigth << 1));
3630 for (i = 5 - bmp->db_agheigth, bmp->db_agstart = 0, n = 1; i > 0;
3631 i--) {
3632 bmp->db_agstart += n;
3633 n <<= 2;
3640 * NAME: dbInitDmap()/ujfs_idmap_page()
3642 * FUNCTION: initialize working/persistent bitmap of the dmap page
3643 * for the specified number of blocks:
3645 * at entry, the bitmaps had been initialized as free (ZEROS);
3646 * The number of blocks will only account for the actually
3647 * existing blocks. Blocks which don't actually exist in
3648 * the aggregate will be marked as allocated (ONES);
3650 * PARAMETERS:
3651 * dp - pointer to page of map
3652 * nblocks - number of blocks this page
3654 * RETURNS: max free string at the root of the dmap's tree
3656 static int dbInitDmap(struct dmap * dp, s64 Blkno, int nblocks)
3658 int blkno, w, b, r, nw, nb, i;
3660 /* starting block number within the dmap */
3661 blkno = Blkno & (BPERDMAP - 1);
3663 if (blkno == 0) {
3664 dp->nblocks = dp->nfree = cpu_to_le32(nblocks);
3665 dp->start = cpu_to_le64(Blkno);
3667 if (nblocks == BPERDMAP) {
3668 memset(&dp->wmap[0], 0, LPERDMAP * 4);
3669 memset(&dp->pmap[0], 0, LPERDMAP * 4);
3670 goto initTree;
3672 } else {
3673 dp->nblocks =
3674 cpu_to_le32(le32_to_cpu(dp->nblocks) + nblocks);
3675 dp->nfree = cpu_to_le32(le32_to_cpu(dp->nfree) + nblocks);
3678 /* word number containing start block number */
3679 w = blkno >> L2DBWORD;
3682 * free the bits corresponding to the block range (ZEROS):
3683 * note: not all bits of the first and last words may be contained
3684 * within the block range.
3686 for (r = nblocks; r > 0; r -= nb, blkno += nb) {
3687 /* number of bits preceding range to be freed in the word */
3688 b = blkno & (DBWORD - 1);
3689 /* number of bits to free in the word */
3690 nb = min(r, DBWORD - b);
3692 /* is partial word to be freed ? */
3693 if (nb < DBWORD) {
3694 /* free (set to 0) from the bitmap word */
3695 dp->wmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
3696 >> b));
3697 dp->pmap[w] &= cpu_to_le32(~(ONES << (DBWORD - nb)
3698 >> b));
3700 /* skip the word freed */
3701 w++;
3702 } else {
3703 /* free (set to 0) contiguous bitmap words */
3704 nw = r >> L2DBWORD;
3705 memset(&dp->wmap[w], 0, nw * 4);
3706 memset(&dp->pmap[w], 0, nw * 4);
3708 /* skip the words freed */
3709 nb = nw << L2DBWORD;
3710 w += nw;
3715 * mark bits following the range to be freed (non-existing
3716 * blocks) as allocated (ONES)
3719 if (blkno == BPERDMAP)
3720 goto initTree;
3722 /* the first word beyond the end of existing blocks */
3723 w = blkno >> L2DBWORD;
3725 /* does nblocks fall on a 32-bit boundary ? */
3726 b = blkno & (DBWORD - 1);
3727 if (b) {
3728 /* mark a partial word allocated */
3729 dp->wmap[w] = dp->pmap[w] = cpu_to_le32(ONES >> b);
3730 w++;
3733 /* set the rest of the words in the page to allocated (ONES) */
3734 for (i = w; i < LPERDMAP; i++)
3735 dp->pmap[i] = dp->wmap[i] = cpu_to_le32(ONES);
3738 * init tree
3740 initTree:
3741 return (dbInitDmapTree(dp));
3746 * NAME: dbInitDmapTree()/ujfs_complete_dmap()
3748 * FUNCTION: initialize summary tree of the specified dmap:
3750 * at entry, bitmap of the dmap has been initialized;
3752 * PARAMETERS:
3753 * dp - dmap whose summary tree is to be initialized
3757 * RETURNS: max free string at the root of the tree
3759 static int dbInitDmapTree(struct dmap * dp)
3761 struct dmaptree *tp;
3762 s8 *cp;
3763 int i;
3765 /* init fixed info of tree */
3766 tp = &dp->tree;
3767 tp->nleafs = cpu_to_le32(LPERDMAP);
3768 tp->l2nleafs = cpu_to_le32(L2LPERDMAP);
3769 tp->leafidx = cpu_to_le32(LEAFIND);
3770 tp->height = cpu_to_le32(4);
3771 tp->budmin = BUDMIN;
3773 /* init each leaf from corresponding wmap word:
3774 * note: leaf is set to NOFREE(-1) if all blocks of corresponding
3775 * bitmap word are allocated.
3777 cp = tp->stree + le32_to_cpu(tp->leafidx);
3778 for (i = 0; i < LPERDMAP; i++)
3779 *cp++ = dbMaxBud((u8 *) & dp->wmap[i]);
3781 /* build the dmap's binary buddy summary tree */
3782 return (dbInitTree(tp));
3787 * NAME: dbInitTree()/ujfs_adjtree()
3789 * FUNCTION: initialize binary buddy summary tree of a dmap or dmapctl.
3791 * at entry, the leaves of the tree have been initialized
3792 * from corresponding bitmap word or root of summary tree
3793 * of the child control page;
3794 * configure binary buddy system at the leaf level, then
3795 * bubble up the values of the leaf nodes up the tree.
3797 * PARAMETERS:
3798 * dtp - pointer to the dmap or dmapctl tree to initialize;
3799 * the number of leaves and the minimum leaf coverage
3800 * (budmin) are taken from the tree header itself.
3803 * RETURNS: max free string at the root of the tree
3805 static int dbInitTree(struct dmaptree * dtp)
3807 int l2max, l2free, bsize, nextb, i;
3808 int child, parent, nparent;
3809 s8 *tp, *cp, *cp1;
3811 tp = dtp->stree;
3813 /* Determine the maximum free string possible for the leaves */
3814 l2max = le32_to_cpu(dtp->l2nleafs) + dtp->budmin;
3817 * configure the leaf level into binary buddy system
3819 * Try to combine buddies starting with a buddy size of 1
3820 * (i.e. two leaves). At a buddy size of 1 two buddy leaves
3821 * can be combined if both buddies have a maximum free of budmin;
3822 * the combination will result in the left-most buddy leaf having
3823 * a maximum free of budmin+1.
3824 * After processing all buddies for a given size, process buddies
3825 * at the next higher buddy size (i.e. current size * 2) and
3826 * the next maximum free (current free + 1).
3827 * This continues until the maximum possible buddy combination
3828 * yields maximum free.
3830 for (l2free = dtp->budmin, bsize = 1; l2free < l2max;
3831 l2free++, bsize = nextb) {
3832 /* get next buddy size == current buddy pair size */
3833 nextb = bsize << 1;
3835 /* scan each adjacent buddy pair at current buddy size */
3836 for (i = 0, cp = tp + le32_to_cpu(dtp->leafidx);
3837 i < le32_to_cpu(dtp->nleafs);
3838 i += nextb, cp += nextb) {
3839 /* coalesce if both adjacent buddies are max free */
3840 if (*cp == l2free && *(cp + bsize) == l2free) {
3841 *cp = l2free + 1; /* left take right */
3842 *(cp + bsize) = -1; /* right give left */
3848 * bubble summary information of leaves up the tree.
3850 * Starting at the leaf node level, the four nodes described by
3851 * the higher level parent node are compared for a maximum free and
3852 * this maximum becomes the value of the parent node.
3853 * when all lower level nodes are processed in this fashion then
3854 * move up to the next level (parent becomes a lower level node) and
3855 * continue the process for that level.
3857 for (child = le32_to_cpu(dtp->leafidx),
3858 nparent = le32_to_cpu(dtp->nleafs) >> 2;
3859 nparent > 0; nparent >>= 2, child = parent) {
3860 /* get index of 1st node of parent level */
3861 parent = (child - 1) >> 2;
3863 /* set the value of the parent node as the maximum
3864 * of the four nodes of the current level.
3866 for (i = 0, cp = tp + child, cp1 = tp + parent;
3867 i < nparent; i++, cp += 4, cp1++)
3868 *cp1 = TREEMAX(cp);
3871 return (*tp);
3876 * dbInitDmapCtl()
3878 * function: initialize dmapctl page
3880 static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i)
3881 { /* start leaf index not covered by range */
3882 s8 *cp;
3884 dcp->nleafs = cpu_to_le32(LPERCTL);
3885 dcp->l2nleafs = cpu_to_le32(L2LPERCTL);
3886 dcp->leafidx = cpu_to_le32(CTLLEAFIND);
3887 dcp->height = cpu_to_le32(5);
3888 dcp->budmin = L2BPERDMAP + L2LPERCTL * level;
3891 * initialize the leaves of current level that were not covered
3892 * by the specified input block range (i.e. the leaves have no
3893 * low level dmapctl or dmap).
3895 cp = &dcp->stree[CTLLEAFIND + i];
3896 for (; i < LPERCTL; i++)
3897 *cp++ = NOFREE;
3899 /* build the dmap's binary buddy summary tree */
3900 return (dbInitTree((struct dmaptree *) dcp));
3905 * NAME: dbGetL2AGSize()/ujfs_getagl2size()
3907 * FUNCTION: Determine log2(allocation group size) from aggregate size
3909 * PARAMETERS:
3910 * nblocks - Number of blocks in aggregate
3912 * RETURNS: log2(allocation group size) in aggregate blocks
3914 static int dbGetL2AGSize(s64 nblocks)
3916 s64 sz;
3917 s64 m;
3918 int l2sz;
3920 if (nblocks < BPERDMAP * MAXAG)
3921 return (L2BPERDMAP);
3923 /* round up aggregate size to power of 2 */
3924 m = ((u64) 1 << (64 - 1));
3925 for (l2sz = 64; l2sz >= 0; l2sz--, m >>= 1) {
3926 if (m & nblocks)
3927 break;
3930 sz = (s64) 1 << l2sz;
3931 if (sz < nblocks)
3932 l2sz += 1;
3934 /* agsize = roundupSize/max_number_of_ag */
3935 return (l2sz - L2MAXAG);
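/* Worked example (not from the JFS sources): assuming MAXAG == 128
 * (L2MAXAG == 7) and BPERDMAP == 8192 as defined in jfs_dmap.h, a
 * 3,000,000-block aggregate rounds up to 2^22 blocks, so dbGetL2AGSize()
 * yields 22 - 7 == 15, i.e. allocation groups of 32768 blocks.  The
 * helper name is ours.
 */
#if 0
static void example_ag_size(void)
{
	int l2agsz = dbGetL2AGSize(3000000LL);	/* 15 */
	(void) l2agsz;
}
#endif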
3940 * NAME: dbMapFileSizeToMapSize()
3942 * FUNCTION: compute number of blocks the block allocation map file
3943 * can cover from the map file size;
3945 * RETURNS: Number of blocks which can be covered by this block map file;
3949 * maximum number of map pages at each level including control pages
3951 #define MAXL0PAGES (1 + LPERCTL)
3952 #define MAXL1PAGES (1 + LPERCTL * MAXL0PAGES)
3953 #define MAXL2PAGES (1 + LPERCTL * MAXL1PAGES)
3956 * convert number of map pages to the zero origin top dmapctl level
3958 #define BMAPPGTOLEV(npages) \
3959 (((npages) <= 3 + MAXL0PAGES) ? 0 \
3960 : ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
3962 s64 dbMapFileSizeToMapSize(struct inode * ipbmap)
3964 struct super_block *sb = ipbmap->i_sb;
3965 s64 nblocks;
3966 s64 npages, ndmaps;
3967 int level, i;
3968 int complete, factor;
3970 nblocks = ipbmap->i_size >> JFS_SBI(sb)->l2bsize;
3971 npages = nblocks >> JFS_SBI(sb)->l2nbperpage;
3972 level = BMAPPGTOLEV(npages);
3974 /* At each level, accumulate the number of dmap pages covered by
3975 * the number of full child levels below it;
3976 * repeat for the last incomplete child level.
3978 ndmaps = 0;
3979 npages--; /* skip the first global control page */
3980 /* skip higher level control pages above top level covered by map */
3981 npages -= (2 - level);
3982 npages--; /* skip top level's control page */
3983 for (i = level; i >= 0; i--) {
3984 factor =
3985 (i == 2) ? MAXL1PAGES : ((i == 1) ? MAXL0PAGES : 1);
3986 complete = (u32) npages / factor;
3987 ndmaps += complete * ((i == 2) ? LPERCTL * LPERCTL
3988 : ((i == 1) ? LPERCTL : 1));
3990 /* pages in last/incomplete child */
3991 npages = (u32) npages % factor;
3992 /* skip incomplete child's level control page */
3993 npages--;
3996 /* convert the number of dmaps into the number of blocks
3997 * which can be covered by the dmaps;
3999 nblocks = ndmaps << L2BPERDMAP;
4001 return (nblocks);