/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#define	XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

/*
 * Prototypes for per-ag allocation routines
 */

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
	xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}
/*
 * Get the data from the pointed-to record.
 */
STATIC int				/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		*bno = be32_to_cpu(rec->alloc.ar_startblock);
		*len = be32_to_cpu(rec->alloc.ar_blockcount);
	}
	return error;
}
/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC void
xfs_alloc_compute_aligned(
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_extlen_t	alignment,	/* alignment for allocation */
	xfs_extlen_t	minlen,		/* minimum length for allocation */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen)	/* result length */
{
	xfs_agblock_t	bno;
	xfs_extlen_t	diff;
	xfs_extlen_t	len;

	if (alignment > 1 && foundlen >= minlen) {
		bno = roundup(foundbno, alignment);
		diff = bno - foundbno;
		len = diff >= foundlen ? 0 : foundlen - diff;
	} else {
		bno = foundbno;
		len = foundlen;
	}
	*resbno = bno;
	*reslen = len;
}
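/*
 * Illustrative example (editor's addition, not in the original source):
 * xfs_alloc_compute_aligned(21, 20, 8, 1, &bno, &len) with these made-up
 * arguments rounds 21 up to bno = 24, so diff = 3 and the usable aligned
 * length becomes len = 20 - 3 = 17 blocks.
 */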
/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	if (freebno >= wantbno) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
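/*
 * Illustrative example (editor's addition, not in the original source):
 * asking for wantlen = 8 at wantbno = 100 with alignment 1 from a free
 * extent [freebno = 96, freelen = 32]: freebno < wantbno and
 * freeend = 128 >= wantend = 108, so *newbnop = 100 and the returned
 * diff is 0 (a perfect "near" fit).
 */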
/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod) {
		if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
			return;
	} else {
		if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
		    (int)args->minlen)
			return;
	}
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	args->len = rlen;
}
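/*
 * Illustrative example (editor's addition, not in the original source):
 * with args->prod = 4, args->mod = 0 and args->len = 14, k = 14 % 4 = 2,
 * so the length is trimmed to 12, a multiple of prod; had the trimmed
 * length fallen below args->minlen, the original length would be kept.
 */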
/*
 * Fix up length if there is too little space left in the a.g.
 * Return 1 if ok, 0 if too little, should give up.
 */
STATIC int
xfs_alloc_fix_minleft(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_agf_t	*agf;		/* a.g. freelist header */
	int		diff;		/* free space difference */

	if (args->minleft == 0)
		return 1;
	agf = XFS_BUF_TO_AGF(args->agbp);
	diff = be32_to_cpu(agf->agf_freeblks)
		+ be32_to_cpu(agf->agf_flcount)
		- args->len - args->minleft;
	if (diff >= 0)
		return 1;
	args->len += diff;		/* shrink the allocated space */
	if (args->len >= args->minlen)
		return 1;
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_small_notenough(args);
	return 0;
}
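/*
 * Illustrative example (editor's addition, not in the original source):
 * with agf_freeblks = 50, agf_flcount = 6, args->len = 40 and
 * args->minleft = 20, diff = 50 + 6 - 40 - 20 = -4, so the allocation is
 * shrunk to 36 blocks; it only fails if 36 drops below args->minlen.
 */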
/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks.  The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1=0;	/* first new free length */
	xfs_extlen_t	nflen2=0;	/* second new free length */

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(
			i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		XFS_WANT_CORRUPTED_RETURN(
			bnoblock->bb_numrecs == cntblock->bb_numrecs);
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	XFS_WANT_CORRUPTED_RETURN(i == 1);
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 0);
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
	}
	return 0;
}
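/*
 * Illustrative example (editor's addition, not in the original source):
 * carving [rbno = 14, rlen = 4] out of the middle of free extent
 * [fbno = 10, flen = 20] leaves two new free pieces,
 * nfbno1 = 10/nflen1 = 4 and nfbno2 = 18/nflen2 = 12: the old by-size
 * record is deleted and both pieces inserted, while the by-block record
 * is updated to the first piece and the second piece is inserted.
 */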
/*
 * Read in the allocation group free block array.
 */
STATIC int				/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (error)
		return error;
	ASSERT(bp);
	ASSERT(!XFS_BUF_GETERROR(bp));
	XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}
/*
 * Allocation group level functions.
 */

/*
 * Allocate a variable extent in the allocation group agno.
 * Type and bno are used to determine where in the allocation group the
 * extent will start.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent(
	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
{
	int		error=0;

	ASSERT(args->minlen > 0);
	ASSERT(args->maxlen > 0);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->mod < args->prod);
	ASSERT(args->alignment > 0);
	/*
	 * Branch to correct routine based on the type.
	 */
	switch (args->type) {
	case XFS_ALLOCTYPE_THIS_AG:
		error = xfs_alloc_ag_vextent_size(args);
		break;
	case XFS_ALLOCTYPE_NEAR_BNO:
		error = xfs_alloc_ag_vextent_near(args);
		break;
	case XFS_ALLOCTYPE_THIS_BNO:
		error = xfs_alloc_ag_vextent_exact(args);
		break;
	default:
		ASSERT(0);
		/* NOTREACHED */
	}
	if (error)
		return error;
	/*
	 * If the allocation worked, need to change the agf structure
	 * (and log it), and the superblock.
	 */
	if (args->agbno != NULLAGBLOCK) {
		xfs_agf_t	*agf;	/* allocation group freelist header */
		long		slen = (long)args->len;

		ASSERT(args->len >= args->minlen && args->len <= args->maxlen);
		ASSERT(!(args->wasfromfl) || !args->isfl);
		ASSERT(args->agbno % args->alignment == 0);
		if (!(args->wasfromfl)) {

			agf = XFS_BUF_TO_AGF(args->agbp);
			be32_add_cpu(&agf->agf_freeblks, -(args->len));
			xfs_trans_agblocks_delta(args->tp,
						 -((long)(args->len)));
			args->pag->pagf_freeblks -= args->len;
			ASSERT(be32_to_cpu(agf->agf_freeblks) <=
				be32_to_cpu(agf->agf_length));
			xfs_alloc_log_agf(args->tp, args->agbp,
						XFS_AGF_FREEBLKS);
			/*
			 * Search the busylist for these blocks and mark the
			 * transaction as synchronous if blocks are found. This
			 * avoids the need to block due to a synchronous log
			 * force to ensure correct ordering as the synchronous
			 * transaction will guarantee that for us.
			 */
			if (xfs_alloc_busy_search(args->mp, args->agno,
					args->agbno, args->len))
				xfs_trans_set_sync(args->tp);
		}
		if (!args->isfl)
			xfs_trans_mod_sb(args->tp,
				args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
					XFS_TRANS_SB_FDBLOCKS, -slen);
		XFS_STATS_INC(xs_allocx);
		XFS_STATS_ADD(xs_allocb, args->len);
	}
	return 0;
}
/*
 * Allocate a variable extent at exactly agno/bno.
 * Extent's length (returned in *len) will be between minlen and maxlen,
 * and of the form k * prod + mod unless there's nothing that large.
 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
 */
STATIC int			/* error */
xfs_alloc_ag_vextent_exact(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
	xfs_agblock_t	end;	/* end of allocated extent */
	int		error;
	xfs_agblock_t	fbno;	/* start block of found extent */
	xfs_agblock_t	fend;	/* end block of found extent */
	xfs_extlen_t	flen;	/* length of found extent */
	int		i;	/* success/failure of operation */
	xfs_agblock_t	maxend;	/* end of maximal extent */
	xfs_agblock_t	minend;	/* end of minimal extent */
	xfs_extlen_t	rlen;	/* length of returned extent */

	ASSERT(args->alignment == 1);

	/*
	 * Allocate/initialize a cursor for the by-number freespace btree.
	 */
	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
					  args->agno, XFS_BTNUM_BNO);

	/*
	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
	 * Look for the closest free block <= bno, it must contain bno
	 * if any free block does.
	 */
	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
	if (error)
		goto error0;
	if (!i)
		goto not_found;

	/*
	 * Grab the freespace record.
	 */
	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
	if (error)
		goto error0;
	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
	ASSERT(fbno <= args->agbno);
	minend = args->agbno + args->minlen;
	maxend = args->agbno + args->maxlen;
	fend = fbno + flen;

	/*
	 * Give up if the freespace isn't long enough for the minimum request.
	 */
	if (fend < minend)
		goto not_found;

	/*
	 * End of extent will be smaller of the freespace end and the
	 * maximal requested end.
	 *
	 * Fix the length according to mod and prod if given.
	 */
	end = XFS_AGBLOCK_MIN(fend, maxend);
	args->len = end - args->agbno;
	xfs_alloc_fix_len(args);
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;

	rlen = args->len;
	ASSERT(args->agbno + rlen <= fend);
	end = args->agbno + rlen;

	/*
	 * We are allocating agbno for rlen [agbno .. end]
	 * Allocate/initialize a cursor for the by-size btree.
	 */
	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
		args->agno, XFS_BTNUM_CNT);
	ASSERT(args->agbno + args->len <=
		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
				      args->len, XFSA_FIXUP_BNO_OK);
	if (error) {
		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
		goto error0;
	}

	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);

	args->wasfromfl = 0;
	trace_xfs_alloc_exact_done(args);
	return 0;

not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;

error0:
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
	trace_xfs_alloc_exact_error(args);
	return error;
}
/*
 * Search the btree in a given direction via the search cursor and compare
 * the records found against the good extent we've already found.
 */
STATIC int
xfs_alloc_find_best_extent(
	struct xfs_alloc_arg	*args,	/* allocation argument structure */
	struct xfs_btree_cur	**gcur,	/* good cursor */
	struct xfs_btree_cur	**scur,	/* searching cursor */
	xfs_agblock_t		gdiff,	/* difference for search comparison */
	xfs_agblock_t		*sbno,	/* extent found by search */
	xfs_extlen_t		*slen,
	xfs_extlen_t		*slena,	/* aligned length */
	int			dir)	/* 0 = search right, 1 = search left */
{
	xfs_agblock_t		bno;
	xfs_agblock_t		new;
	xfs_agblock_t		sdiff;
	int			error;
	int			i;

	/* The good extent is perfect, no need to search. */
	if (!gdiff)
		goto out_use_good;

	/*
	 * Look until we find a better one, run out of space or run off the end.
	 */
	do {
		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
		xfs_alloc_compute_aligned(*sbno, *slen, args->alignment,
					  args->minlen, &bno, slena);

		/*
		 * The good extent is closer than this one.
		 */
		if (!dir) {
			if (bno >= args->agbno + gdiff)
				goto out_use_good;
		} else {
			if (bno <= args->agbno - gdiff)
				goto out_use_good;
		}

		/*
		 * Same distance, compare length and pick the best.
		 */
		if (*slena >= args->minlen) {
			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
			xfs_alloc_fix_len(args);

			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
						       args->alignment, *sbno,
						       *slen, &new);

			/*
			 * Choose closer size and invalidate other cursor.
			 */
			if (sdiff < gdiff)
				goto out_use_search;
			goto out_use_good;
		}

		if (!dir)
			error = xfs_btree_increment(*scur, 0, &i);
		else
			error = xfs_btree_decrement(*scur, 0, &i);
		if (error)
			goto error0;
	} while (i);

out_use_good:
	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
	*scur = NULL;
	return 0;

out_use_search:
	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
	*gcur = NULL;
	return 0;

error0:
	/* caller invalidates cursors */
	return error;
}
753 * Allocate a variable extent near bno in the allocation group agno.
754 * Extent's length (returned in len) will be between minlen and maxlen,
755 * and of the form k * prod + mod unless there's nothing that large.
756 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
758 STATIC
int /* error */
759 xfs_alloc_ag_vextent_near(
760 xfs_alloc_arg_t
*args
) /* allocation argument structure */
762 xfs_btree_cur_t
*bno_cur_gt
; /* cursor for bno btree, right side */
763 xfs_btree_cur_t
*bno_cur_lt
; /* cursor for bno btree, left side */
764 xfs_btree_cur_t
*cnt_cur
; /* cursor for count btree */
765 xfs_agblock_t gtbno
; /* start bno of right side entry */
766 xfs_agblock_t gtbnoa
; /* aligned ... */
767 xfs_extlen_t gtdiff
; /* difference to right side entry */
768 xfs_extlen_t gtlen
; /* length of right side entry */
769 xfs_extlen_t gtlena
= 0; /* aligned ... */
770 xfs_agblock_t gtnew
; /* useful start bno of right side */
771 int error
; /* error code */
772 int i
; /* result code, temporary */
773 int j
; /* result code, temporary */
774 xfs_agblock_t ltbno
; /* start bno of left side entry */
775 xfs_agblock_t ltbnoa
; /* aligned ... */
776 xfs_extlen_t ltdiff
; /* difference to left side entry */
777 xfs_extlen_t ltlen
; /* length of left side entry */
778 xfs_extlen_t ltlena
= 0; /* aligned ... */
779 xfs_agblock_t ltnew
; /* useful start bno of left side */
780 xfs_extlen_t rlen
; /* length of returned extent */
781 #if defined(DEBUG) && defined(__KERNEL__)
783 * Randomly don't execute the first algorithm.
785 int dofirst
; /* set to do first algorithm */
787 dofirst
= random32() & 1;
790 * Get a cursor for the by-size btree.
792 cnt_cur
= xfs_allocbt_init_cursor(args
->mp
, args
->tp
, args
->agbp
,
793 args
->agno
, XFS_BTNUM_CNT
);
795 bno_cur_lt
= bno_cur_gt
= NULL
;
797 * See if there are any free extents as big as maxlen.
799 if ((error
= xfs_alloc_lookup_ge(cnt_cur
, 0, args
->maxlen
, &i
)))
802 * If none, then pick up the last entry in the tree unless the
806 if ((error
= xfs_alloc_ag_vextent_small(args
, cnt_cur
, <bno
,
809 if (i
== 0 || ltlen
== 0) {
810 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
818 * If the requested extent is large wrt the freespaces available
819 * in this a.g., then the cursor will be pointing to a btree entry
820 * near the right edge of the tree. If it's in the last btree leaf
821 * block, then we just examine all the entries in that block
822 * that are big enough, and pick the best one.
823 * This is written as a while loop so we can break out of it,
824 * but we never loop back to the top.
826 while (xfs_btree_islastblock(cnt_cur
, 0)) {
830 xfs_agblock_t bnew
=0;
832 #if defined(DEBUG) && defined(__KERNEL__)
837 * Start from the entry that lookup found, sequence through
838 * all larger free blocks. If we're actually pointing at a
839 * record smaller than maxlen, go to the start of this block,
840 * and skip all those smaller than minlen.
842 if (ltlen
|| args
->alignment
> 1) {
843 cnt_cur
->bc_ptrs
[0] = 1;
845 if ((error
= xfs_alloc_get_rec(cnt_cur
, <bno
,
848 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
849 if (ltlen
>= args
->minlen
)
851 if ((error
= xfs_btree_increment(cnt_cur
, 0, &i
)))
854 ASSERT(ltlen
>= args
->minlen
);
858 i
= cnt_cur
->bc_ptrs
[0];
859 for (j
= 1, blen
= 0, bdiff
= 0;
860 !error
&& j
&& (blen
< args
->maxlen
|| bdiff
> 0);
861 error
= xfs_btree_increment(cnt_cur
, 0, &j
)) {
863 * For each entry, decide if it's better than
864 * the previous best entry.
866 if ((error
= xfs_alloc_get_rec(cnt_cur
, <bno
, <len
, &i
)))
868 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
869 xfs_alloc_compute_aligned(ltbno
, ltlen
, args
->alignment
,
870 args
->minlen
, <bnoa
, <lena
);
871 if (ltlena
< args
->minlen
)
873 args
->len
= XFS_EXTLEN_MIN(ltlena
, args
->maxlen
);
874 xfs_alloc_fix_len(args
);
875 ASSERT(args
->len
>= args
->minlen
);
876 if (args
->len
< blen
)
878 ltdiff
= xfs_alloc_compute_diff(args
->agbno
, args
->len
,
879 args
->alignment
, ltbno
, ltlen
, <new
);
880 if (ltnew
!= NULLAGBLOCK
&&
881 (args
->len
> blen
|| ltdiff
< bdiff
)) {
885 besti
= cnt_cur
->bc_ptrs
[0];
889 * It didn't work. We COULD be in a case where
890 * there's a good record somewhere, so try again.
895 * Point at the best entry, and retrieve it again.
897 cnt_cur
->bc_ptrs
[0] = besti
;
898 if ((error
= xfs_alloc_get_rec(cnt_cur
, <bno
, <len
, &i
)))
900 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
901 ASSERT(ltbno
+ ltlen
<= be32_to_cpu(XFS_BUF_TO_AGF(args
->agbp
)->agf_length
));
903 if (!xfs_alloc_fix_minleft(args
)) {
904 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
905 trace_xfs_alloc_near_nominleft(args
);
910 * We are allocating starting at bnew for blen blocks.
913 ASSERT(bnew
>= ltbno
);
914 ASSERT(bnew
+ blen
<= ltbno
+ ltlen
);
916 * Set up a cursor for the by-bno tree.
918 bno_cur_lt
= xfs_allocbt_init_cursor(args
->mp
, args
->tp
,
919 args
->agbp
, args
->agno
, XFS_BTNUM_BNO
);
921 * Fix up the btree entries.
923 if ((error
= xfs_alloc_fixup_trees(cnt_cur
, bno_cur_lt
, ltbno
,
924 ltlen
, bnew
, blen
, XFSA_FIXUP_CNT_OK
)))
926 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
927 xfs_btree_del_cursor(bno_cur_lt
, XFS_BTREE_NOERROR
);
929 trace_xfs_alloc_near_first(args
);
934 * Search in the by-bno tree to the left and to the right
935 * simultaneously, until in each case we find a space big enough,
936 * or run into the edge of the tree. When we run into the edge,
937 * we deallocate that cursor.
938 * If both searches succeed, we compare the two spaces and pick
940 * With alignment, it's possible for both to fail; the upper
941 * level algorithm that picks allocation groups for allocations
942 * is not supposed to do this.
945 * Allocate and initialize the cursor for the leftward search.
947 bno_cur_lt
= xfs_allocbt_init_cursor(args
->mp
, args
->tp
, args
->agbp
,
948 args
->agno
, XFS_BTNUM_BNO
);
950 * Lookup <= bno to find the leftward search's starting point.
952 if ((error
= xfs_alloc_lookup_le(bno_cur_lt
, args
->agbno
, args
->maxlen
, &i
)))
956 * Didn't find anything; use this cursor for the rightward
959 bno_cur_gt
= bno_cur_lt
;
963 * Found something. Duplicate the cursor for the rightward search.
965 else if ((error
= xfs_btree_dup_cursor(bno_cur_lt
, &bno_cur_gt
)))
968 * Increment the cursor, so we will point at the entry just right
969 * of the leftward entry if any, or to the leftmost entry.
971 if ((error
= xfs_btree_increment(bno_cur_gt
, 0, &i
)))
975 * It failed, there are no rightward entries.
977 xfs_btree_del_cursor(bno_cur_gt
, XFS_BTREE_NOERROR
);
981 * Loop going left with the leftward cursor, right with the
982 * rightward cursor, until either both directions give up or
983 * we find an entry at least as big as minlen.
987 if ((error
= xfs_alloc_get_rec(bno_cur_lt
, <bno
, <len
, &i
)))
989 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
990 xfs_alloc_compute_aligned(ltbno
, ltlen
, args
->alignment
,
991 args
->minlen
, <bnoa
, <lena
);
992 if (ltlena
>= args
->minlen
)
994 if ((error
= xfs_btree_decrement(bno_cur_lt
, 0, &i
)))
997 xfs_btree_del_cursor(bno_cur_lt
,
1003 if ((error
= xfs_alloc_get_rec(bno_cur_gt
, >bno
, >len
, &i
)))
1005 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1006 xfs_alloc_compute_aligned(gtbno
, gtlen
, args
->alignment
,
1007 args
->minlen
, >bnoa
, >lena
);
1008 if (gtlena
>= args
->minlen
)
1010 if ((error
= xfs_btree_increment(bno_cur_gt
, 0, &i
)))
1013 xfs_btree_del_cursor(bno_cur_gt
,
1018 } while (bno_cur_lt
|| bno_cur_gt
);
1021 * Got both cursors still active, need to find better entry.
1023 if (bno_cur_lt
&& bno_cur_gt
) {
1024 if (ltlena
>= args
->minlen
) {
1026 * Left side is good, look for a right side entry.
1028 args
->len
= XFS_EXTLEN_MIN(ltlena
, args
->maxlen
);
1029 xfs_alloc_fix_len(args
);
1030 ltdiff
= xfs_alloc_compute_diff(args
->agbno
, args
->len
,
1031 args
->alignment
, ltbno
, ltlen
, <new
);
1033 error
= xfs_alloc_find_best_extent(args
,
1034 &bno_cur_lt
, &bno_cur_gt
,
1035 ltdiff
, >bno
, >len
, >lena
,
1036 0 /* search right */);
1038 ASSERT(gtlena
>= args
->minlen
);
1041 * Right side is good, look for a left side entry.
1043 args
->len
= XFS_EXTLEN_MIN(gtlena
, args
->maxlen
);
1044 xfs_alloc_fix_len(args
);
1045 gtdiff
= xfs_alloc_compute_diff(args
->agbno
, args
->len
,
1046 args
->alignment
, gtbno
, gtlen
, >new
);
1048 error
= xfs_alloc_find_best_extent(args
,
1049 &bno_cur_gt
, &bno_cur_lt
,
1050 gtdiff
, <bno
, <len
, <lena
,
1051 1 /* search left */);
1059 * If we couldn't get anything, give up.
1061 if (bno_cur_lt
== NULL
&& bno_cur_gt
== NULL
) {
1062 trace_xfs_alloc_size_neither(args
);
1063 args
->agbno
= NULLAGBLOCK
;
1068 * At this point we have selected a freespace entry, either to the
1069 * left or to the right. If it's on the right, copy all the
1070 * useful variables to the "left" set so we only have one
1071 * copy of this code.
1074 bno_cur_lt
= bno_cur_gt
;
1085 * Fix up the length and compute the useful address.
1087 args
->len
= XFS_EXTLEN_MIN(ltlena
, args
->maxlen
);
1088 xfs_alloc_fix_len(args
);
1089 if (!xfs_alloc_fix_minleft(args
)) {
1090 trace_xfs_alloc_near_nominleft(args
);
1091 xfs_btree_del_cursor(bno_cur_lt
, XFS_BTREE_NOERROR
);
1092 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
1096 (void)xfs_alloc_compute_diff(args
->agbno
, rlen
, args
->alignment
, ltbno
,
1098 ASSERT(ltnew
>= ltbno
);
1099 ASSERT(ltnew
+ rlen
<= ltbno
+ ltlen
);
1100 ASSERT(ltnew
+ rlen
<= be32_to_cpu(XFS_BUF_TO_AGF(args
->agbp
)->agf_length
));
1101 args
->agbno
= ltnew
;
1102 if ((error
= xfs_alloc_fixup_trees(cnt_cur
, bno_cur_lt
, ltbno
, ltlen
,
1103 ltnew
, rlen
, XFSA_FIXUP_BNO_OK
)))
1107 trace_xfs_alloc_near_greater(args
);
1109 trace_xfs_alloc_near_lesser(args
);
1111 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
1112 xfs_btree_del_cursor(bno_cur_lt
, XFS_BTREE_NOERROR
);
1116 trace_xfs_alloc_near_error(args
);
1117 if (cnt_cur
!= NULL
)
1118 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_ERROR
);
1119 if (bno_cur_lt
!= NULL
)
1120 xfs_btree_del_cursor(bno_cur_lt
, XFS_BTREE_ERROR
);
1121 if (bno_cur_gt
!= NULL
)
1122 xfs_btree_del_cursor(bno_cur_gt
, XFS_BTREE_ERROR
);
1127 * Allocate a variable extent anywhere in the allocation group agno.
1128 * Extent's length (returned in len) will be between minlen and maxlen,
1129 * and of the form k * prod + mod unless there's nothing that large.
1130 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1132 STATIC
int /* error */
1133 xfs_alloc_ag_vextent_size(
1134 xfs_alloc_arg_t
*args
) /* allocation argument structure */
1136 xfs_btree_cur_t
*bno_cur
; /* cursor for bno btree */
1137 xfs_btree_cur_t
*cnt_cur
; /* cursor for cnt btree */
1138 int error
; /* error result */
1139 xfs_agblock_t fbno
; /* start of found freespace */
1140 xfs_extlen_t flen
; /* length of found freespace */
1141 int i
; /* temp status variable */
1142 xfs_agblock_t rbno
; /* returned block number */
1143 xfs_extlen_t rlen
; /* length of returned extent */
1146 * Allocate and initialize a cursor for the by-size btree.
1148 cnt_cur
= xfs_allocbt_init_cursor(args
->mp
, args
->tp
, args
->agbp
,
1149 args
->agno
, XFS_BTNUM_CNT
);
1152 * Look for an entry >= maxlen+alignment-1 blocks.
1154 if ((error
= xfs_alloc_lookup_ge(cnt_cur
, 0,
1155 args
->maxlen
+ args
->alignment
- 1, &i
)))
1158 * If none, then pick up the last entry in the tree unless the
1162 if ((error
= xfs_alloc_ag_vextent_small(args
, cnt_cur
, &fbno
,
1165 if (i
== 0 || flen
== 0) {
1166 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
1167 trace_xfs_alloc_size_noentry(args
);
1173 * There's a freespace as big as maxlen+alignment-1, get it.
1176 if ((error
= xfs_alloc_get_rec(cnt_cur
, &fbno
, &flen
, &i
)))
1178 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1181 * In the first case above, we got the last entry in the
1182 * by-size btree. Now we check to see if the space hits maxlen
1183 * once aligned; if not, we search left for something better.
1184 * This can't happen in the second case above.
1186 xfs_alloc_compute_aligned(fbno
, flen
, args
->alignment
, args
->minlen
,
1188 rlen
= XFS_EXTLEN_MIN(args
->maxlen
, rlen
);
1189 XFS_WANT_CORRUPTED_GOTO(rlen
== 0 ||
1190 (rlen
<= flen
&& rbno
+ rlen
<= fbno
+ flen
), error0
);
1191 if (rlen
< args
->maxlen
) {
1192 xfs_agblock_t bestfbno
;
1193 xfs_extlen_t bestflen
;
1194 xfs_agblock_t bestrbno
;
1195 xfs_extlen_t bestrlen
;
1202 if ((error
= xfs_btree_decrement(cnt_cur
, 0, &i
)))
1206 if ((error
= xfs_alloc_get_rec(cnt_cur
, &fbno
, &flen
,
1209 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1210 if (flen
< bestrlen
)
1212 xfs_alloc_compute_aligned(fbno
, flen
, args
->alignment
,
1213 args
->minlen
, &rbno
, &rlen
);
1214 rlen
= XFS_EXTLEN_MIN(args
->maxlen
, rlen
);
1215 XFS_WANT_CORRUPTED_GOTO(rlen
== 0 ||
1216 (rlen
<= flen
&& rbno
+ rlen
<= fbno
+ flen
),
1218 if (rlen
> bestrlen
) {
1223 if (rlen
== args
->maxlen
)
1227 if ((error
= xfs_alloc_lookup_eq(cnt_cur
, bestfbno
, bestflen
,
1230 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1236 args
->wasfromfl
= 0;
1238 * Fix up the length.
1241 xfs_alloc_fix_len(args
);
1242 if (rlen
< args
->minlen
|| !xfs_alloc_fix_minleft(args
)) {
1243 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
1244 trace_xfs_alloc_size_nominleft(args
);
1245 args
->agbno
= NULLAGBLOCK
;
1249 XFS_WANT_CORRUPTED_GOTO(rlen
<= flen
, error0
);
1251 * Allocate and initialize a cursor for the by-block tree.
1253 bno_cur
= xfs_allocbt_init_cursor(args
->mp
, args
->tp
, args
->agbp
,
1254 args
->agno
, XFS_BTNUM_BNO
);
1255 if ((error
= xfs_alloc_fixup_trees(cnt_cur
, bno_cur
, fbno
, flen
,
1256 rbno
, rlen
, XFSA_FIXUP_CNT_OK
)))
1258 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
1259 xfs_btree_del_cursor(bno_cur
, XFS_BTREE_NOERROR
);
1260 cnt_cur
= bno_cur
= NULL
;
1263 XFS_WANT_CORRUPTED_GOTO(
1264 args
->agbno
+ args
->len
<=
1265 be32_to_cpu(XFS_BUF_TO_AGF(args
->agbp
)->agf_length
),
1267 trace_xfs_alloc_size_done(args
);
1271 trace_xfs_alloc_size_error(args
);
1273 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_ERROR
);
1275 xfs_btree_del_cursor(bno_cur
, XFS_BTREE_ERROR
);
1280 * Deal with the case where only small freespaces remain.
1281 * Either return the contents of the last freespace record,
1282 * or allocate space from the freelist if there is nothing in the tree.
1284 STATIC
int /* error */
1285 xfs_alloc_ag_vextent_small(
1286 xfs_alloc_arg_t
*args
, /* allocation argument structure */
1287 xfs_btree_cur_t
*ccur
, /* by-size cursor */
1288 xfs_agblock_t
*fbnop
, /* result block number */
1289 xfs_extlen_t
*flenp
, /* result length */
1290 int *stat
) /* status: 0-freelist, 1-normal/none */
1297 if ((error
= xfs_btree_decrement(ccur
, 0, &i
)))
1300 if ((error
= xfs_alloc_get_rec(ccur
, &fbno
, &flen
, &i
)))
1302 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1305 * Nothing in the btree, try the freelist. Make sure
1306 * to respect minleft even when pulling from the
1309 else if (args
->minlen
== 1 && args
->alignment
== 1 && !args
->isfl
&&
1310 (be32_to_cpu(XFS_BUF_TO_AGF(args
->agbp
)->agf_flcount
)
1312 error
= xfs_alloc_get_freelist(args
->tp
, args
->agbp
, &fbno
, 0);
1315 if (fbno
!= NULLAGBLOCK
) {
1316 if (args
->userdata
) {
1319 bp
= xfs_btree_get_bufs(args
->mp
, args
->tp
,
1320 args
->agno
, fbno
, 0);
1321 xfs_trans_binval(args
->tp
, bp
);
1325 XFS_WANT_CORRUPTED_GOTO(
1326 args
->agbno
+ args
->len
<=
1327 be32_to_cpu(XFS_BUF_TO_AGF(args
->agbp
)->agf_length
),
1329 args
->wasfromfl
= 1;
1330 trace_xfs_alloc_small_freelist(args
);
1335 * Nothing in the freelist.
1341 * Can't allocate from the freelist for some reason.
1348 * Can't do the allocation, give up.
1350 if (flen
< args
->minlen
) {
1351 args
->agbno
= NULLAGBLOCK
;
1352 trace_xfs_alloc_small_notenough(args
);
1358 trace_xfs_alloc_small_done(args
);
1362 trace_xfs_alloc_small_error(args
);
1367 * Free the extent starting at agno/bno for length.
1369 STATIC
int /* error */
1371 xfs_trans_t
*tp
, /* transaction pointer */
1372 xfs_buf_t
*agbp
, /* buffer for a.g. freelist header */
1373 xfs_agnumber_t agno
, /* allocation group number */
1374 xfs_agblock_t bno
, /* starting block number */
1375 xfs_extlen_t len
, /* length of extent */
1376 int isfl
) /* set if is freelist blocks - no sb acctg */
1378 xfs_btree_cur_t
*bno_cur
; /* cursor for by-block btree */
1379 xfs_btree_cur_t
*cnt_cur
; /* cursor for by-size btree */
1380 int error
; /* error return value */
1381 xfs_agblock_t gtbno
; /* start of right neighbor block */
1382 xfs_extlen_t gtlen
; /* length of right neighbor block */
1383 int haveleft
; /* have a left neighbor block */
1384 int haveright
; /* have a right neighbor block */
1385 int i
; /* temp, result code */
1386 xfs_agblock_t ltbno
; /* start of left neighbor block */
1387 xfs_extlen_t ltlen
; /* length of left neighbor block */
1388 xfs_mount_t
*mp
; /* mount point struct for filesystem */
1389 xfs_agblock_t nbno
; /* new starting block of freespace */
1390 xfs_extlen_t nlen
; /* new length of freespace */
1394 * Allocate and initialize a cursor for the by-block btree.
1396 bno_cur
= xfs_allocbt_init_cursor(mp
, tp
, agbp
, agno
, XFS_BTNUM_BNO
);
1399 * Look for a neighboring block on the left (lower block numbers)
1400 * that is contiguous with this space.
1402 if ((error
= xfs_alloc_lookup_le(bno_cur
, bno
, len
, &haveleft
)))
1406 * There is a block to our left.
1408 if ((error
= xfs_alloc_get_rec(bno_cur
, <bno
, <len
, &i
)))
1410 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1412 * It's not contiguous, though.
1414 if (ltbno
+ ltlen
< bno
)
1418 * If this failure happens the request to free this
1419 * space was invalid, it's (partly) already free.
1422 XFS_WANT_CORRUPTED_GOTO(ltbno
+ ltlen
<= bno
, error0
);
1426 * Look for a neighboring block on the right (higher block numbers)
1427 * that is contiguous with this space.
1429 if ((error
= xfs_btree_increment(bno_cur
, 0, &haveright
)))
1433 * There is a block to our right.
1435 if ((error
= xfs_alloc_get_rec(bno_cur
, >bno
, >len
, &i
)))
1437 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1439 * It's not contiguous, though.
1441 if (bno
+ len
< gtbno
)
1445 * If this failure happens the request to free this
1446 * space was invalid, it's (partly) already free.
1449 XFS_WANT_CORRUPTED_GOTO(gtbno
>= bno
+ len
, error0
);
1453 * Now allocate and initialize a cursor for the by-size tree.
1455 cnt_cur
= xfs_allocbt_init_cursor(mp
, tp
, agbp
, agno
, XFS_BTNUM_CNT
);
1457 * Have both left and right contiguous neighbors.
1458 * Merge all three into a single free block.
1460 if (haveleft
&& haveright
) {
1462 * Delete the old by-size entry on the left.
1464 if ((error
= xfs_alloc_lookup_eq(cnt_cur
, ltbno
, ltlen
, &i
)))
1466 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1467 if ((error
= xfs_btree_delete(cnt_cur
, &i
)))
1469 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1471 * Delete the old by-size entry on the right.
1473 if ((error
= xfs_alloc_lookup_eq(cnt_cur
, gtbno
, gtlen
, &i
)))
1475 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1476 if ((error
= xfs_btree_delete(cnt_cur
, &i
)))
1478 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1480 * Delete the old by-block entry for the right block.
1482 if ((error
= xfs_btree_delete(bno_cur
, &i
)))
1484 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1486 * Move the by-block cursor back to the left neighbor.
1488 if ((error
= xfs_btree_decrement(bno_cur
, 0, &i
)))
1490 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1493 * Check that this is the right record: delete didn't
1494 * mangle the cursor.
1497 xfs_agblock_t xxbno
;
1500 if ((error
= xfs_alloc_get_rec(bno_cur
, &xxbno
, &xxlen
,
1503 XFS_WANT_CORRUPTED_GOTO(
1504 i
== 1 && xxbno
== ltbno
&& xxlen
== ltlen
,
1509 * Update remaining by-block entry to the new, joined block.
1512 nlen
= len
+ ltlen
+ gtlen
;
1513 if ((error
= xfs_alloc_update(bno_cur
, nbno
, nlen
)))
1517 * Have only a left contiguous neighbor.
1518 * Merge it together with the new freespace.
1520 else if (haveleft
) {
1522 * Delete the old by-size entry on the left.
1524 if ((error
= xfs_alloc_lookup_eq(cnt_cur
, ltbno
, ltlen
, &i
)))
1526 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1527 if ((error
= xfs_btree_delete(cnt_cur
, &i
)))
1529 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1531 * Back up the by-block cursor to the left neighbor, and
1532 * update its length.
1534 if ((error
= xfs_btree_decrement(bno_cur
, 0, &i
)))
1536 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1539 if ((error
= xfs_alloc_update(bno_cur
, nbno
, nlen
)))
1543 * Have only a right contiguous neighbor.
1544 * Merge it together with the new freespace.
1546 else if (haveright
) {
1548 * Delete the old by-size entry on the right.
1550 if ((error
= xfs_alloc_lookup_eq(cnt_cur
, gtbno
, gtlen
, &i
)))
1552 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1553 if ((error
= xfs_btree_delete(cnt_cur
, &i
)))
1555 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1557 * Update the starting block and length of the right
1558 * neighbor in the by-block tree.
1562 if ((error
= xfs_alloc_update(bno_cur
, nbno
, nlen
)))
1566 * No contiguous neighbors.
1567 * Insert the new freespace into the by-block tree.
1572 if ((error
= xfs_btree_insert(bno_cur
, &i
)))
1574 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1576 xfs_btree_del_cursor(bno_cur
, XFS_BTREE_NOERROR
);
1579 * In all cases we need to insert the new freespace in the by-size tree.
1581 if ((error
= xfs_alloc_lookup_eq(cnt_cur
, nbno
, nlen
, &i
)))
1583 XFS_WANT_CORRUPTED_GOTO(i
== 0, error0
);
1584 if ((error
= xfs_btree_insert(cnt_cur
, &i
)))
1586 XFS_WANT_CORRUPTED_GOTO(i
== 1, error0
);
1587 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_NOERROR
);
1590 * Update the freespace totals in the ag and superblock.
1594 xfs_perag_t
*pag
; /* per allocation group data */
1596 pag
= xfs_perag_get(mp
, agno
);
1597 pag
->pagf_freeblks
+= len
;
1600 agf
= XFS_BUF_TO_AGF(agbp
);
1601 be32_add_cpu(&agf
->agf_freeblks
, len
);
1602 xfs_trans_agblocks_delta(tp
, len
);
1603 XFS_WANT_CORRUPTED_GOTO(
1604 be32_to_cpu(agf
->agf_freeblks
) <=
1605 be32_to_cpu(agf
->agf_length
),
1607 xfs_alloc_log_agf(tp
, agbp
, XFS_AGF_FREEBLKS
);
1609 xfs_trans_mod_sb(tp
, XFS_TRANS_SB_FDBLOCKS
, (long)len
);
1610 XFS_STATS_INC(xs_freex
);
1611 XFS_STATS_ADD(xs_freeb
, len
);
1614 trace_xfs_free_extent(mp
, agno
, bno
, len
, isfl
, haveleft
, haveright
);
1617 * Since blocks move to the free list without the coordination
1618 * used in xfs_bmap_finish, we can't allow block to be available
1619 * for reallocation and non-transaction writing (user data)
1620 * until we know that the transaction that moved it to the free
1621 * list is permanently on disk. We track the blocks by declaring
1622 * these blocks as "busy"; the busy list is maintained on a per-ag
1623 * basis and each transaction records which entries should be removed
1624 * when the iclog commits to disk. If a busy block is allocated,
1625 * the iclog is pushed up to the LSN that freed the block.
1627 xfs_alloc_busy_insert(tp
, agno
, bno
, len
);
1631 trace_xfs_free_extent(mp
, agno
, bno
, len
, isfl
, -1, -1);
1633 xfs_btree_del_cursor(bno_cur
, XFS_BTREE_ERROR
);
1635 xfs_btree_del_cursor(cnt_cur
, XFS_BTREE_ERROR
);
/*
 * Visible (exported) allocation/free functions.
 * Some of these are used just by xfs_alloc_btree.c and this file.
 */

/*
 * Compute and fill in value of m_ag_maxlevels.
 */
void
xfs_alloc_compute_maxlevels(
	xfs_mount_t	*mp)	/* file system mount structure */
{
	int		level;
	uint		maxblocks;
	uint		maxleafents;
	int		minleafrecs;
	int		minnoderecs;

	maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
	minleafrecs = mp->m_alloc_mnr[0];
	minnoderecs = mp->m_alloc_mnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	mp->m_ag_maxlevels = level;
}
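/*
 * Illustrative example (editor's addition, not in the original source,
 * using made-up record counts): for sb_agblocks = 1000000,
 * maxleafents = 500000; if minleafrecs = minnoderecs = 250, the worst
 * case needs 2000 leaves, then 8 nodes, then 1 root block, so
 * m_ag_maxlevels ends up as 3.
 */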
/*
 * Find the length of the longest extent in an AG.
 */
xfs_extlen_t
xfs_alloc_longest_free_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	xfs_extlen_t		need, delta = 0;

	need = XFS_MIN_FREELIST_PAG(pag, mp);
	if (need > pag->pagf_flcount)
		delta = need - pag->pagf_flcount;

	if (pag->pagf_longest > delta)
		return pag->pagf_longest - delta;
	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
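/*
 * Illustrative example (editor's addition, not in the original source):
 * if the AG ideally needs 6 freelist blocks but pagf_flcount is only 2,
 * delta = 4, so a pagf_longest of 100 blocks is reported as 96 to leave
 * room for refilling the freelist.
 */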
1686 * Decide whether to use this allocation group for this allocation.
1687 * If so, fix up the btree freelist's size.
1689 STATIC
int /* error */
1690 xfs_alloc_fix_freelist(
1691 xfs_alloc_arg_t
*args
, /* allocation argument structure */
1692 int flags
) /* XFS_ALLOC_FLAG_... */
1694 xfs_buf_t
*agbp
; /* agf buffer pointer */
1695 xfs_agf_t
*agf
; /* a.g. freespace structure pointer */
1696 xfs_buf_t
*agflbp
;/* agfl buffer pointer */
1697 xfs_agblock_t bno
; /* freelist block */
1698 xfs_extlen_t delta
; /* new blocks needed in freelist */
1699 int error
; /* error result code */
1700 xfs_extlen_t longest
;/* longest extent in allocation group */
1701 xfs_mount_t
*mp
; /* file system mount point structure */
1702 xfs_extlen_t need
; /* total blocks needed in freelist */
1703 xfs_perag_t
*pag
; /* per-ag information structure */
1704 xfs_alloc_arg_t targs
; /* local allocation arguments */
1705 xfs_trans_t
*tp
; /* transaction pointer */
1711 if (!pag
->pagf_init
) {
1712 if ((error
= xfs_alloc_read_agf(mp
, tp
, args
->agno
, flags
,
1715 if (!pag
->pagf_init
) {
1716 ASSERT(flags
& XFS_ALLOC_FLAG_TRYLOCK
);
1717 ASSERT(!(flags
& XFS_ALLOC_FLAG_FREEING
));
1725 * If this is a metadata preferred pag and we are user data
1726 * then try somewhere else if we are not being asked to
1727 * try harder at this point
1729 if (pag
->pagf_metadata
&& args
->userdata
&&
1730 (flags
& XFS_ALLOC_FLAG_TRYLOCK
)) {
1731 ASSERT(!(flags
& XFS_ALLOC_FLAG_FREEING
));
1736 if (!(flags
& XFS_ALLOC_FLAG_FREEING
)) {
1738 * If it looks like there isn't a long enough extent, or enough
1739 * total blocks, reject it.
1741 need
= XFS_MIN_FREELIST_PAG(pag
, mp
);
1742 longest
= xfs_alloc_longest_free_extent(mp
, pag
);
1743 if ((args
->minlen
+ args
->alignment
+ args
->minalignslop
- 1) >
1745 ((int)(pag
->pagf_freeblks
+ pag
->pagf_flcount
-
1746 need
- args
->total
) < (int)args
->minleft
)) {
1748 xfs_trans_brelse(tp
, agbp
);
1755 * Get the a.g. freespace buffer.
1756 * Can fail if we're not blocking on locks, and it's held.
1759 if ((error
= xfs_alloc_read_agf(mp
, tp
, args
->agno
, flags
,
1763 ASSERT(flags
& XFS_ALLOC_FLAG_TRYLOCK
);
1764 ASSERT(!(flags
& XFS_ALLOC_FLAG_FREEING
));
1770 * Figure out how many blocks we should have in the freelist.
1772 agf
= XFS_BUF_TO_AGF(agbp
);
1773 need
= XFS_MIN_FREELIST(agf
, mp
);
1775 * If there isn't enough total or single-extent, reject it.
1777 if (!(flags
& XFS_ALLOC_FLAG_FREEING
)) {
1778 delta
= need
> be32_to_cpu(agf
->agf_flcount
) ?
1779 (need
- be32_to_cpu(agf
->agf_flcount
)) : 0;
1780 longest
= be32_to_cpu(agf
->agf_longest
);
1781 longest
= (longest
> delta
) ? (longest
- delta
) :
1782 (be32_to_cpu(agf
->agf_flcount
) > 0 || longest
> 0);
1783 if ((args
->minlen
+ args
->alignment
+ args
->minalignslop
- 1) >
1785 ((int)(be32_to_cpu(agf
->agf_freeblks
) +
1786 be32_to_cpu(agf
->agf_flcount
) - need
- args
->total
) <
1787 (int)args
->minleft
)) {
1788 xfs_trans_brelse(tp
, agbp
);
1794 * Make the freelist shorter if it's too long.
1796 while (be32_to_cpu(agf
->agf_flcount
) > need
) {
1799 error
= xfs_alloc_get_freelist(tp
, agbp
, &bno
, 0);
1802 if ((error
= xfs_free_ag_extent(tp
, agbp
, args
->agno
, bno
, 1, 1)))
1804 bp
= xfs_btree_get_bufs(mp
, tp
, args
->agno
, bno
, 0);
1805 xfs_trans_binval(tp
, bp
);
1808 * Initialize the args structure.
1813 targs
.agno
= args
->agno
;
1814 targs
.mod
= targs
.minleft
= targs
.wasdel
= targs
.userdata
=
1815 targs
.minalignslop
= 0;
1816 targs
.alignment
= targs
.minlen
= targs
.prod
= targs
.isfl
= 1;
1817 targs
.type
= XFS_ALLOCTYPE_THIS_AG
;
1819 if ((error
= xfs_alloc_read_agfl(mp
, tp
, targs
.agno
, &agflbp
)))
1822 * Make the freelist longer if it's too short.
1824 while (be32_to_cpu(agf
->agf_flcount
) < need
) {
1826 targs
.maxlen
= need
- be32_to_cpu(agf
->agf_flcount
);
1828 * Allocate as many blocks as possible at once.
1830 if ((error
= xfs_alloc_ag_vextent(&targs
))) {
1831 xfs_trans_brelse(tp
, agflbp
);
1835 * Stop if we run out. Won't happen if callers are obeying
1836 * the restrictions correctly. Can happen for free calls
1837 * on a completely full ag.
1839 if (targs
.agbno
== NULLAGBLOCK
) {
1840 if (flags
& XFS_ALLOC_FLAG_FREEING
)
1842 xfs_trans_brelse(tp
, agflbp
);
1847 * Put each allocated block on the list.
1849 for (bno
= targs
.agbno
; bno
< targs
.agbno
+ targs
.len
; bno
++) {
1850 error
= xfs_alloc_put_freelist(tp
, agbp
,
1856 xfs_trans_brelse(tp
, agflbp
);
1862 * Get a block from the freelist.
1863 * Returns with the buffer for the block gotten.
1866 xfs_alloc_get_freelist(
1867 xfs_trans_t
*tp
, /* transaction pointer */
1868 xfs_buf_t
*agbp
, /* buffer containing the agf structure */
1869 xfs_agblock_t
*bnop
, /* block address retrieved from freelist */
1870 int btreeblk
) /* destination is a AGF btree */
1872 xfs_agf_t
*agf
; /* a.g. freespace structure */
1873 xfs_agfl_t
*agfl
; /* a.g. freelist structure */
1874 xfs_buf_t
*agflbp
;/* buffer for a.g. freelist structure */
1875 xfs_agblock_t bno
; /* block number returned */
1878 xfs_mount_t
*mp
; /* mount structure */
1879 xfs_perag_t
*pag
; /* per allocation group data */
1881 agf
= XFS_BUF_TO_AGF(agbp
);
1883 * Freelist is empty, give up.
1885 if (!agf
->agf_flcount
) {
1886 *bnop
= NULLAGBLOCK
;
1890 * Read the array of free blocks.
1893 if ((error
= xfs_alloc_read_agfl(mp
, tp
,
1894 be32_to_cpu(agf
->agf_seqno
), &agflbp
)))
1896 agfl
= XFS_BUF_TO_AGFL(agflbp
);
1898 * Get the block number and update the data structures.
1900 bno
= be32_to_cpu(agfl
->agfl_bno
[be32_to_cpu(agf
->agf_flfirst
)]);
1901 be32_add_cpu(&agf
->agf_flfirst
, 1);
1902 xfs_trans_brelse(tp
, agflbp
);
1903 if (be32_to_cpu(agf
->agf_flfirst
) == XFS_AGFL_SIZE(mp
))
1904 agf
->agf_flfirst
= 0;
1906 pag
= xfs_perag_get(mp
, be32_to_cpu(agf
->agf_seqno
));
1907 be32_add_cpu(&agf
->agf_flcount
, -1);
1908 xfs_trans_agflist_delta(tp
, -1);
1909 pag
->pagf_flcount
--;
1912 logflags
= XFS_AGF_FLFIRST
| XFS_AGF_FLCOUNT
;
1914 be32_add_cpu(&agf
->agf_btreeblks
, 1);
1915 pag
->pagf_btreeblks
++;
1916 logflags
|= XFS_AGF_BTREEBLKS
;
1919 xfs_alloc_log_agf(tp
, agbp
, logflags
);
1923 * As blocks are freed, they are added to the per-ag busy list and
1924 * remain there until the freeing transaction is committed to disk.
1925 * Now that we have allocated blocks, this list must be searched to see
1926 * if a block is being reused. If one is, then the freeing transaction
1927 * must be pushed to disk before this transaction.
1929 * We do this by setting the current transaction to a sync transaction
1930 * which guarantees that the freeing transaction is on disk before this
1931 * transaction. This is done instead of a synchronous log force here so
1932 * that we don't sit and wait with the AGF locked in the transaction
1933 * during the log force.
1935 if (xfs_alloc_busy_search(mp
, be32_to_cpu(agf
->agf_seqno
), bno
, 1))
1936 xfs_trans_set_sync(tp
);
/*
 * Log the given fields from the agf structure.
 */
void
xfs_alloc_log_agf(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
{
	int	first;		/* first byte offset */
	int	last;		/* last byte offset */
	static const short	offsets[] = {
		offsetof(xfs_agf_t, agf_magicnum),
		offsetof(xfs_agf_t, agf_versionnum),
		offsetof(xfs_agf_t, agf_seqno),
		offsetof(xfs_agf_t, agf_length),
		offsetof(xfs_agf_t, agf_roots[0]),
		offsetof(xfs_agf_t, agf_levels[0]),
		offsetof(xfs_agf_t, agf_flfirst),
		offsetof(xfs_agf_t, agf_fllast),
		offsetof(xfs_agf_t, agf_flcount),
		offsetof(xfs_agf_t, agf_freeblks),
		offsetof(xfs_agf_t, agf_longest),
		offsetof(xfs_agf_t, agf_btreeblks),
		sizeof(xfs_agf_t)
	};

	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);

	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
/*
 * Interface for inode allocation to force the pag data to be initialized.
 */
int					/* error */
xfs_alloc_pagf_init(
	xfs_mount_t		*mp,	/* file system mount structure */
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags)	/* XFS_ALLOC_FLAGS_... */
{
	xfs_buf_t		*bp;
	int			error;

	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
		return error;
	if (bp)
		xfs_trans_brelse(tp, bp);
	return 0;
}
1994 * Put the block on the freelist for the allocation group.
1997 xfs_alloc_put_freelist(
1998 xfs_trans_t
*tp
, /* transaction pointer */
1999 xfs_buf_t
*agbp
, /* buffer for a.g. freelist header */
2000 xfs_buf_t
*agflbp
,/* buffer for a.g. free block array */
2001 xfs_agblock_t bno
, /* block being freed */
2002 int btreeblk
) /* block came from a AGF btree */
2004 xfs_agf_t
*agf
; /* a.g. freespace structure */
2005 xfs_agfl_t
*agfl
; /* a.g. free block array */
2006 __be32
*blockp
;/* pointer to array entry */
2009 xfs_mount_t
*mp
; /* mount structure */
2010 xfs_perag_t
*pag
; /* per allocation group data */
2012 agf
= XFS_BUF_TO_AGF(agbp
);
2015 if (!agflbp
&& (error
= xfs_alloc_read_agfl(mp
, tp
,
2016 be32_to_cpu(agf
->agf_seqno
), &agflbp
)))
2018 agfl
= XFS_BUF_TO_AGFL(agflbp
);
2019 be32_add_cpu(&agf
->agf_fllast
, 1);
2020 if (be32_to_cpu(agf
->agf_fllast
) == XFS_AGFL_SIZE(mp
))
2021 agf
->agf_fllast
= 0;
2023 pag
= xfs_perag_get(mp
, be32_to_cpu(agf
->agf_seqno
));
2024 be32_add_cpu(&agf
->agf_flcount
, 1);
2025 xfs_trans_agflist_delta(tp
, 1);
2026 pag
->pagf_flcount
++;
2028 logflags
= XFS_AGF_FLLAST
| XFS_AGF_FLCOUNT
;
2030 be32_add_cpu(&agf
->agf_btreeblks
, -1);
2031 pag
->pagf_btreeblks
--;
2032 logflags
|= XFS_AGF_BTREEBLKS
;
2036 xfs_alloc_log_agf(tp
, agbp
, logflags
);
2038 ASSERT(be32_to_cpu(agf
->agf_flcount
) <= XFS_AGFL_SIZE(mp
));
2039 blockp
= &agfl
->agfl_bno
[be32_to_cpu(agf
->agf_fllast
)];
2040 *blockp
= cpu_to_be32(bno
);
2041 xfs_alloc_log_agf(tp
, agbp
, logflags
);
2042 xfs_trans_log_buf(tp
, agflbp
,
2043 (int)((xfs_caddr_t
)blockp
- (xfs_caddr_t
)agfl
),
2044 (int)((xfs_caddr_t
)blockp
- (xfs_caddr_t
)agfl
+
2045 sizeof(xfs_agblock_t
) - 1));
2050 * Read in the allocation group header (free/alloc section).
2054 struct xfs_mount
*mp
, /* mount point structure */
2055 struct xfs_trans
*tp
, /* transaction pointer */
2056 xfs_agnumber_t agno
, /* allocation group number */
2057 int flags
, /* XFS_BUF_ */
2058 struct xfs_buf
**bpp
) /* buffer for the ag freelist header */
2060 struct xfs_agf
*agf
; /* ag freelist header */
2061 int agf_ok
; /* set if agf is consistent */
2064 ASSERT(agno
!= NULLAGNUMBER
);
2065 error
= xfs_trans_read_buf(
2066 mp
, tp
, mp
->m_ddev_targp
,
2067 XFS_AG_DADDR(mp
, agno
, XFS_AGF_DADDR(mp
)),
2068 XFS_FSS_TO_BB(mp
, 1), flags
, bpp
);
2074 ASSERT(!XFS_BUF_GETERROR(*bpp
));
2075 agf
= XFS_BUF_TO_AGF(*bpp
);
2078 * Validate the magic number of the agf block.
2081 be32_to_cpu(agf
->agf_magicnum
) == XFS_AGF_MAGIC
&&
2082 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf
->agf_versionnum
)) &&
2083 be32_to_cpu(agf
->agf_freeblks
) <= be32_to_cpu(agf
->agf_length
) &&
2084 be32_to_cpu(agf
->agf_flfirst
) < XFS_AGFL_SIZE(mp
) &&
2085 be32_to_cpu(agf
->agf_fllast
) < XFS_AGFL_SIZE(mp
) &&
2086 be32_to_cpu(agf
->agf_flcount
) <= XFS_AGFL_SIZE(mp
) &&
2087 be32_to_cpu(agf
->agf_seqno
) == agno
;
2088 if (xfs_sb_version_haslazysbcount(&mp
->m_sb
))
2089 agf_ok
= agf_ok
&& be32_to_cpu(agf
->agf_btreeblks
) <=
2090 be32_to_cpu(agf
->agf_length
);
2091 if (unlikely(XFS_TEST_ERROR(!agf_ok
, mp
, XFS_ERRTAG_ALLOC_READ_AGF
,
2092 XFS_RANDOM_ALLOC_READ_AGF
))) {
2093 XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
2094 XFS_ERRLEVEL_LOW
, mp
, agf
);
2095 xfs_trans_brelse(tp
, *bpp
);
2096 return XFS_ERROR(EFSCORRUPTED
);
2098 XFS_BUF_SET_VTYPE_REF(*bpp
, B_FS_AGF
, XFS_AGF_REF
);
/*
 * Read in the allocation group header (free/alloc section).
 */
int					/* error */
xfs_alloc_read_agf(
	struct xfs_mount	*mp,	/* mount point structure */
	struct xfs_trans	*tp,	/* transaction pointer */
	xfs_agnumber_t		agno,	/* allocation group number */
	int			flags,	/* XFS_ALLOC_FLAG_... */
	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
{
	struct xfs_agf		*agf;	/* ag freelist header */
	struct xfs_perag	*pag;	/* per allocation group data */
	int			error;

	ASSERT(agno != NULLAGNUMBER);

	error = xfs_read_agf(mp, tp, agno,
			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
			bpp);
	if (error)
		return error;
	if (!*bpp)
		return 0;
	ASSERT(!XFS_BUF_GETERROR(*bpp));

	agf = XFS_BUF_TO_AGF(*bpp);
	pag = xfs_perag_get(mp, agno);
	if (!pag->pagf_init) {
		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
		pag->pagf_levels[XFS_BTNUM_BNOi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
		pag->pagf_levels[XFS_BTNUM_CNTi] =
			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;
		pag->pagf_init = 1;
	} else if (!XFS_FORCED_SHUTDOWN(mp)) {
		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
	}
	xfs_perag_put(pag);
	return 0;
}
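
/*
 * Illustrative sketch (not part of the original file): one plausible way a
 * caller might read an AGF with the trylock flag and inspect the longest
 * free extent in the group.  The helper name and its error handling are
 * assumptions made purely for illustration.
 */
#if 0
STATIC int
xfs_example_peek_agf_longest(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*longest)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	int			error;

	/* Try to read the AGF without blocking on the buffer lock. */
	error = xfs_alloc_read_agf(mp, tp, agno, XFS_ALLOC_FLAG_TRYLOCK, &agbp);
	if (error)
		return error;
	if (!agbp)
		return 0;	/* trylock failed; the caller may retry later */

	agf = XFS_BUF_TO_AGF(agbp);
	*longest = be32_to_cpu(agf->agf_longest);
	xfs_trans_brelse(tp, agbp);
	return 0;
}
#endif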
/*
 * Allocate an extent (variable-size).
 * Depending on the allocation type, we either look in a single allocation
 * group or loop over the allocation groups to find the result.
 */
int				/* error */
xfs_alloc_vextent(
	xfs_alloc_arg_t	*args)	/* allocation argument structure */
{
	xfs_agblock_t	agsize;	/* allocation group size */
	int		error;
	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
	xfs_extlen_t	minleft;/* minimum left value, temp copy */
	xfs_mount_t	*mp;	/* mount structure pointer */
	xfs_agnumber_t	sagno;	/* starting allocation group number */
	xfs_alloctype_t	type;	/* input allocation type */
	int		bump_rotor = 0;
	int		no_min = 0;
	xfs_agnumber_t	rotorstep = xfs_rotorstep; /* inode32 agf stepper */

	mp = args->mp;
	type = args->otype = args->type;
	args->agbno = NULLAGBLOCK;
	/*
	 * Just fix this up, for the case where the last a.g. is shorter
	 * (or there's only one a.g.) and the caller couldn't easily figure
	 * that out (xfs_bmap_alloc).
	 */
	agsize = mp->m_sb.sb_agblocks;
	if (args->maxlen > agsize)
		args->maxlen = agsize;
	if (args->alignment == 0)
		args->alignment = 1;
	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
	ASSERT(args->minlen <= args->maxlen);
	ASSERT(args->minlen <= agsize);
	ASSERT(args->mod < args->prod);
	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
	    args->minlen > args->maxlen || args->minlen > agsize ||
	    args->mod >= args->prod) {
		args->fsbno = NULLFSBLOCK;
		trace_xfs_alloc_vextent_badargs(args);
		return 0;
	}
	minleft = args->minleft;
	switch (type) {
	case XFS_ALLOCTYPE_THIS_AG:
	case XFS_ALLOCTYPE_NEAR_BNO:
	case XFS_ALLOCTYPE_THIS_BNO:
		/*
		 * These three force us into a single a.g.
		 */
		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
		args->pag = xfs_perag_get(mp, args->agno);
		args->minleft = 0;
		error = xfs_alloc_fix_freelist(args, 0);
		args->minleft = minleft;
		if (error) {
			trace_xfs_alloc_vextent_nofix(args);
			goto error0;
		}
		if (!args->agbp) {
			trace_xfs_alloc_vextent_noagbp(args);
			break;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		if ((error = xfs_alloc_ag_vextent(args)))
			goto error0;
		break;
	case XFS_ALLOCTYPE_START_BNO:
		/*
		 * Try near allocation first, then anywhere-in-ag after
		 * the first a.g. fails.
		 */
		if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
			args->fsbno = XFS_AGB_TO_FSB(mp,
					((mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount), 0);
			bump_rotor = 1;
		}
		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
		args->type = XFS_ALLOCTYPE_NEAR_BNO;
		/* FALLTHROUGH */
	case XFS_ALLOCTYPE_ANY_AG:
	case XFS_ALLOCTYPE_START_AG:
	case XFS_ALLOCTYPE_FIRST_AG:
		/*
		 * Rotate through the allocation groups looking for a winner.
		 */
		if (type == XFS_ALLOCTYPE_ANY_AG) {
			/*
			 * Start with the last place we left off.
			 */
			args->agno = sagno = (mp->m_agfrotor / rotorstep) %
					mp->m_sb.sb_agcount;
			args->type = XFS_ALLOCTYPE_THIS_AG;
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
			/*
			 * Start with allocation group given by bno.
			 */
			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			args->type = XFS_ALLOCTYPE_THIS_AG;
			sagno = 0;
			flags = 0;
		} else {
			if (type == XFS_ALLOCTYPE_START_AG)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * Start with the given allocation group.
			 */
			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
			flags = XFS_ALLOC_FLAG_TRYLOCK;
		}
		/*
		 * Loop over allocation groups twice; first time with
		 * trylock set, second time without.
		 */
		for (;;) {
			args->pag = xfs_perag_get(mp, args->agno);
			if (no_min) args->minleft = 0;
			error = xfs_alloc_fix_freelist(args, flags);
			args->minleft = minleft;
			if (error) {
				trace_xfs_alloc_vextent_nofix(args);
				goto error0;
			}
			/*
			 * If we get a buffer back then the allocation will fly.
			 */
			if (args->agbp) {
				if ((error = xfs_alloc_ag_vextent(args)))
					goto error0;
				break;
			}

			trace_xfs_alloc_vextent_loopfailed(args);

			/*
			 * Didn't work, figure out the next iteration.
			 */
			if (args->agno == sagno &&
			    type == XFS_ALLOCTYPE_START_BNO)
				args->type = XFS_ALLOCTYPE_THIS_AG;
			/*
			 * For the first allocation, we can try any AG to get
			 * space.  However, if we already have allocated a
			 * block, we don't want to try AGs whose number is below
			 * sagno.  Otherwise, we may end up with out-of-order
			 * locking of AGF, which might cause deadlock.
			 */
			if (++(args->agno) == mp->m_sb.sb_agcount) {
				if (args->firstblock != NULLFSBLOCK)
					args->agno = sagno;
				else
					args->agno = 0;
			}
			/*
			 * Reached the starting a.g., must either be done
			 * or switch to non-trylock mode.
			 */
			if (args->agno == sagno) {
				if (no_min == 1) {
					args->agbno = NULLAGBLOCK;
					trace_xfs_alloc_vextent_allfailed(args);
					break;
				}
				if (flags == 0) {
					no_min = 1;
				} else {
					flags = 0;
					if (type == XFS_ALLOCTYPE_START_BNO) {
						args->agbno = XFS_FSB_TO_AGBNO(mp,
							args->fsbno);
						args->type = XFS_ALLOCTYPE_NEAR_BNO;
					}
				}
			}
			xfs_perag_put(args->pag);
		}
		if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
			if (args->agno == sagno)
				mp->m_agfrotor = (mp->m_agfrotor + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
			else
				mp->m_agfrotor = (args->agno * rotorstep + 1) %
					(mp->m_sb.sb_agcount * rotorstep);
		}
		break;
	}
	if (args->agbno == NULLAGBLOCK)
		args->fsbno = NULLFSBLOCK;
	else {
		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
		ASSERT(args->len >= args->minlen);
		ASSERT(args->len <= args->maxlen);
		ASSERT(args->agbno % args->alignment == 0);
		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
			args->len);
	}
	xfs_perag_put(args->pag);
	return 0;
error0:
	xfs_perag_put(args->pag);
	return error;
}
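
/*
 * Illustrative sketch (not part of the original file): a minimal way a caller
 * could fill in an xfs_alloc_arg_t and ask for a single block near a target
 * block.  The "args.tp" assignment and the exact set of fields a real caller
 * must initialise are assumptions here; the in-tree callers (e.g. the bmap
 * allocator) set up considerably more state.
 */
#if 0
STATIC int
xfs_example_alloc_one_block(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_fsblock_t	target,	/* preferred starting block */
	xfs_fsblock_t	*fsbno)	/* output: allocated block or NULLFSBLOCK */
{
	xfs_alloc_arg_t	args;
	int		error;

	memset(&args, 0, sizeof(args));
	args.tp = tp;				/* assumed field name */
	args.mp = tp->t_mountp;
	args.fsbno = target;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;	/* stay close to the target */
	args.minlen = args.maxlen = args.prod = 1;
	args.alignment = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	*fsbno = args.fsbno;	/* NULLFSBLOCK if nothing was found */
	return 0;
}
#endif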
/*
 * Free an extent.
 * Just break up the extent address and hand off to xfs_free_ag_extent
 * after fixing up the freelist.
 */
int				/* error */
xfs_free_extent(
	xfs_trans_t	*tp,	/* transaction pointer */
	xfs_fsblock_t	bno,	/* starting block number of extent */
	xfs_extlen_t	len)	/* length of extent */
{
	xfs_alloc_arg_t	args;
	int		error;

	memset(&args, 0, sizeof(xfs_alloc_arg_t));
	args.tp = tp;
	args.mp = tp->t_mountp;
	args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
	ASSERT(args.agno < args.mp->m_sb.sb_agcount);
	args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
	args.pag = xfs_perag_get(args.mp, args.agno);
	if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
		goto error0;

	ASSERT(args.agbp != NULL);
	ASSERT((args.agbno + len) <=
		be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));

	error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
	xfs_perag_put(args.pag);
	return error;
}
/*
 * AG Busy list management
 * The busy list contains block ranges that have been freed but whose
 * transactions have not yet hit disk.  If any block listed in a busy
 * list is reused, the transaction that freed it must be forced to disk
 * before continuing to use the block.
 *
 * xfs_alloc_busy_insert - add to the per-ag busy list
 * xfs_alloc_busy_clear - remove an item from the per-ag busy list
 * xfs_alloc_busy_search - search for a busy extent
 */
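
/*
 * Illustrative sketch (not part of the original file) of how the three calls
 * above fit together.  The helper and its argument plumbing are assumptions;
 * the real users are the extent-freeing and allocation paths in this file.
 */
#if 0
STATIC void
xfs_example_busy_usage(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	struct xfs_busy_extent	*busyp)
{
	/* When an extent is freed, mark it busy until that free commits. */
	xfs_alloc_busy_insert(tp, agno, bno, len);

	/*
	 * Before handing the same blocks out again, check for an overlap
	 * with a busy extent; any overlap means the reusing transaction
	 * must be made synchronous.
	 */
	if (xfs_alloc_busy_search(mp, agno, bno, len))
		xfs_trans_set_sync(tp);

	/* Once the freeing transaction is on disk, drop the busy entry. */
	xfs_alloc_busy_clear(mp, busyp);
}
#endif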
/*
 * Insert a new extent into the busy tree.
 *
 * The busy extent tree is indexed by the start block of the busy extent.
 * There can be multiple overlapping ranges in the busy extent tree, but only
 * ever one entry at a given start block.  The reason for this is that
 * multi-block extents can be freed, then smaller chunks of that extent
 * allocated and freed again before the first transaction commit is on disk.
 * If the exact same start block is freed a second time, we have to wait for
 * that busy extent to pass out of the tree before the new extent is inserted.
 * There are two main cases we have to handle here.
 *
 * The first case is a transaction that triggers a "free - allocate - free"
 * cycle.  This can occur during btree manipulations as a btree block is freed
 * to the freelist, then allocated from the free list, then freed again.  In
 * this case, the second extent free is what triggers the duplicate and as
 * such the transaction IDs should match.  Because the extent was allocated in
 * this transaction, the transaction must be marked as synchronous.  This is
 * true for all cases where the free/alloc/free occurs in the one transaction,
 * hence the addition of the ASSERT(tp->t_flags & XFS_TRANS_SYNC) to this case.
 * This serves to catch violations of the second case quite effectively.
 *
 * The second case is where the free/alloc/free occur in different
 * transactions.  In this case, the thread freeing the extent the second time
 * can't mark the extent busy immediately because it is already tracked in a
 * transaction that may be committing.  When the log commit for the existing
 * busy extent completes, the busy extent will be removed from the tree.  If we
 * allow the second busy insert to continue using that busy extent structure,
 * it can be freed before this transaction is safely in the log.  Hence our
 * only option in this case is to force the log to remove the existing busy
 * extent from the list before we insert the new one with the current
 * transaction ID.
 *
 * The problem we are trying to avoid in the free-alloc-free in separate
 * transactions is most easily described with a timeline:
 *
 *      Thread 1	Thread 2	Thread 3	xfslogd
 *	...
 *					checkpoint completes
 *
 * By issuing a log force in thread 3 @ "KABOOM", the thread will block until
 * the checkpoint completes, and the busy extent it matched will have been
 * removed from the tree when it is woken.  Hence it can then continue safely.
 *
 * However, to ensure this matching process is robust, we need to use the
 * transaction ID for identifying transactions, as delayed logging results in
 * the busy extent and transaction lifecycles being different, i.e. the busy
 * extent is active for a lot longer than the transaction.  Hence the
 * transaction structure can be freed and reallocated, and then used to mark
 * the same extent busy again in the new transaction.  In this case the new
 * transaction will have a different tid but can have the same address, and
 * hence we need to check against the tid.
 *
 * Future: for delayed logging, we could avoid the log force if the extent was
 * first freed in the current checkpoint sequence.  This, however, requires the
 * ability to pin the current checkpoint in memory until this transaction
 * commits to ensure that both the original free and the current one combine
 * logically into the one checkpoint.  If the checkpoint sequences are
 * different, however, we still need to wait on a log force.
 */
void
xfs_alloc_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_busy_extent	*new;
	struct xfs_busy_extent	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	int			match;

	new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
	if (!new) {
		/*
		 * No Memory!  Since it is now not possible to track the free
		 * block, make this a synchronous transaction to ensure that
		 * the block is not reused before this transaction commits.
		 */
		trace_xfs_alloc_busy(tp, agno, bno, len, 1);
		xfs_trans_set_sync(tp);
		return;
	}

	new->agno = agno;
	new->bno = bno;
	new->length = len;
	new->tid = xfs_log_get_trans_ident(tp);

	INIT_LIST_HEAD(&new->list);

	/* trace before insert to be able to see failed inserts */
	trace_xfs_alloc_busy(tp, agno, bno, len, 0);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
restart:
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	parent = NULL;
	busyp = NULL;
	match = 0;
	while (*rbp && match >= 0) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);

		if (new->bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			rbp = &(*rbp)->rb_left;
			if (new->bno + new->length > busyp->bno)
				match = busyp->tid == new->tid ? 1 : -1;
		} else if (new->bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			rbp = &(*rbp)->rb_right;
			if (bno < busyp->bno + busyp->length)
				match = busyp->tid == new->tid ? 1 : -1;
		} else {
			match = busyp->tid == new->tid ? 1 : -1;
			break;
		}
	}
	if (match < 0) {
		/* overlap marked busy in different transaction */
		spin_unlock(&pag->pagb_lock);
		xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
		goto restart;
	}
	if (match > 0) {
		/*
		 * overlap marked busy in same transaction.  Update if exact
		 * start block match, otherwise combine the busy extents into
		 * a single range.
		 */
		if (busyp->bno == new->bno) {
			busyp->length = max(busyp->length, new->length);
			spin_unlock(&pag->pagb_lock);
			ASSERT(tp->t_flags & XFS_TRANS_SYNC);
			xfs_perag_put(pag);
			kmem_free(new);
			return;
		}
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		new->length = max(busyp->bno + busyp->length,
					new->bno + new->length) -
				min(busyp->bno, new->bno);
		new->bno = min(busyp->bno, new->bno);
	} else
		busyp = NULL;

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	kmem_free(busyp);
}
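
/*
 * Illustrative sketch (not part of the original file): the range-combining
 * arithmetic used above, shown on its own.  Two overlapping extents merge
 * into one that spans from the lower start to the higher end, e.g. a busy
 * extent [10, 15) combined with [12, 20) yields bno 10, length 10.  The
 * helper name is an assumption for illustration only.
 */
#if 0
STATIC void
xfs_example_merge_busy_range(
	xfs_agblock_t	bno1, xfs_extlen_t len1,	/* first extent */
	xfs_agblock_t	bno2, xfs_extlen_t len2,	/* second extent */
	xfs_agblock_t	*mbno, xfs_extlen_t *mlen)	/* merged result */
{
	/* length = highest end minus lowest start; start = lowest start */
	*mlen = max(bno1 + len1, bno2 + len2) - min(bno1, bno2);
	*mbno = min(bno1, bno2);
}
#endif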
/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  You need to be holding the busy extent tree lock when calling
 * xfs_alloc_busy_search().  This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
 * match.  This is done so that a non-zero return indicates an overlap that
 * will require a synchronous transaction, but it can still be used to
 * distinguish between a partial and an exact match.
 */
int
xfs_alloc_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_busy_extent	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	trace_xfs_alloc_busysearch(mp, agno, bno, len, !!match);
	xfs_perag_put(pag);
	return match;
}
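
/*
 * Illustrative sketch (not part of the original file): how a caller might act
 * on the three return values documented above.  The helper itself and the
 * surrounding locking are assumptions made only for illustration.
 */
#if 0
STATIC void
xfs_example_busy_check(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	switch (xfs_alloc_busy_search(mp, agno, bno, len)) {
	case 0:
		/* no overlap: the blocks can be handed out immediately */
		break;
	case -1:	/* partial overlap with a busy extent */
	case 1:		/* exact match with a busy extent */
		/* either way, the reusing transaction must be synchronous */
		xfs_trans_set_sync(tp);
		break;
	}
}
#endif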
void
xfs_alloc_busy_clear(
	struct xfs_mount	*mp,
	struct xfs_busy_extent	*busyp)
{
	struct xfs_perag	*pag;

	trace_xfs_alloc_unbusy(mp, busyp->agno, busyp->bno,
						busyp->length);

	ASSERT(xfs_alloc_busy_search(mp, busyp->agno, busyp->bno,
						busyp->length) == 1);

	list_del_init(&busyp->list);

	pag = xfs_perag_get(mp, busyp->agno);
	spin_lock(&pag->pagb_lock);
	rb_erase(&busyp->rb_node, &pag->pagb_tree);
	spin_unlock(&pag->pagb_lock);