2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/lm_interface.h>
17 #include <linux/prefetch.h>
32 #include "ops_address.h"
34 #define BFITNOENT ((u32)~0)
35 #define NO_BLOCK ((u64)~0)
37 #if BITS_PER_LONG == 32
38 #define LBITMASK (0x55555555UL)
39 #define LBITSKIP55 (0x55555555UL)
40 #define LBITSKIP00 (0x00000000UL)
42 #define LBITMASK (0x5555555555555555UL)
43 #define LBITSKIP55 (0x5555555555555555UL)
44 #define LBITSKIP00 (0x0000000000000000UL)
48 * These routines are used by the resource group routines (rgrp.c)
49 * to keep track of block allocation. Each block is represented by two
50 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
53 * 1 = Used (not metadata)
54 * 2 = Unlinked (still in use) inode
58 static const char valid_change
[16] = {
66 static u32
rgblk_search(struct gfs2_rgrpd
*rgd
, u32 goal
,
67 unsigned char old_state
, unsigned char new_state
,
71 * gfs2_setbit - Set a bit in the bitmaps
72 * @buffer: the buffer that holds the bitmaps
73 * @buflen: the length (in bytes) of the buffer
74 * @block: the block to set
75 * @new_state: the new state of the block
79 static inline void gfs2_setbit(struct gfs2_rgrpd
*rgd
, unsigned char *buf1
,
80 unsigned char *buf2
, unsigned int offset
,
81 unsigned int buflen
, u32 block
,
82 unsigned char new_state
)
84 unsigned char *byte1
, *byte2
, *end
, cur_state
;
85 const unsigned int bit
= (block
% GFS2_NBBY
) * GFS2_BIT_SIZE
;
87 byte1
= buf1
+ offset
+ (block
/ GFS2_NBBY
);
88 end
= buf1
+ offset
+ buflen
;
92 cur_state
= (*byte1
>> bit
) & GFS2_BIT_MASK
;
94 if (unlikely(!valid_change
[new_state
* 4 + cur_state
])) {
95 gfs2_consist_rgrpd(rgd
);
98 *byte1
^= (cur_state
^ new_state
) << bit
;
101 byte2
= buf2
+ offset
+ (block
/ GFS2_NBBY
);
102 cur_state
= (*byte2
>> bit
) & GFS2_BIT_MASK
;
103 *byte2
^= (cur_state
^ new_state
) << bit
;
108 * gfs2_testbit - test a bit in the bitmaps
109 * @buffer: the buffer that holds the bitmaps
110 * @buflen: the length (in bytes) of the buffer
111 * @block: the block to read
115 static inline unsigned char gfs2_testbit(struct gfs2_rgrpd
*rgd
,
116 const unsigned char *buffer
,
117 unsigned int buflen
, u32 block
)
119 const unsigned char *byte
, *end
;
120 unsigned char cur_state
;
123 byte
= buffer
+ (block
/ GFS2_NBBY
);
124 bit
= (block
% GFS2_NBBY
) * GFS2_BIT_SIZE
;
125 end
= buffer
+ buflen
;
127 gfs2_assert(rgd
->rd_sbd
, byte
< end
);
129 cur_state
= (*byte
>> bit
) & GFS2_BIT_MASK
;
135 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
136 * a block in a given allocation state.
137 * @buffer: the buffer that holds the bitmaps
138 * @buflen: the length (in bytes) of the buffer
139 * @goal: start search at this block's bit-pair (within @buffer)
140 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
142 * Scope of @goal and returned block number is only within this bitmap buffer,
143 * not entire rgrp or filesystem. @buffer will be offset from the actual
144 * beginning of a bitmap block buffer, skipping any header structures.
146 * Return: the block number (bitmap buffer scope) that was found
149 static u32
gfs2_bitfit(const u8
*buffer
, unsigned int buflen
, u32 goal
,
152 const u8
*byte
, *start
, *end
;
154 u32 g1
, g2
, misaligned
;
155 unsigned long *plong
;
156 unsigned long lskipval
;
158 lskipval
= (old_state
& GFS2_BLKST_USED
) ? LBITSKIP00
: LBITSKIP55
;
159 g1
= (goal
/ GFS2_NBBY
);
162 end
= buffer
+ buflen
;
163 g2
= ALIGN(g1
, sizeof(unsigned long));
164 plong
= (unsigned long *)(buffer
+ g2
);
165 startbit
= bit
= (goal
% GFS2_NBBY
) * GFS2_BIT_SIZE
;
166 misaligned
= g2
- g1
;
169 /* parse the bitmap a byte at a time */
172 if (((*byte
>> bit
) & GFS2_BIT_MASK
) == old_state
) {
174 (((byte
- start
) * GFS2_NBBY
) +
175 ((bit
- startbit
) >> 1));
177 bit
+= GFS2_BIT_SIZE
;
178 if (bit
>= GFS2_NBBY
* GFS2_BIT_SIZE
) {
183 plong
= (unsigned long *)byte
;
190 /* parse the bitmap a unsigned long at a time */
192 /* Stop at "end - 1" or else prefetch can go past the end and segfault.
193 We could "if" it but we'd lose some of the performance gained.
194 This way will only slow down searching the very last 4/8 bytes
195 depending on architecture. I've experimented with several ways
196 of writing this section such as using an else before the goto
197 but this one seems to be the fastest. */
198 while ((unsigned char *)plong
< end
- sizeof(unsigned long)) {
200 if (((*plong
) & LBITMASK
) != lskipval
)
204 if ((unsigned char *)plong
< end
) {
205 byte
= (const u8
*)plong
;
206 misaligned
+= sizeof(unsigned long) - 1;
213 * gfs2_bitcount - count the number of bits in a certain state
214 * @buffer: the buffer that holds the bitmaps
215 * @buflen: the length (in bytes) of the buffer
216 * @state: the state of the block we're looking for
218 * Returns: The number of bits
221 static u32
gfs2_bitcount(struct gfs2_rgrpd
*rgd
, const u8
*buffer
,
222 unsigned int buflen
, u8 state
)
224 const u8
*byte
= buffer
;
225 const u8
*end
= buffer
+ buflen
;
226 const u8 state1
= state
<< 2;
227 const u8 state2
= state
<< 4;
228 const u8 state3
= state
<< 6;
231 for (; byte
< end
; byte
++) {
232 if (((*byte
) & 0x03) == state
)
234 if (((*byte
) & 0x0C) == state1
)
236 if (((*byte
) & 0x30) == state2
)
238 if (((*byte
) & 0xC0) == state3
)
246 * gfs2_rgrp_verify - Verify that a resource group is consistent
247 * @sdp: the filesystem
252 void gfs2_rgrp_verify(struct gfs2_rgrpd
*rgd
)
254 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
255 struct gfs2_bitmap
*bi
= NULL
;
256 u32 length
= rgd
->rd_length
;
260 memset(count
, 0, 4 * sizeof(u32
));
262 /* Count # blocks in each of 4 possible allocation states */
263 for (buf
= 0; buf
< length
; buf
++) {
264 bi
= rgd
->rd_bits
+ buf
;
265 for (x
= 0; x
< 4; x
++)
266 count
[x
] += gfs2_bitcount(rgd
,
272 if (count
[0] != rgd
->rd_rg
.rg_free
) {
273 if (gfs2_consist_rgrpd(rgd
))
274 fs_err(sdp
, "free data mismatch: %u != %u\n",
275 count
[0], rgd
->rd_rg
.rg_free
);
281 rgd
->rd_rg
.rg_dinodes
;
282 if (count
[1] + count
[2] != tmp
) {
283 if (gfs2_consist_rgrpd(rgd
))
284 fs_err(sdp
, "used data mismatch: %u != %u\n",
289 if (count
[3] != rgd
->rd_rg
.rg_dinodes
) {
290 if (gfs2_consist_rgrpd(rgd
))
291 fs_err(sdp
, "used metadata mismatch: %u != %u\n",
292 count
[3], rgd
->rd_rg
.rg_dinodes
);
296 if (count
[2] > count
[3]) {
297 if (gfs2_consist_rgrpd(rgd
))
298 fs_err(sdp
, "unlinked inodes > inodes: %u\n",
305 static inline int rgrp_contains_block(struct gfs2_rgrpd
*rgd
, u64 block
)
307 u64 first
= rgd
->rd_data0
;
308 u64 last
= first
+ rgd
->rd_data
;
309 return first
<= block
&& block
< last
;
313 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
314 * @sdp: The GFS2 superblock
315 * @n: The data block number
317 * Returns: The resource group, or NULL if not found
320 struct gfs2_rgrpd
*gfs2_blk2rgrpd(struct gfs2_sbd
*sdp
, u64 blk
)
322 struct gfs2_rgrpd
*rgd
;
324 spin_lock(&sdp
->sd_rindex_spin
);
326 list_for_each_entry(rgd
, &sdp
->sd_rindex_mru_list
, rd_list_mru
) {
327 if (rgrp_contains_block(rgd
, blk
)) {
328 list_move(&rgd
->rd_list_mru
, &sdp
->sd_rindex_mru_list
);
329 spin_unlock(&sdp
->sd_rindex_spin
);
334 spin_unlock(&sdp
->sd_rindex_spin
);
340 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
341 * @sdp: The GFS2 superblock
343 * Returns: The first rgrp in the filesystem
346 struct gfs2_rgrpd
*gfs2_rgrpd_get_first(struct gfs2_sbd
*sdp
)
348 gfs2_assert(sdp
, !list_empty(&sdp
->sd_rindex_list
));
349 return list_entry(sdp
->sd_rindex_list
.next
, struct gfs2_rgrpd
, rd_list
);
353 * gfs2_rgrpd_get_next - get the next RG
356 * Returns: The next rgrp
359 struct gfs2_rgrpd
*gfs2_rgrpd_get_next(struct gfs2_rgrpd
*rgd
)
361 if (rgd
->rd_list
.next
== &rgd
->rd_sbd
->sd_rindex_list
)
363 return list_entry(rgd
->rd_list
.next
, struct gfs2_rgrpd
, rd_list
);
366 static void clear_rgrpdi(struct gfs2_sbd
*sdp
)
368 struct list_head
*head
;
369 struct gfs2_rgrpd
*rgd
;
370 struct gfs2_glock
*gl
;
372 spin_lock(&sdp
->sd_rindex_spin
);
373 sdp
->sd_rindex_forward
= NULL
;
374 head
= &sdp
->sd_rindex_recent_list
;
375 while (!list_empty(head
)) {
376 rgd
= list_entry(head
->next
, struct gfs2_rgrpd
, rd_recent
);
377 list_del(&rgd
->rd_recent
);
379 spin_unlock(&sdp
->sd_rindex_spin
);
381 head
= &sdp
->sd_rindex_list
;
382 while (!list_empty(head
)) {
383 rgd
= list_entry(head
->next
, struct gfs2_rgrpd
, rd_list
);
386 list_del(&rgd
->rd_list
);
387 list_del(&rgd
->rd_list_mru
);
390 gl
->gl_object
= NULL
;
395 kmem_cache_free(gfs2_rgrpd_cachep
, rgd
);
399 void gfs2_clear_rgrpd(struct gfs2_sbd
*sdp
)
401 mutex_lock(&sdp
->sd_rindex_mutex
);
403 mutex_unlock(&sdp
->sd_rindex_mutex
);
406 static void gfs2_rindex_print(const struct gfs2_rgrpd
*rgd
)
408 printk(KERN_INFO
" ri_addr = %llu\n", (unsigned long long)rgd
->rd_addr
);
409 printk(KERN_INFO
" ri_length = %u\n", rgd
->rd_length
);
410 printk(KERN_INFO
" ri_data0 = %llu\n", (unsigned long long)rgd
->rd_data0
);
411 printk(KERN_INFO
" ri_data = %u\n", rgd
->rd_data
);
412 printk(KERN_INFO
" ri_bitbytes = %u\n", rgd
->rd_bitbytes
);
416 * gfs2_compute_bitstructs - Compute the bitmap sizes
417 * @rgd: The resource group descriptor
419 * Calculates bitmap descriptors, one for each block that contains bitmap data
424 static int compute_bitstructs(struct gfs2_rgrpd
*rgd
)
426 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
427 struct gfs2_bitmap
*bi
;
428 u32 length
= rgd
->rd_length
; /* # blocks in hdr & bitmap */
429 u32 bytes_left
, bytes
;
435 rgd
->rd_bits
= kcalloc(length
, sizeof(struct gfs2_bitmap
), GFP_NOFS
);
439 bytes_left
= rgd
->rd_bitbytes
;
441 for (x
= 0; x
< length
; x
++) {
442 bi
= rgd
->rd_bits
+ x
;
444 /* small rgrp; bitmap stored completely in header block */
447 bi
->bi_offset
= sizeof(struct gfs2_rgrp
);
452 bytes
= sdp
->sd_sb
.sb_bsize
- sizeof(struct gfs2_rgrp
);
453 bi
->bi_offset
= sizeof(struct gfs2_rgrp
);
457 } else if (x
+ 1 == length
) {
459 bi
->bi_offset
= sizeof(struct gfs2_meta_header
);
460 bi
->bi_start
= rgd
->rd_bitbytes
- bytes_left
;
464 bytes
= sdp
->sd_sb
.sb_bsize
-
465 sizeof(struct gfs2_meta_header
);
466 bi
->bi_offset
= sizeof(struct gfs2_meta_header
);
467 bi
->bi_start
= rgd
->rd_bitbytes
- bytes_left
;
475 gfs2_consist_rgrpd(rgd
);
478 bi
= rgd
->rd_bits
+ (length
- 1);
479 if ((bi
->bi_start
+ bi
->bi_len
) * GFS2_NBBY
!= rgd
->rd_data
) {
480 if (gfs2_consist_rgrpd(rgd
)) {
481 gfs2_rindex_print(rgd
);
482 fs_err(sdp
, "start=%u len=%u offset=%u\n",
483 bi
->bi_start
, bi
->bi_len
, bi
->bi_offset
);
492 * gfs2_ri_total - Total up the file system space, according to the rindex.
495 u64
gfs2_ri_total(struct gfs2_sbd
*sdp
)
498 struct inode
*inode
= sdp
->sd_rindex
;
499 struct gfs2_inode
*ip
= GFS2_I(inode
);
500 char buf
[sizeof(struct gfs2_rindex
)];
501 struct file_ra_state ra_state
;
504 mutex_lock(&sdp
->sd_rindex_mutex
);
505 file_ra_state_init(&ra_state
, inode
->i_mapping
);
506 for (rgrps
= 0;; rgrps
++) {
507 loff_t pos
= rgrps
* sizeof(struct gfs2_rindex
);
509 if (pos
+ sizeof(struct gfs2_rindex
) >= ip
->i_di
.di_size
)
511 error
= gfs2_internal_read(ip
, &ra_state
, buf
, &pos
,
512 sizeof(struct gfs2_rindex
));
513 if (error
!= sizeof(struct gfs2_rindex
))
515 total_data
+= be32_to_cpu(((struct gfs2_rindex
*)buf
)->ri_data
);
517 mutex_unlock(&sdp
->sd_rindex_mutex
);
521 static void gfs2_rindex_in(struct gfs2_rgrpd
*rgd
, const void *buf
)
523 const struct gfs2_rindex
*str
= buf
;
525 rgd
->rd_addr
= be64_to_cpu(str
->ri_addr
);
526 rgd
->rd_length
= be32_to_cpu(str
->ri_length
);
527 rgd
->rd_data0
= be64_to_cpu(str
->ri_data0
);
528 rgd
->rd_data
= be32_to_cpu(str
->ri_data
);
529 rgd
->rd_bitbytes
= be32_to_cpu(str
->ri_bitbytes
);
533 * read_rindex_entry - Pull in a new resource index entry from the disk
534 * @gl: The glock covering the rindex inode
536 * Returns: 0 on success, error code otherwise
539 static int read_rindex_entry(struct gfs2_inode
*ip
,
540 struct file_ra_state
*ra_state
)
542 struct gfs2_sbd
*sdp
= GFS2_SB(&ip
->i_inode
);
543 loff_t pos
= sdp
->sd_rgrps
* sizeof(struct gfs2_rindex
);
544 char buf
[sizeof(struct gfs2_rindex
)];
546 struct gfs2_rgrpd
*rgd
;
548 error
= gfs2_internal_read(ip
, ra_state
, buf
, &pos
,
549 sizeof(struct gfs2_rindex
));
552 if (error
!= sizeof(struct gfs2_rindex
)) {
558 rgd
= kmem_cache_zalloc(gfs2_rgrpd_cachep
, GFP_NOFS
);
563 mutex_init(&rgd
->rd_mutex
);
564 lops_init_le(&rgd
->rd_le
, &gfs2_rg_lops
);
567 list_add_tail(&rgd
->rd_list
, &sdp
->sd_rindex_list
);
568 list_add_tail(&rgd
->rd_list_mru
, &sdp
->sd_rindex_mru_list
);
570 gfs2_rindex_in(rgd
, buf
);
571 error
= compute_bitstructs(rgd
);
575 error
= gfs2_glock_get(sdp
, rgd
->rd_addr
,
576 &gfs2_rgrp_glops
, CREATE
, &rgd
->rd_gl
);
580 rgd
->rd_gl
->gl_object
= rgd
;
581 rgd
->rd_flags
&= ~GFS2_RDF_UPTODATE
;
582 rgd
->rd_flags
|= GFS2_RDF_CHECK
;
587 * gfs2_ri_update - Pull in a new resource index from the disk
588 * @ip: pointer to the rindex inode
590 * Returns: 0 on successful update, error code otherwise
593 static int gfs2_ri_update(struct gfs2_inode
*ip
)
595 struct gfs2_sbd
*sdp
= GFS2_SB(&ip
->i_inode
);
596 struct inode
*inode
= &ip
->i_inode
;
597 struct file_ra_state ra_state
;
598 u64 rgrp_count
= ip
->i_di
.di_size
;
601 if (do_div(rgrp_count
, sizeof(struct gfs2_rindex
))) {
602 gfs2_consist_inode(ip
);
608 file_ra_state_init(&ra_state
, inode
->i_mapping
);
609 for (sdp
->sd_rgrps
= 0; sdp
->sd_rgrps
< rgrp_count
; sdp
->sd_rgrps
++) {
610 error
= read_rindex_entry(ip
, &ra_state
);
617 sdp
->sd_rindex_uptodate
= 1;
622 * gfs2_ri_update_special - Pull in a new resource index from the disk
624 * This is a special version that's safe to call from gfs2_inplace_reserve_i.
625 * In this case we know that we don't have any resource groups in memory yet.
627 * @ip: pointer to the rindex inode
629 * Returns: 0 on successful update, error code otherwise
631 static int gfs2_ri_update_special(struct gfs2_inode
*ip
)
633 struct gfs2_sbd
*sdp
= GFS2_SB(&ip
->i_inode
);
634 struct inode
*inode
= &ip
->i_inode
;
635 struct file_ra_state ra_state
;
638 file_ra_state_init(&ra_state
, inode
->i_mapping
);
639 for (sdp
->sd_rgrps
= 0;; sdp
->sd_rgrps
++) {
640 /* Ignore partials */
641 if ((sdp
->sd_rgrps
+ 1) * sizeof(struct gfs2_rindex
) >
644 error
= read_rindex_entry(ip
, &ra_state
);
651 sdp
->sd_rindex_uptodate
= 1;
656 * gfs2_rindex_hold - Grab a lock on the rindex
657 * @sdp: The GFS2 superblock
658 * @ri_gh: the glock holder
660 * We grab a lock on the rindex inode to make sure that it doesn't
661 * change whilst we are performing an operation. We keep this lock
662 * for quite long periods of time compared to other locks. This
663 * doesn't matter, since it is shared and it is very, very rarely
664 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
666 * This makes sure that we're using the latest copy of the resource index
667 * special file, which might have been updated if someone expanded the
668 * filesystem (via gfs2_grow utility), which adds new resource groups.
670 * Returns: 0 on success, error code otherwise
673 int gfs2_rindex_hold(struct gfs2_sbd
*sdp
, struct gfs2_holder
*ri_gh
)
675 struct gfs2_inode
*ip
= GFS2_I(sdp
->sd_rindex
);
676 struct gfs2_glock
*gl
= ip
->i_gl
;
679 error
= gfs2_glock_nq_init(gl
, LM_ST_SHARED
, 0, ri_gh
);
683 /* Read new copy from disk if we don't have the latest */
684 if (!sdp
->sd_rindex_uptodate
) {
685 mutex_lock(&sdp
->sd_rindex_mutex
);
686 if (!sdp
->sd_rindex_uptodate
) {
687 error
= gfs2_ri_update(ip
);
689 gfs2_glock_dq_uninit(ri_gh
);
691 mutex_unlock(&sdp
->sd_rindex_mutex
);
697 static void gfs2_rgrp_in(struct gfs2_rgrpd
*rgd
, const void *buf
)
699 const struct gfs2_rgrp
*str
= buf
;
700 struct gfs2_rgrp_host
*rg
= &rgd
->rd_rg
;
703 rg_flags
= be32_to_cpu(str
->rg_flags
);
704 if (rg_flags
& GFS2_RGF_NOALLOC
)
705 rgd
->rd_flags
|= GFS2_RDF_NOALLOC
;
707 rgd
->rd_flags
&= ~GFS2_RDF_NOALLOC
;
708 rg
->rg_free
= be32_to_cpu(str
->rg_free
);
709 rg
->rg_dinodes
= be32_to_cpu(str
->rg_dinodes
);
710 rg
->rg_igeneration
= be64_to_cpu(str
->rg_igeneration
);
713 static void gfs2_rgrp_out(struct gfs2_rgrpd
*rgd
, void *buf
)
715 struct gfs2_rgrp
*str
= buf
;
716 struct gfs2_rgrp_host
*rg
= &rgd
->rd_rg
;
719 if (rgd
->rd_flags
& GFS2_RDF_NOALLOC
)
720 rg_flags
|= GFS2_RGF_NOALLOC
;
721 str
->rg_flags
= cpu_to_be32(rg_flags
);
722 str
->rg_free
= cpu_to_be32(rg
->rg_free
);
723 str
->rg_dinodes
= cpu_to_be32(rg
->rg_dinodes
);
724 str
->__pad
= cpu_to_be32(0);
725 str
->rg_igeneration
= cpu_to_be64(rg
->rg_igeneration
);
726 memset(&str
->rg_reserved
, 0, sizeof(str
->rg_reserved
));
730 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
731 * @rgd: the struct gfs2_rgrpd describing the RG to read in
733 * Read in all of a Resource Group's header and bitmap blocks.
734 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
739 int gfs2_rgrp_bh_get(struct gfs2_rgrpd
*rgd
)
741 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
742 struct gfs2_glock
*gl
= rgd
->rd_gl
;
743 unsigned int length
= rgd
->rd_length
;
744 struct gfs2_bitmap
*bi
;
748 mutex_lock(&rgd
->rd_mutex
);
750 spin_lock(&sdp
->sd_rindex_spin
);
751 if (rgd
->rd_bh_count
) {
753 spin_unlock(&sdp
->sd_rindex_spin
);
754 mutex_unlock(&rgd
->rd_mutex
);
757 spin_unlock(&sdp
->sd_rindex_spin
);
759 for (x
= 0; x
< length
; x
++) {
760 bi
= rgd
->rd_bits
+ x
;
761 error
= gfs2_meta_read(gl
, rgd
->rd_addr
+ x
, 0, &bi
->bi_bh
);
766 for (y
= length
; y
--;) {
767 bi
= rgd
->rd_bits
+ y
;
768 error
= gfs2_meta_wait(sdp
, bi
->bi_bh
);
771 if (gfs2_metatype_check(sdp
, bi
->bi_bh
, y
? GFS2_METATYPE_RB
:
778 if (!(rgd
->rd_flags
& GFS2_RDF_UPTODATE
)) {
779 gfs2_rgrp_in(rgd
, (rgd
->rd_bits
[0].bi_bh
)->b_data
);
780 rgd
->rd_flags
|= GFS2_RDF_UPTODATE
;
783 spin_lock(&sdp
->sd_rindex_spin
);
784 rgd
->rd_free_clone
= rgd
->rd_rg
.rg_free
;
786 spin_unlock(&sdp
->sd_rindex_spin
);
788 mutex_unlock(&rgd
->rd_mutex
);
794 bi
= rgd
->rd_bits
+ x
;
797 gfs2_assert_warn(sdp
, !bi
->bi_clone
);
799 mutex_unlock(&rgd
->rd_mutex
);
804 void gfs2_rgrp_bh_hold(struct gfs2_rgrpd
*rgd
)
806 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
808 spin_lock(&sdp
->sd_rindex_spin
);
809 gfs2_assert_warn(rgd
->rd_sbd
, rgd
->rd_bh_count
);
811 spin_unlock(&sdp
->sd_rindex_spin
);
815 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
816 * @rgd: the struct gfs2_rgrpd describing the RG to read in
820 void gfs2_rgrp_bh_put(struct gfs2_rgrpd
*rgd
)
822 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
823 int x
, length
= rgd
->rd_length
;
825 spin_lock(&sdp
->sd_rindex_spin
);
826 gfs2_assert_warn(rgd
->rd_sbd
, rgd
->rd_bh_count
);
827 if (--rgd
->rd_bh_count
) {
828 spin_unlock(&sdp
->sd_rindex_spin
);
832 for (x
= 0; x
< length
; x
++) {
833 struct gfs2_bitmap
*bi
= rgd
->rd_bits
+ x
;
840 spin_unlock(&sdp
->sd_rindex_spin
);
843 void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd
*rgd
)
845 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
846 unsigned int length
= rgd
->rd_length
;
849 for (x
= 0; x
< length
; x
++) {
850 struct gfs2_bitmap
*bi
= rgd
->rd_bits
+ x
;
853 memcpy(bi
->bi_clone
+ bi
->bi_offset
,
854 bi
->bi_bh
->b_data
+ bi
->bi_offset
, bi
->bi_len
);
857 spin_lock(&sdp
->sd_rindex_spin
);
858 rgd
->rd_free_clone
= rgd
->rd_rg
.rg_free
;
859 spin_unlock(&sdp
->sd_rindex_spin
);
863 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
864 * @ip: the incore GFS2 inode structure
866 * Returns: the struct gfs2_alloc
869 struct gfs2_alloc
*gfs2_alloc_get(struct gfs2_inode
*ip
)
871 BUG_ON(ip
->i_alloc
!= NULL
);
872 ip
->i_alloc
= kzalloc(sizeof(struct gfs2_alloc
), GFP_KERNEL
);
877 * try_rgrp_fit - See if a given reservation will fit in a given RG
879 * @al: the struct gfs2_alloc structure describing the reservation
881 * If there's room for the requested blocks to be allocated from the RG:
882 * Sets the $al_rgd field in @al.
884 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
887 static int try_rgrp_fit(struct gfs2_rgrpd
*rgd
, struct gfs2_alloc
*al
)
889 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
892 if (rgd
->rd_flags
& GFS2_RDF_NOALLOC
)
895 spin_lock(&sdp
->sd_rindex_spin
);
896 if (rgd
->rd_free_clone
>= al
->al_requested
) {
900 spin_unlock(&sdp
->sd_rindex_spin
);
906 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
909 * Returns: The inode, if one has been found
912 static struct inode
*try_rgrp_unlink(struct gfs2_rgrpd
*rgd
, u64
*last_unlinked
)
917 struct gfs2_sbd
*sdp
= rgd
->rd_sbd
;
921 if (goal
>= rgd
->rd_data
)
923 down_write(&sdp
->sd_log_flush_lock
);
925 block
= rgblk_search(rgd
, goal
, GFS2_BLKST_UNLINKED
,
926 GFS2_BLKST_UNLINKED
, &n
);
927 up_write(&sdp
->sd_log_flush_lock
);
928 if (block
== BFITNOENT
)
930 /* rgblk_search can return a block < goal, so we need to
931 keep it marching forward. */
932 no_addr
= block
+ rgd
->rd_data0
;
934 if (*last_unlinked
!= NO_BLOCK
&& no_addr
<= *last_unlinked
)
936 *last_unlinked
= no_addr
;
937 inode
= gfs2_inode_lookup(rgd
->rd_sbd
->sd_vfs
, DT_UNKNOWN
,
943 rgd
->rd_flags
&= ~GFS2_RDF_CHECK
;
948 * recent_rgrp_first - get first RG from "recent" list
949 * @sdp: The GFS2 superblock
950 * @rglast: address of the rgrp used last
952 * Returns: The first rgrp in the recent list
955 static struct gfs2_rgrpd
*recent_rgrp_first(struct gfs2_sbd
*sdp
,
958 struct gfs2_rgrpd
*rgd
;
960 spin_lock(&sdp
->sd_rindex_spin
);
963 list_for_each_entry(rgd
, &sdp
->sd_rindex_recent_list
, rd_recent
) {
964 if (rgrp_contains_block(rgd
, rglast
))
969 if (!list_empty(&sdp
->sd_rindex_recent_list
))
970 rgd
= list_entry(sdp
->sd_rindex_recent_list
.next
,
971 struct gfs2_rgrpd
, rd_recent
);
973 spin_unlock(&sdp
->sd_rindex_spin
);
978 * recent_rgrp_next - get next RG from "recent" list
979 * @cur_rgd: current rgrp
982 * Returns: The next rgrp in the recent list
985 static struct gfs2_rgrpd
*recent_rgrp_next(struct gfs2_rgrpd
*cur_rgd
,
988 struct gfs2_sbd
*sdp
= cur_rgd
->rd_sbd
;
989 struct list_head
*head
;
990 struct gfs2_rgrpd
*rgd
;
992 spin_lock(&sdp
->sd_rindex_spin
);
994 head
= &sdp
->sd_rindex_recent_list
;
996 list_for_each_entry(rgd
, head
, rd_recent
) {
997 if (rgd
== cur_rgd
) {
998 if (cur_rgd
->rd_recent
.next
!= head
)
999 rgd
= list_entry(cur_rgd
->rd_recent
.next
,
1000 struct gfs2_rgrpd
, rd_recent
);
1005 list_del(&cur_rgd
->rd_recent
);
1012 if (!list_empty(head
))
1013 rgd
= list_entry(head
->next
, struct gfs2_rgrpd
, rd_recent
);
1016 spin_unlock(&sdp
->sd_rindex_spin
);
1021 * recent_rgrp_add - add an RG to tail of "recent" list
1022 * @new_rgd: The rgrp to add
1026 static void recent_rgrp_add(struct gfs2_rgrpd
*new_rgd
)
1028 struct gfs2_sbd
*sdp
= new_rgd
->rd_sbd
;
1029 struct gfs2_rgrpd
*rgd
;
1030 unsigned int count
= 0;
1031 unsigned int max
= sdp
->sd_rgrps
/ gfs2_jindex_size(sdp
);
1033 spin_lock(&sdp
->sd_rindex_spin
);
1035 list_for_each_entry(rgd
, &sdp
->sd_rindex_recent_list
, rd_recent
) {
1042 list_add_tail(&new_rgd
->rd_recent
, &sdp
->sd_rindex_recent_list
);
1045 spin_unlock(&sdp
->sd_rindex_spin
);
1049 * forward_rgrp_get - get an rgrp to try next from full list
1050 * @sdp: The GFS2 superblock
1052 * Returns: The rgrp to try next
1055 static struct gfs2_rgrpd
*forward_rgrp_get(struct gfs2_sbd
*sdp
)
1057 struct gfs2_rgrpd
*rgd
;
1058 unsigned int journals
= gfs2_jindex_size(sdp
);
1059 unsigned int rg
= 0, x
;
1061 spin_lock(&sdp
->sd_rindex_spin
);
1063 rgd
= sdp
->sd_rindex_forward
;
1065 if (sdp
->sd_rgrps
>= journals
)
1066 rg
= sdp
->sd_rgrps
* sdp
->sd_jdesc
->jd_jid
/ journals
;
1068 for (x
= 0, rgd
= gfs2_rgrpd_get_first(sdp
); x
< rg
;
1069 x
++, rgd
= gfs2_rgrpd_get_next(rgd
))
1072 sdp
->sd_rindex_forward
= rgd
;
1075 spin_unlock(&sdp
->sd_rindex_spin
);
1081 * forward_rgrp_set - set the forward rgrp pointer
1082 * @sdp: the filesystem
1083 * @rgd: The new forward rgrp
1087 static void forward_rgrp_set(struct gfs2_sbd
*sdp
, struct gfs2_rgrpd
*rgd
)
1089 spin_lock(&sdp
->sd_rindex_spin
);
1090 sdp
->sd_rindex_forward
= rgd
;
1091 spin_unlock(&sdp
->sd_rindex_spin
);
1095 * get_local_rgrp - Choose and lock a rgrp for allocation
1096 * @ip: the inode to reserve space for
1097 * @rgp: the chosen and locked rgrp
1099 * Try to acquire rgrp in way which avoids contending with others.
1104 static struct inode
*get_local_rgrp(struct gfs2_inode
*ip
, u64
*last_unlinked
)
1106 struct inode
*inode
= NULL
;
1107 struct gfs2_sbd
*sdp
= GFS2_SB(&ip
->i_inode
);
1108 struct gfs2_rgrpd
*rgd
, *begin
= NULL
;
1109 struct gfs2_alloc
*al
= ip
->i_alloc
;
1110 int flags
= LM_FLAG_TRY
;
1113 int error
, rg_locked
;
1115 /* Try recently successful rgrps */
1117 rgd
= recent_rgrp_first(sdp
, ip
->i_goal
);
1122 if (gfs2_glock_is_locked_by_me(rgd
->rd_gl
)) {
1126 error
= gfs2_glock_nq_init(rgd
->rd_gl
, LM_ST_EXCLUSIVE
,
1127 LM_FLAG_TRY
, &al
->al_rgd_gh
);
1131 if (try_rgrp_fit(rgd
, al
))
1133 if (rgd
->rd_flags
& GFS2_RDF_CHECK
)
1134 inode
= try_rgrp_unlink(rgd
, last_unlinked
);
1136 gfs2_glock_dq_uninit(&al
->al_rgd_gh
);
1139 rgd
= recent_rgrp_next(rgd
, 1);
1143 rgd
= recent_rgrp_next(rgd
, 0);
1147 return ERR_PTR(error
);
1151 /* Go through full list of rgrps */
1153 begin
= rgd
= forward_rgrp_get(sdp
);
1158 if (gfs2_glock_is_locked_by_me(rgd
->rd_gl
)) {
1162 error
= gfs2_glock_nq_init(rgd
->rd_gl
, LM_ST_EXCLUSIVE
, flags
,
1167 if (try_rgrp_fit(rgd
, al
))
1169 if (rgd
->rd_flags
& GFS2_RDF_CHECK
)
1170 inode
= try_rgrp_unlink(rgd
, last_unlinked
);
1172 gfs2_glock_dq_uninit(&al
->al_rgd_gh
);
1182 return ERR_PTR(error
);
1185 rgd
= gfs2_rgrpd_get_next(rgd
);
1187 rgd
= gfs2_rgrpd_get_first(sdp
);
1191 return ERR_PTR(-ENOSPC
);
1196 gfs2_log_flush(sdp
, NULL
);
1202 recent_rgrp_add(rgd
);
1203 rgd
= gfs2_rgrpd_get_next(rgd
);
1205 rgd
= gfs2_rgrpd_get_first(sdp
);
1206 forward_rgrp_set(sdp
, rgd
);
1213 * gfs2_inplace_reserve_i - Reserve space in the filesystem
1214 * @ip: the inode to reserve space for
1219 int gfs2_inplace_reserve_i(struct gfs2_inode
*ip
, char *file
, unsigned int line
)
1221 struct gfs2_sbd
*sdp
= GFS2_SB(&ip
->i_inode
);
1222 struct gfs2_alloc
*al
= ip
->i_alloc
;
1223 struct inode
*inode
;
1225 u64 last_unlinked
= NO_BLOCK
;
1227 if (gfs2_assert_warn(sdp
, al
->al_requested
))
1231 /* We need to hold the rindex unless the inode we're using is
1232 the rindex itself, in which case it's already held. */
1233 if (ip
!= GFS2_I(sdp
->sd_rindex
))
1234 error
= gfs2_rindex_hold(sdp
, &al
->al_ri_gh
);
1235 else if (!sdp
->sd_rgrps
) /* We may not have the rindex read in, so: */
1236 error
= gfs2_ri_update_special(ip
);
1241 inode
= get_local_rgrp(ip
, &last_unlinked
);
1243 if (ip
!= GFS2_I(sdp
->sd_rindex
))
1244 gfs2_glock_dq_uninit(&al
->al_ri_gh
);
1246 return PTR_ERR(inode
);
1248 gfs2_log_flush(sdp
, NULL
);
1259 * gfs2_inplace_release - release an inplace reservation
1260 * @ip: the inode the reservation was taken out on
1262 * Release a reservation made by gfs2_inplace_reserve().
1265 void gfs2_inplace_release(struct gfs2_inode
*ip
)
1267 struct gfs2_sbd
*sdp
= GFS2_SB(&ip
->i_inode
);
1268 struct gfs2_alloc
*al
= ip
->i_alloc
;
1270 if (gfs2_assert_warn(sdp
, al
->al_alloced
<= al
->al_requested
) == -1)
1271 fs_warn(sdp
, "al_alloced = %u, al_requested = %u "
1272 "al_file = %s, al_line = %u\n",
1273 al
->al_alloced
, al
->al_requested
, al
->al_file
,
1277 if (al
->al_rgd_gh
.gh_gl
)
1278 gfs2_glock_dq_uninit(&al
->al_rgd_gh
);
1279 if (ip
!= GFS2_I(sdp
->sd_rindex
))
1280 gfs2_glock_dq_uninit(&al
->al_ri_gh
);
1284 * gfs2_get_block_type - Check a block in a RG is of given type
1285 * @rgd: the resource group holding the block
1286 * @block: the block number
1288 * Returns: The block type (GFS2_BLKST_*)
1291 unsigned char gfs2_get_block_type(struct gfs2_rgrpd
*rgd
, u64 block
)
1293 struct gfs2_bitmap
*bi
= NULL
;
1294 u32 length
, rgrp_block
, buf_block
;
1298 length
= rgd
->rd_length
;
1299 rgrp_block
= block
- rgd
->rd_data0
;
1301 for (buf
= 0; buf
< length
; buf
++) {
1302 bi
= rgd
->rd_bits
+ buf
;
1303 if (rgrp_block
< (bi
->bi_start
+ bi
->bi_len
) * GFS2_NBBY
)
1307 gfs2_assert(rgd
->rd_sbd
, buf
< length
);
1308 buf_block
= rgrp_block
- bi
->bi_start
* GFS2_NBBY
;
1310 type
= gfs2_testbit(rgd
, bi
->bi_bh
->b_data
+ bi
->bi_offset
,
1311 bi
->bi_len
, buf_block
);
1317 * rgblk_search - find a block in @old_state, change allocation
1318 * state to @new_state
1319 * @rgd: the resource group descriptor
1320 * @goal: the goal block within the RG (start here to search for avail block)
1321 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
1322 * @new_state: GFS2_BLKST_XXX the after-allocation block state
1323 * @n: The extent length
1325 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
1326 * Add the found bitmap buffer to the transaction.
1327 * Set the found bits to @new_state to change block's allocation state.
1329 * This function never fails, because we wouldn't call it unless we
1330 * know (from reservation results, etc.) that a block is available.
1332 * Scope of @goal and returned block is just within rgrp, not the whole
1335 * Returns: the block number allocated
1338 static u32
rgblk_search(struct gfs2_rgrpd
*rgd
, u32 goal
,
1339 unsigned char old_state
, unsigned char new_state
,
1342 struct gfs2_bitmap
*bi
= NULL
;
1343 const u32 length
= rgd
->rd_length
;
1345 unsigned int buf
, x
;
1346 const unsigned int elen
= *n
;
1350 /* Find bitmap block that contains bits for goal block */
1351 for (buf
= 0; buf
< length
; buf
++) {
1352 bi
= rgd
->rd_bits
+ buf
;
1353 if (goal
< (bi
->bi_start
+ bi
->bi_len
) * GFS2_NBBY
)
1357 gfs2_assert(rgd
->rd_sbd
, buf
< length
);
1359 /* Convert scope of "goal" from rgrp-wide to within found bit block */
1360 goal
-= bi
->bi_start
* GFS2_NBBY
;
1362 /* Search (up to entire) bitmap in this rgrp for allocatable block.
1363 "x <= length", instead of "x < length", because we typically start
1364 the search in the middle of a bit block, but if we can't find an
1365 allocatable block anywhere else, we want to be able wrap around and
1366 search in the first part of our first-searched bit block. */
1367 for (x
= 0; x
<= length
; x
++) {
1368 /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
1369 bitmaps, so we must search the originals for that. */
1370 buffer
= bi
->bi_bh
->b_data
+ bi
->bi_offset
;
1371 if (old_state
!= GFS2_BLKST_UNLINKED
&& bi
->bi_clone
)
1372 buffer
= bi
->bi_clone
+ bi
->bi_offset
;
1374 blk
= gfs2_bitfit(buffer
, bi
->bi_len
, goal
, old_state
);
1375 if (blk
!= BFITNOENT
)
1378 /* Try next bitmap block (wrap back to rgrp header if at end) */
1379 buf
= (buf
+ 1) % length
;
1380 bi
= rgd
->rd_bits
+ buf
;
1384 if (blk
!= BFITNOENT
&& old_state
!= new_state
) {
1386 gfs2_trans_add_bh(rgd
->rd_gl
, bi
->bi_bh
, 1);
1387 gfs2_setbit(rgd
, bi
->bi_bh
->b_data
, bi
->bi_clone
, bi
->bi_offset
,
1388 bi
->bi_len
, blk
, new_state
);
1392 if (goal
>= (bi
->bi_len
* GFS2_NBBY
))
1394 if (gfs2_testbit(rgd
, buffer
, bi
->bi_len
, goal
) !=
1397 gfs2_setbit(rgd
, bi
->bi_bh
->b_data
, bi
->bi_clone
,
1398 bi
->bi_offset
, bi
->bi_len
, goal
,
1404 return (blk
== BFITNOENT
) ? blk
: (bi
->bi_start
* GFS2_NBBY
) + blk
;
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walks @blen consecutive blocks starting at @bstart, locates the bitmap
 * buffer covering each one, and rewrites its two-bit state to @new_state.
 * Each touched bitmap buffer is added to the current transaction first.
 *
 * Returns: Resource group containing the block(s), or NULL if no rgrp
 *          covers @bstart (filesystem inconsistency)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	length = rgd->rd_length;

	/* Convert the block address to an offset within this rgrp */
	rgrp_blk = bstart - rgd->rd_data0;

	while (blen--) {
		/* Find the bitmap buffer that covers rgrp_blk */
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		/* Convert scope of the block from rgrp-wide to bit-block */
		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

		/* Lazily create the clone bitmap so that in-journal state
		   stays visible until the transaction is committed.
		   NOTE(review): __GFP_NOFAIL allocation — cannot fail. */
		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_NOFS | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}
		/* Journal the buffer before modifying it */
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
	}

	return rgd;
}
/**
 * gfs2_alloc_block - Allocate a block
 * @ip: the inode to allocate the block for
 * @n: in: number of blocks requested; out: number actually allocated
 *
 * Allocates up to *n contiguous free blocks from the rgrp previously
 * reserved in @ip->i_alloc, updating the rgrp header, statfs and quota
 * accounting within the current transaction.
 *
 * Returns: the first allocated block (filesystem-wide address)
 */

u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	/* Start searching at the inode's goal block if it lies in this
	   rgrp, otherwise at this rgrp's last allocation point */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal - rgd->rd_data0;
	else
		goal = rgd->rd_last_alloc;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
	/* Since the rgrp was reserved with free blocks available, the
	   search must succeed */
	BUG_ON(blk == BFITNOENT);

	rgd->rd_last_alloc = blk;
	block = rgd->rd_data0 + blk;
	ip->i_goal = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free >= *n);
	rgd->rd_rg.rg_free -= *n;

	/* Journal the rgrp header before writing the updated counts out */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced += *n;

	gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
	gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_trans_add_rg(rgd);

	/* rd_free_clone is protected by sd_rindex_spin */
	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone -= *n;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 * @generation: set to the new dinode's generation number
 *
 * Allocates a single block in the GFS2_BLKST_DINODE state from the rgrp
 * reserved in @dip->i_alloc and updates the rgrp's free/dinode counts,
 * statfs accounting and unrevoke list within the current transaction.
 *
 * Returns: the block allocated (filesystem-wide address)
 */

u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al = dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 blk;
	u64 block;
	unsigned int n = 1;

	blk = rgblk_search(rgd, rgd->rd_last_alloc,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
	/* The rgrp was reserved with free blocks, so this must succeed */
	BUG_ON(blk == BFITNOENT);

	rgd->rd_last_alloc = blk;

	block = rgd->rd_data0 + blk;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;
	rgd->rd_rg.rg_dinodes++;
	*generation = rgd->rd_rg.rg_igeneration++;
	/* Journal the rgrp header before writing the updated counts out */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	/* -1 free block, +1 dinode */
	gfs2_statfs_change(sdp, 0, -1, +1);
	gfs2_trans_add_unrevoke(sdp, block, 1);

	/* rd_free_clone is protected by sd_rindex_spin */
	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 * Marks the run free in the bitmap, bumps the rgrp's free count, and
 * records the statfs/quota deltas in the current transaction.
 */

void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	/* Journal the rgrp header before writing the updated counts out */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	/* Freed blocks come off the owner's quota */
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 * Same as gfs2_free_data(), but additionally wipes any cached/journaled
 * metadata buffers for the freed blocks via gfs2_meta_wipe().
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	/* Journal the rgrp header before writing the updated counts out */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	/* Freed blocks come off the owner's quota */
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, bstart, blen);
}
/**
 * gfs2_unlink_di - Mark a dinode as unlinked (still in use)
 * @inode: the inode whose dinode block changes state
 *
 * Moves the dinode's block to GFS2_BLKST_UNLINKED so it can be
 * reclaimed later, and journals the rgrp header update.
 */

void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	/* Journal the rgrp header before writing it out */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_trans_add_rg(rgd);
}
/**
 * gfs2_free_uninit_di - free a dinode block without quota/metadata cleanup
 * @rgd: the resource group that contains the dinode
 * @blkno: the dinode's block number
 *
 * Returns the block to GFS2_BLKST_FREE and adjusts the rgrp's
 * dinode/free counts and statfs accounting. Callers handle quota and
 * metadata wiping separately (see gfs2_free_di()).
 */

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	/* The block must belong to the rgrp the caller passed in */
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	/* Freeing a dinode from an rgrp with no dinodes on record means
	   the counts are corrupt */
	if (!rgd->rd_rg.rg_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_rg.rg_dinodes--;
	rgd->rd_rg.rg_free++;

	/* Journal the rgrp header before writing the updated counts out */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	/* +1 free block, -1 dinode */
	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}
/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode being freed
 *
 * Frees the dinode's block, credits the owner's quota, and wipes any
 * cached metadata for the block.
 */

void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}
1654 * gfs2_rlist_add - add a RG to a list of RGs
1655 * @sdp: the filesystem
1656 * @rlist: the list of resource groups
1659 * Figure out what RG a block belongs to and add that RG to the list
1661 * FIXME: Don't use NOFAIL
1665 void gfs2_rlist_add(struct gfs2_sbd
*sdp
, struct gfs2_rgrp_list
*rlist
,
1668 struct gfs2_rgrpd
*rgd
;
1669 struct gfs2_rgrpd
**tmp
;
1670 unsigned int new_space
;
1673 if (gfs2_assert_warn(sdp
, !rlist
->rl_ghs
))
1676 rgd
= gfs2_blk2rgrpd(sdp
, block
);
1678 if (gfs2_consist(sdp
))
1679 fs_err(sdp
, "block = %llu\n", (unsigned long long)block
);
1683 for (x
= 0; x
< rlist
->rl_rgrps
; x
++)
1684 if (rlist
->rl_rgd
[x
] == rgd
)
1687 if (rlist
->rl_rgrps
== rlist
->rl_space
) {
1688 new_space
= rlist
->rl_space
+ 10;
1690 tmp
= kcalloc(new_space
, sizeof(struct gfs2_rgrpd
*),
1691 GFP_NOFS
| __GFP_NOFAIL
);
1693 if (rlist
->rl_rgd
) {
1694 memcpy(tmp
, rlist
->rl_rgd
,
1695 rlist
->rl_space
* sizeof(struct gfs2_rgrpd
*));
1696 kfree(rlist
->rl_rgd
);
1699 rlist
->rl_space
= new_space
;
1700 rlist
->rl_rgd
= tmp
;
1703 rlist
->rl_rgd
[rlist
->rl_rgrps
++] = rgd
;
/**
 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
 *                    and initialize an array of glock holders for them
 * @rlist: the list of resource groups
 * @state: the lock state to acquire the RG lock in
 *
 * FIXME: Don't use NOFAIL
 */

void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
	unsigned int x;

	rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
				GFP_NOFS | __GFP_NOFAIL);
	/* One holder per rgrp glock, all requested in the same state */
	for (x = 0; x < rlist->rl_rgrps; x++)
		gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
				 state, 0,
				 &rlist->rl_ghs[x]);
}
1730 * gfs2_rlist_free - free a resource group list
1731 * @list: the list of resource groups
1735 void gfs2_rlist_free(struct gfs2_rgrp_list
*rlist
)
1739 kfree(rlist
->rl_rgd
);
1741 if (rlist
->rl_ghs
) {
1742 for (x
= 0; x
< rlist
->rl_rgrps
; x
++)
1743 gfs2_holder_uninit(&rlist
->rl_ghs
[x
]);
1744 kfree(rlist
->rl_ghs
);