// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)
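
/*
 * Each page of the allocation file holds PAGE_SIZE * 8 bitmap bits, i.e.
 * one bit per allocation block, so PAGE_CACHE_BITS is the number of blocks
 * described by a single page.
 */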

int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
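	/*
	 * Search the allocation bitmap ('size' blocks in total) for a run of
	 * free blocks starting at or after 'offset'.  Up to '*max' blocks are
	 * marked as allocated; '*max' is updated with the length actually
	 * allocated and the first block of the run is returned ('size' is
	 * returned when no free block is found).
	 */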
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
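	/*
	 * 'curr' now points at the 32-bit word that holds bit 'offset' within
	 * this page; 'i' is the bit position of 'offset' inside that word.
	 */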
	offset &= ~(PAGE_CACHE_BITS - 1);
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
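	/*
	 * 'end' points one past the last word to scan in this page: a full
	 * page's worth of words when the bitmap extends beyond this page,
	 * otherwise only the words that cover the remaining 'size' bits.
	 */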

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
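		/*
		 * Bitmap words are big-endian and the most significant bit of
		 * a word describes the lowest-numbered block, so bit 'i' is
		 * selected by shifting the top bit right, e.g. i == 3 gives
		 * mask 0x10000000.
		 */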
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;

found:
	start = offset + (curr - pptr) * 32 + i;
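	/*
	 * Convert page base + word index + bit index back into an absolute
	 * block number; 'offset' was rounded down to the start of the page
	 * earlier, so 'start' is the first free block found.
	 */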
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
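	/* never allocate past the end of the bitmap */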
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
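			/* claimed a fully free word: 32 blocks in one go */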
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
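	/* report back how many blocks were actually marked allocated */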
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}

int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
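	/*
	 * Clear 'count' bits starting at block 'offset' in the allocation
	 * bitmap.  Returns 0 on success, -ENOENT if the range lies beyond
	 * the volume, or -EIO if a bitmap page cannot be read.
	 */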
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;
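	/*
	 * 'curr' points at the word holding the first block to free; 'len'
	 * remembers the full count so free_blocks can be adjusted at the end.
	 */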

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		count += i;
		mask = 0xffffffffU << (32 - i);
		if (count < 32) {
			mask |= 0xffffffffU >> count;
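			/*
			 * The mask marks the bits to keep: the 'i' blocks
			 * before the range and, when the range ends inside
			 * this word, everything from bit 'count' onward
			 * ('count' already includes the leading 'i' bits).
			 */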
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= 32;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
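		/*
		 * Keep only the bits past the end of the freed range, e.g.
		 * count == 4 keeps 0x0fffffff of the final word.
		 */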
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);

	return -EIO;
}