/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)
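
/*
 * The allocation file is a plain bitmap: one bit per allocation block,
 * stored big-endian, with a set bit meaning the block is in use.  Each
 * page of the file's page cache therefore covers PAGE_CACHE_BITS blocks,
 * and both routines below walk the bitmap page by page through that cache.
 */
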
int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;
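
	/*
	 * Look for a run of clear bits starting at block 'offset': never scan
	 * past block 'size', and never allocate more than '*max' blocks.  On
	 * success the first block of the run is returned and '*max' is updated
	 * to the number of blocks actually allocated; if no usable run is
	 * found, a value no smaller than 'size' is returned.
	 */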
	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
			       (filler_t *)mapping->a_ops->readpage, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
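	/*
	 * 'curr' now points at the 32-bit word holding bit 'offset' inside the
	 * mapped page and 'i' is the bit position within that word; round
	 * 'offset' down to the first bit of this page so the page-relative
	 * arithmetic below stays consistent.
	 */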
	offset &= ~(PAGE_CACHE_BITS - 1);
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
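
	/*
	 * 'end' limits the scan: the whole page when the bitmap extends into a
	 * later page, otherwise only up to the word containing the last valid
	 * bit (block 'size' - 1).
	 */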
	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1 << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
				       (filler_t *)mapping->a_ops->readpage, NULL);
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	dprint(DBG_BITMAP, "bitmap full\n");
	start = size;
	goto out;
found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		dprint(DBG_BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
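	/*
	 * Mark the run as allocated: finish the word the run starts in, then
	 * whole words (crossing bitmap pages as needed), then a final partial
	 * word, stopping early as soon as an already-set bit is hit.
	 */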
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
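		/*
		 * This page is exhausted but blocks remain to be marked: flush
		 * it and continue in the next page of the allocation file.
		 */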
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_cache_page(mapping, offset / PAGE_CACHE_BITS,
				       (filler_t *)mapping->a_ops->readpage, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	HFSPLUS_SB(sb).free_blocks -= *max;
	sb->s_dirt = 1;
	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	return start;
}
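
/*
 * Free 'count' allocation blocks starting at 'offset' by clearing their bits
 * in the allocation bitmap and crediting them back to the free-block count.
 */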
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > HFSPLUS_SB(sb).total_blocks)
		return -2;
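
	/*
	 * Clear the requested range page by page: a partial word at the start,
	 * then whole words, then a partial word at the end.  'len' remembers
	 * the original count for the free-block accounting at the end.
	 */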
	mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
	mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_cache_page(mapping, pnr, (filler_t *)mapping->a_ops->readpage, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;

		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}
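
	/*
	 * Clear whole 32-bit words until fewer than 32 blocks remain, moving on
	 * to the next bitmap page whenever the current one is exhausted.
	 */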
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_cache_page(mapping, ++pnr, (filler_t *)mapping->a_ops->readpage, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	HFSPLUS_SB(sb).free_blocks += len;
	sb->s_dirt = 1;
	mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);

	return 0;
}