fs/hfsplus/btree.c — HFS+ B*-tree open/close/write and bitmap-node allocation
(blob 22e4d4e329999c3ba9848036a3639bc194598f74)
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
19 /* Get a reference to a B*Tree and do some initial checks */
20 struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
22 struct hfs_btree *tree;
23 struct hfs_btree_header_rec *head;
24 struct address_space *mapping;
25 struct inode *inode;
26 struct page *page;
27 unsigned int size;
29 tree = kzalloc(sizeof(*tree), GFP_KERNEL);
30 if (!tree)
31 return NULL;
33 mutex_init(&tree->tree_lock);
34 spin_lock_init(&tree->hash_lock);
35 tree->sb = sb;
36 tree->cnid = id;
37 inode = hfsplus_iget(sb, id);
38 if (IS_ERR(inode))
39 goto free_tree;
40 tree->inode = inode;
42 if (!HFSPLUS_I(tree->inode)->first_blocks) {
43 printk(KERN_ERR
44 "hfs: invalid btree extent records (0 size).\n");
45 goto free_inode;
48 mapping = tree->inode->i_mapping;
49 page = read_mapping_page(mapping, 0, NULL);
50 if (IS_ERR(page))
51 goto free_inode;
53 /* Load the header */
54 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
55 tree->root = be32_to_cpu(head->root);
56 tree->leaf_count = be32_to_cpu(head->leaf_count);
57 tree->leaf_head = be32_to_cpu(head->leaf_head);
58 tree->leaf_tail = be32_to_cpu(head->leaf_tail);
59 tree->node_count = be32_to_cpu(head->node_count);
60 tree->free_nodes = be32_to_cpu(head->free_nodes);
61 tree->attributes = be32_to_cpu(head->attributes);
62 tree->node_size = be16_to_cpu(head->node_size);
63 tree->max_key_len = be16_to_cpu(head->max_key_len);
64 tree->depth = be16_to_cpu(head->depth);
66 /* Verify the tree and set the correct compare function */
67 switch (id) {
68 case HFSPLUS_EXT_CNID:
69 if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
70 printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
71 tree->max_key_len);
72 goto fail_page;
74 if (tree->attributes & HFS_TREE_VARIDXKEYS) {
75 printk(KERN_ERR "hfs: invalid extent btree flag\n");
76 goto fail_page;
79 tree->keycmp = hfsplus_ext_cmp_key;
80 break;
81 case HFSPLUS_CAT_CNID:
82 if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
83 printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
84 tree->max_key_len);
85 goto fail_page;
87 if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
88 printk(KERN_ERR "hfs: invalid catalog btree flag\n");
89 goto fail_page;
92 if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
93 (head->key_type == HFSPLUS_KEY_BINARY))
94 tree->keycmp = hfsplus_cat_bin_cmp_key;
95 else {
96 tree->keycmp = hfsplus_cat_case_cmp_key;
97 set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
99 break;
100 default:
101 printk(KERN_ERR "hfs: unknown B*Tree requested\n");
102 goto fail_page;
105 if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
106 printk(KERN_ERR "hfs: invalid btree flag\n");
107 goto fail_page;
110 size = tree->node_size;
111 if (!is_power_of_2(size))
112 goto fail_page;
113 if (!tree->node_count)
114 goto fail_page;
116 tree->node_size_shift = ffs(size) - 1;
118 tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
120 kunmap(page);
121 page_cache_release(page);
122 return tree;
124 fail_page:
125 page_cache_release(page);
126 free_inode:
127 tree->inode->i_mapping->a_ops = &hfsplus_aops;
128 iput(tree->inode);
129 free_tree:
130 kfree(tree);
131 return NULL;
134 /* Release resources used by a btree */
135 void hfs_btree_close(struct hfs_btree *tree)
137 struct hfs_bnode *node;
138 int i;
140 if (!tree)
141 return;
143 for (i = 0; i < NODE_HASH_SIZE; i++) {
144 while ((node = tree->node_hash[i])) {
145 tree->node_hash[i] = node->next_hash;
146 if (atomic_read(&node->refcnt))
147 printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
148 node->tree->cnid, node->this, atomic_read(&node->refcnt));
149 hfs_bnode_free(node);
150 tree->node_hash_cnt--;
153 iput(tree->inode);
154 kfree(tree);
157 void hfs_btree_write(struct hfs_btree *tree)
159 struct hfs_btree_header_rec *head;
160 struct hfs_bnode *node;
161 struct page *page;
163 node = hfs_bnode_find(tree, 0);
164 if (IS_ERR(node))
165 /* panic? */
166 return;
167 /* Load the header */
168 page = node->page[0];
169 head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
171 head->root = cpu_to_be32(tree->root);
172 head->leaf_count = cpu_to_be32(tree->leaf_count);
173 head->leaf_head = cpu_to_be32(tree->leaf_head);
174 head->leaf_tail = cpu_to_be32(tree->leaf_tail);
175 head->node_count = cpu_to_be32(tree->node_count);
176 head->free_nodes = cpu_to_be32(tree->free_nodes);
177 head->attributes = cpu_to_be32(tree->attributes);
178 head->depth = cpu_to_be16(tree->depth);
180 kunmap(page);
181 set_page_dirty(page);
182 hfs_bnode_put(node);
185 static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
187 struct hfs_btree *tree = prev->tree;
188 struct hfs_bnode *node;
189 struct hfs_bnode_desc desc;
190 __be32 cnid;
192 node = hfs_bnode_create(tree, idx);
193 if (IS_ERR(node))
194 return node;
196 tree->free_nodes--;
197 prev->next = idx;
198 cnid = cpu_to_be32(idx);
199 hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
201 node->type = HFS_NODE_MAP;
202 node->num_recs = 1;
203 hfs_bnode_clear(node, 0, tree->node_size);
204 desc.next = 0;
205 desc.prev = 0;
206 desc.type = HFS_NODE_MAP;
207 desc.height = 0;
208 desc.num_recs = cpu_to_be16(1);
209 desc.reserved = 0;
210 hfs_bnode_write(node, &desc, 0, sizeof(desc));
211 hfs_bnode_write_u16(node, 14, 0x8000);
212 hfs_bnode_write_u16(node, tree->node_size - 2, 14);
213 hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
215 return node;
/*
 * Allocate a free node in the btree: grow the tree file if it is full,
 * then scan the bitmap (map) node chain for the first clear bit, set it,
 * and return a newly created bnode for that index.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i;

	/* No free nodes: extend the tree file until at least one appears. */
	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
		u32 count;
		int res;

		res = hfsplus_file_extend(inode);
		if (res)
			return ERR_PTR(res);
		/* Keep the cached sizes in sync with the new allocation. */
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	/* The first map fragment is record 2 of the header node (node 0). */
	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;

	for (;;) {
		/* Scan this map record byte by byte for a clear bit. */
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						/* Found: claim the bit and hand
						 * back a node for index idx+i. */
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			/* Advance to the next byte, remapping when we cross
			 * a page boundary within the node. */
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		/* Record exhausted: follow the chain to the next map node,
		 * creating one if the chain ends here. */
		nidx = node->next;
		if (!nidx) {
			printk(KERN_DEBUG "hfs: create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		/* In dedicated map nodes the bitmap is record 0. */
		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}
308 void hfs_bmap_free(struct hfs_bnode *node)
310 struct hfs_btree *tree;
311 struct page *page;
312 u16 off, len;
313 u32 nidx;
314 u8 *data, byte, m;
316 dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
317 BUG_ON(!node->this);
318 tree = node->tree;
319 nidx = node->this;
320 node = hfs_bnode_find(tree, 0);
321 if (IS_ERR(node))
322 return;
323 len = hfs_brec_lenoff(node, 2, &off);
324 while (nidx >= len * 8) {
325 u32 i;
327 nidx -= len * 8;
328 i = node->next;
329 hfs_bnode_put(node);
330 if (!i) {
331 /* panic */;
332 printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
333 return;
335 node = hfs_bnode_find(tree, i);
336 if (IS_ERR(node))
337 return;
338 if (node->type != HFS_NODE_MAP) {
339 /* panic */;
340 printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
341 hfs_bnode_put(node);
342 return;
344 len = hfs_brec_lenoff(node, 0, &off);
346 off += node->page_offset + nidx / 8;
347 page = node->page[off >> PAGE_CACHE_SHIFT];
348 data = kmap(page);
349 off &= ~PAGE_CACHE_MASK;
350 m = 1 << (~nidx & 7);
351 byte = data[off];
352 if (!(byte & m)) {
353 printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
354 kunmap(page);
355 hfs_bnode_put(node);
356 return;
358 data[off] = byte & ~m;
359 set_page_dirty(page);
360 kunmap(page);
361 hfs_bnode_put(node);
362 tree->free_nodes++;
363 mark_inode_dirty(tree->inode);