/*
 *  linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>
#include <linux/version.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}
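
/* Read a 16-bit value from a node, converting it from big-endian */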
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}
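
/* Read a single byte from a node */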
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}
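
/* Read a key record; its length is stored with the key for leaf and
   variable-length index nodes, otherwise fixed by the tree */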
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}
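
/* Copy a range of bytes into a node and mark the touched pages dirty */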
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memcpy(kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min(len, (int)PAGE_CACHE_SIZE);
		memcpy(kmap(*++pagep), buf, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}
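
/* Write a 16-bit value into a node, converting it to big-endian */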
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}
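
/* Zero out a range of bytes in a node */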
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	off &= ~PAGE_CACHE_MASK;

	l = min(len, (int)PAGE_CACHE_SIZE - off);
	memset(kmap(*pagep) + off, 0, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		l = min(len, (int)PAGE_CACHE_SIZE);
		memset(kmap(*++pagep), 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}
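
/* Copy a range of bytes from one node to another */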
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page **src_page, **dst_page;
	int l;

	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_CACHE_SHIFT);
	src &= ~PAGE_CACHE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT);
	dst &= ~PAGE_CACHE_MASK;

	if (src == dst) {
		/* src and dst share the same page alignment, copy page by page */
		l = min(len, (int)PAGE_CACHE_SIZE - src);
		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
		kunmap(*src_page);
		set_page_dirty(*dst_page);
		kunmap(*dst_page);

		while ((len -= l) != 0) {
			l = min(len, (int)PAGE_CACHE_SIZE);
			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		/* misaligned: copy in chunks up to the nearer page boundary */
		do {
			src_ptr = kmap(*src_page) + src;
			dst_ptr = kmap(*dst_page) + dst;
			if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
				l = PAGE_CACHE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_CACHE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}
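
/* Move a range of bytes within a node; source and destination may overlap */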
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		/* moving forward: copy backwards so overlapping data
		   is not clobbered before it is read */
		src += len - 1;
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src = (src & ~PAGE_CACHE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst = (dst & ~PAGE_CACHE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				memmove(kmap(*dst_page), kmap(*src_page), src);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				len -= src;
				src = PAGE_CACHE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, len);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (src < dst) {
					l = src;
					src = PAGE_CACHE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_CACHE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (dst == PAGE_CACHE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_CACHE_SHIFT);
		src &= ~PAGE_CACHE_MASK;
		dst_page = node->page + (dst >> PAGE_CACHE_SHIFT);
		dst &= ~PAGE_CACHE_MASK;

		if (src == dst) {
			l = min(len, (int)PAGE_CACHE_SIZE - src);
			memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);

			while ((len -= l) != 0) {
				l = min(len, (int)PAGE_CACHE_SIZE);
				memmove(kmap(*++dst_page), kmap(*++src_page), l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) {
					l = PAGE_CACHE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_CACHE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}
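
/* Dump a node's record offsets and keys for debugging */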
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		dprint(DBG_BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			dprint(DBG_BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			dprint(DBG_BNODE_MOD, " (%d)", tmp);
		}
	}
	dprint(DBG_BNODE_MOD, "\n");
}
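
/* Detach a node from the prev/next chain, updating its neighbours' links */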
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk("hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}
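
/* Hash a node number into the tree's node hash table */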
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}
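
/* Look up a node in the hash table; callers hold tree->hash_lock */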
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		printk("HFS+-fs: request for non-existent node %u in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}
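
/* Allocate an in-memory node, add it to the hash table and read in its pages */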
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		printk("HFS+-fs: request for non-existent node %u in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kmalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	memset(node, 0, size);
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
	       node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		/* someone else created this node first, wait for it
		   to finish initializing and use it instead */
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_cache_page(mapping, block, (filler_t *)mapping->a_ops->readpage, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			page_cache_release(page);
			goto fail;
		}
		page_cache_release(page);
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}
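
/* Remove a node from the hash table; called with tree->hash_lock held */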
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	if (!*p)
		BUG();
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	/* sanity-check the node type against its height in the tree */
	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	/* verify that the record offset table is consistent */
	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}
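
/* Free the in-memory node; page references were already dropped at create time */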
void hfs_bnode_free(struct hfs_bnode *node)
{
	//int i;

	//for (i = 0; i < node->tree->pages_per_bnode; i++)
	//	if (node->page[i])
	//		page_cache_release(node->page[i]);
	kfree(node);
}
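
/* Create a brand-new node and zero its contents */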
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		printk("new node %u already hashed?\n", num);
		BUG();
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
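
/* Take an extra reference on a node */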
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
		if (!atomic_read(&node->refcnt))
			BUG();
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}