[ARM] 3152/1: make various assembly local labels actually local (the rest)
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / fs / jfs / jfs_dtree.c
blob404f33eae50792b2d3508507745890c28600dce8
1 /*
2 * Copyright (C) International Business Machines Corp., 2000-2004
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 * jfs_dtree.c: directory B+-tree manager
22 * B+-tree with variable length key directory:
24 * each directory page is structured as an array of 32-byte
25 * directory entry slots initialized as a freelist
26 * to avoid search/compaction of free space at insertion.
27 * when an entry is inserted, a number of slots are allocated
28 * from the freelist as required to store variable length data
29 * of the entry; when the entry is deleted, slots of the entry
30 * are returned to freelist.
32 * leaf entry stores full name as key and file serial number
33 * (aka inode number) as data.
34 * internal/router entry stores sufffix compressed name
35 * as key and simple extent descriptor as data.
37 * each directory page maintains a sorted entry index table
38 * which stores the start slot index of sorted entries
39 * to allow binary search on the table.
41 * directory starts as a root/leaf page in on-disk inode
42 * inline data area.
43 * when it becomes full, it starts a leaf of a external extent
44 * of length of 1 block. each time the first leaf becomes full,
45 * it is extended rather than split (its size is doubled),
46 * until its length becoms 4 KBytes, from then the extent is split
47 * with new 4 Kbyte extent when it becomes full
48 * to reduce external fragmentation of small directories.
50 * blah, blah, blah, for linear scan of directory in pieces by
51 * readdir().
54 * case-insensitive directory file system
56 * names are stored in case-sensitive way in leaf entry.
57 * but stored, searched and compared in case-insensitive (uppercase) order
58 * (i.e., both search key and entry key are folded for search/compare):
59 * (note that case-sensitive order is BROKEN in storage, e.g.,
60 * sensitive: Ad, aB, aC, aD -> insensitive: aB, aC, aD, Ad
62 * entries which folds to the same key makes up a equivalent class
63 * whose members are stored as contiguous cluster (may cross page boundary)
64 * but whose order is arbitrary and acts as duplicate, e.g.,
65 * abc, Abc, aBc, abC)
67 * once match is found at leaf, requires scan forward/backward
68 * either for, in case-insensitive search, duplicate
69 * or for, in case-sensitive search, for exact match
71 * router entry must be created/stored in case-insensitive way
72 * in internal entry:
73 * (right most key of left page and left most key of right page
74 * are folded, and its suffix compression is propagated as router
75 * key in parent)
76 * (e.g., if split occurs <abc> and <aBd>, <ABD> trather than <aB>
77 * should be made the router key for the split)
79 * case-insensitive search:
81 * fold search key;
83 * case-insensitive search of B-tree:
84 * for internal entry, router key is already folded;
85 * for leaf entry, fold the entry key before comparison.
87 * if (leaf entry case-insensitive match found)
88 * if (next entry satisfies case-insensitive match)
89 * return EDUPLICATE;
90 * if (prev entry satisfies case-insensitive match)
91 * return EDUPLICATE;
92 * return match;
93 * else
94 * return no match;
96 * serialization:
97 * target directory inode lock is being held on entry/exit
98 * of all main directory service routines.
100 * log based recovery:
103 #include <linux/fs.h>
104 #include <linux/quotaops.h>
105 #include "jfs_incore.h"
106 #include "jfs_superblock.h"
107 #include "jfs_filsys.h"
108 #include "jfs_metapage.h"
109 #include "jfs_dmap.h"
110 #include "jfs_unicode.h"
111 #include "jfs_debug.h"
113 /* dtree split parameter */
114 struct dtsplit {
115 struct metapage *mp;	/* leaf/internal page to be split */
116 s16 index;	/* index at which the new entry is inserted */
117 s16 nslot;	/* number of 32-byte slots the new entry needs */
118 struct component_name *key;	/* key (name) of the entry being inserted */
119 ddata_t *data;	/* data (inode number / child pxd) of the entry */
120 struct pxdlist *pxdlist;	/* extents preallocated to cover the split(s) */
/* NOTE(review): the closing "};" of this struct was lost in extraction */
/* map a pinned metapage to its dtpage (root page lives in the inode) */
123 #define DT_PAGE(IP, MP) BT_PAGE(IP, MP, dtpage_t, i_dtroot)
125 /* get page buffer for specified block address */
/* NOTE(review): several brace-only continuation lines of this macro were
 * lost in extraction.  After BT_GETPAGE succeeds it sanity-checks
 * nextindex/maxslot against the page type (root vs. external) and, on
 * corruption, unpins the page, calls jfs_error() and fails with -EIO.
 */
126 #define DT_GETPAGE(IP, BN, MP, SIZE, P, RC)\
128 BT_GETPAGE(IP, BN, MP, dtpage_t, SIZE, P, RC, i_dtroot)\
129 if (!(RC))\
131 if (((P)->header.nextindex > (((BN)==0)?DTROOTMAXSLOT:(P)->header.maxslot)) ||\
132 ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT)))\
134 BT_PUTPAGE(MP);\
135 jfs_error((IP)->i_sb, "DT_GETPAGE: dtree page corrupt");\
136 MP = NULL;\
137 RC = -EIO;\
142 /* for consistency */
143 #define DT_PUTPAGE(MP) BT_PUTPAGE(MP)
/* unpack the (bn, mp, page, index) search result left on top of a btstack */
145 #define DT_GETSEARCH(IP, LEAF, BN, MP, P, INDEX) \
146 BT_GETSEARCH(IP, LEAF, BN, MP, dtpage_t, P, INDEX, i_dtroot)
149 * forward references
151 static int dtSplitUp(tid_t tid, struct inode *ip,
152 struct dtsplit * split, struct btstack * btstack);
154 static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
155 struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rxdp);
157 static int dtExtendPage(tid_t tid, struct inode *ip,
158 struct dtsplit * split, struct btstack * btstack);
160 static int dtSplitRoot(tid_t tid, struct inode *ip,
161 struct dtsplit * split, struct metapage ** rmpp);
163 static int dtDeleteUp(tid_t tid, struct inode *ip, struct metapage * fmp,
164 dtpage_t * fp, struct btstack * btstack);
166 static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p);
168 static int dtReadFirst(struct inode *ip, struct btstack * btstack);
170 static int dtReadNext(struct inode *ip,
171 loff_t * offset, struct btstack * btstack);
173 static int dtCompare(struct component_name * key, dtpage_t * p, int si);
175 static int ciCompare(struct component_name * key, dtpage_t * p, int si,
176 int flag);
178 static void dtGetKey(dtpage_t * p, int i, struct component_name * key,
179 int flag);
181 static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
182 int ri, struct component_name * key, int flag);
184 static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
185 ddata_t * data, struct dt_lock **);
187 static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
188 struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
189 int do_index);
191 static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock);
193 static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock);
195 static void dtLinelockFreelist(dtpage_t * p, int m, struct dt_lock ** dtlock);
197 #define ciToUpper(c) UniStrupr((c)->name)
200 * read_index_page()
202 * Reads a page of a directory's index table.
203 * Having metadata mapped into the directory inode's address space
204 * presents a multitude of problems. We avoid this by mapping to
205 * the absolute address space outside of the *_metapage routines
207 static struct metapage *read_index_page(struct inode *inode, s64 blkno)
209 int rc;
210 s64 xaddr;
211 int xflag;
212 s32 xlen;
214 rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
215 if (rc || (xaddr == 0))
216 return NULL;
218 return read_metapage(inode, xaddr, PSIZE, 1);
222 * get_index_page()
224 * Same as get_index_page(), but get's a new page without reading
226 static struct metapage *get_index_page(struct inode *inode, s64 blkno)
228 int rc;
229 s64 xaddr;
230 int xflag;
231 s32 xlen;
233 rc = xtLookup(inode, blkno, 1, &xflag, &xaddr, &xlen, 1);
234 if (rc || (xaddr == 0))
235 return NULL;
237 return get_metapage(inode, xaddr, PSIZE, 1);
241 * find_index()
243 * Returns dtree page containing directory table entry for specified
244 * index and pointer to its entry.
246 * mp must be released by caller.
248 static struct dir_table_slot *find_index(struct inode *ip, u32 index,
249 struct metapage ** mp, s64 *lblock)
251 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
252 s64 blkno;
253 s64 offset;
254 int page_offset;
255 struct dir_table_slot *slot;
256 static int maxWarnings = 10;
258 if (index < 2) {
259 if (maxWarnings) {
260 jfs_warn("find_entry called with index = %d", index);
261 maxWarnings--;
263 return NULL;
266 if (index >= jfs_ip->next_index) {
267 jfs_warn("find_entry called with index >= next_index");
268 return NULL;
271 if (jfs_dirtable_inline(ip)) {
273 * Inline directory table
275 *mp = NULL;
276 slot = &jfs_ip->i_dirtable[index - 2];
277 } else {
278 offset = (index - 2) * sizeof(struct dir_table_slot);
279 page_offset = offset & (PSIZE - 1);
280 blkno = ((offset + 1) >> L2PSIZE) <<
281 JFS_SBI(ip->i_sb)->l2nbperpage;
283 if (*mp && (*lblock != blkno)) {
284 release_metapage(*mp);
285 *mp = NULL;
287 if (*mp == 0) {
288 *lblock = blkno;
289 *mp = read_index_page(ip, blkno);
291 if (*mp == 0) {
292 jfs_err("free_index: error reading directory table");
293 return NULL;
296 slot =
297 (struct dir_table_slot *) ((char *) (*mp)->data +
298 page_offset);
300 return slot;
303 static inline void lock_index(tid_t tid, struct inode *ip, struct metapage * mp,
304 u32 index)
306 struct tlock *tlck;
307 struct linelock *llck;
308 struct lv *lv;
310 tlck = txLock(tid, ip, mp, tlckDATA);
311 llck = (struct linelock *) tlck->lock;
313 if (llck->index >= llck->maxcnt)
314 llck = txLinelock(llck);
315 lv = &llck->lv[llck->index];
318 * Linelock slot size is twice the size of directory table
319 * slot size. 512 entries per page.
321 lv->offset = ((index - 2) & 511) >> 1;
322 lv->length = 1;
323 llck->index++;
327 * add_index()
329 * Adds an entry to the directory index table. This is used to provide
330 * each directory entry with a persistent index in which to resume
331 * directory traversals
/*
 * Returns the new index (>= 2) on success, 0 on failure (with
 * next_index rolled back).  Handles three cases: index fits in the
 * inline table; the inline table overflows and must be migrated to an
 * external xtree page; index lands on an existing/new external page.
 */
333 static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
335 struct super_block *sb = ip->i_sb;
336 struct jfs_sb_info *sbi = JFS_SBI(sb);
337 struct jfs_inode_info *jfs_ip = JFS_IP(ip);
338 u64 blkno;
339 struct dir_table_slot *dirtab_slot;
340 u32 index;
341 struct linelock *llck;
342 struct lv *lv;
343 struct metapage *mp;
344 s64 offset;
345 uint page_offset;
346 struct tlock *tlck;
347 s64 xaddr;
349 ASSERT(DO_INDEX(ip));
/* indices 0/1 are reserved; a smaller next_index means corruption */
351 if (jfs_ip->next_index < 2) {
352 jfs_warn("add_index: next_index = %d. Resetting!",
353 jfs_ip->next_index);
354 jfs_ip->next_index = 2;
357 index = jfs_ip->next_index++;
359 if (index <= MAX_INLINE_DIRTABLE_ENTRY) {
361 * i_size reflects size of index table, or 8 bytes per entry.
363 ip->i_size = (loff_t) (index - 1) << 3;
366 * dir table fits inline within inode
368 dirtab_slot = &jfs_ip->i_dirtable[index-2];
369 dirtab_slot->flag = DIR_INDEX_VALID;
370 dirtab_slot->slot = slot;
371 DTSaddress(dirtab_slot, bn);
373 set_cflag(COMMIT_Dirtable, ip);
375 return index;
377 if (index == (MAX_INLINE_DIRTABLE_ENTRY + 1)) {
378 struct dir_table_slot temp_table[12];
381 * It's time to move the inline table to an external
382 * page and begin to build the xtree
384 if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
385 goto clean_up;
386 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
387 DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
388 goto clean_up;
392 * Save the table, we're going to overwrite it with the
393 * xtree root
395 memcpy(temp_table, &jfs_ip->i_dirtable, sizeof(temp_table));
398 * Initialize empty x-tree
400 xtInitRoot(tid, ip);
403 * Add the first block to the xtree
405 if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
406 /* This really shouldn't fail */
407 jfs_warn("add_index: xtInsert failed!");
/* restore the saved inline table before bailing out */
408 memcpy(&jfs_ip->i_dirtable, temp_table,
409 sizeof (temp_table));
410 dbFree(ip, xaddr, sbi->nbperpage);
411 DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
412 goto clean_up;
414 ip->i_size = PSIZE;
416 if ((mp = get_index_page(ip, 0)) == 0) {
417 jfs_err("add_index: get_metapage failed!");
418 xtTruncate(tid, ip, 0, COMMIT_PWMAP);
419 memcpy(&jfs_ip->i_dirtable, temp_table,
420 sizeof (temp_table));
421 goto clean_up;
423 tlck = txLock(tid, ip, mp, tlckDATA);
/* NOTE(review): "& tlck->lock" here, while lock_index() and dtInsert()
 * use "tlck->lock" without the address-of - verify which is intended */
424 llck = (struct linelock *) & tlck->lock;
425 ASSERT(llck->index == 0);
426 lv = &llck->lv[0];
428 lv->offset = 0;
429 lv->length = 6; /* tlckDATA slot size is 16 bytes */
430 llck->index++;
432 memcpy(mp->data, temp_table, sizeof(temp_table));
434 mark_metapage_dirty(mp);
435 release_metapage(mp);
438 * Logging is now directed by xtree tlocks
440 clear_cflag(COMMIT_Dirtable, ip);
/* external table: locate (and if necessary create) the page for index */
443 offset = (index - 2) * sizeof(struct dir_table_slot);
444 page_offset = offset & (PSIZE - 1);
445 blkno = ((offset + 1) >> L2PSIZE) << sbi->l2nbperpage;
446 if (page_offset == 0) {
448 * This will be the beginning of a new page
450 xaddr = 0;
451 if (xtInsert(tid, ip, 0, blkno, sbi->nbperpage, &xaddr, 0)) {
452 jfs_warn("add_index: xtInsert failed!");
453 goto clean_up;
455 ip->i_size += PSIZE;
457 if ((mp = get_index_page(ip, blkno)))
458 memset(mp->data, 0, PSIZE); /* Just looks better */
459 else
460 xtTruncate(tid, ip, offset, COMMIT_PWMAP);
461 } else
462 mp = read_index_page(ip, blkno);
464 if (mp == 0) {
465 jfs_err("add_index: get/read_metapage failed!");
466 goto clean_up;
469 lock_index(tid, ip, mp, index);
471 dirtab_slot =
472 (struct dir_table_slot *) ((char *) mp->data + page_offset);
473 dirtab_slot->flag = DIR_INDEX_VALID;
474 dirtab_slot->slot = slot;
475 DTSaddress(dirtab_slot, bn);
477 mark_metapage_dirty(mp);
478 release_metapage(mp);
480 return index;
482 clean_up:
/* roll back the index reservation taken at the top; 0 = failure */
484 jfs_ip->next_index--;
486 return 0;
490 * free_index()
492 * Marks an entry to the directory index table as free.
494 static void free_index(tid_t tid, struct inode *ip, u32 index, u32 next)
496 struct dir_table_slot *dirtab_slot;
497 s64 lblock;
498 struct metapage *mp = NULL;
500 dirtab_slot = find_index(ip, index, &mp, &lblock);
502 if (dirtab_slot == 0)
503 return;
505 dirtab_slot->flag = DIR_INDEX_FREE;
506 dirtab_slot->slot = dirtab_slot->addr1 = 0;
507 dirtab_slot->addr2 = cpu_to_le32(next);
509 if (mp) {
510 lock_index(tid, ip, mp, index);
511 mark_metapage_dirty(mp);
512 release_metapage(mp);
513 } else
514 set_cflag(COMMIT_Dirtable, ip);
518 * modify_index()
520 * Changes an entry in the directory index table
522 static void modify_index(tid_t tid, struct inode *ip, u32 index, s64 bn,
523 int slot, struct metapage ** mp, u64 *lblock)
525 struct dir_table_slot *dirtab_slot;
527 dirtab_slot = find_index(ip, index, mp, lblock);
529 if (dirtab_slot == 0)
530 return;
532 DTSaddress(dirtab_slot, bn);
533 dirtab_slot->slot = slot;
535 if (*mp) {
536 lock_index(tid, ip, *mp, index);
537 mark_metapage_dirty(*mp);
538 } else
539 set_cflag(COMMIT_Dirtable, ip);
543 * read_index()
545 * reads a directory table slot
547 static int read_index(struct inode *ip, u32 index,
548 struct dir_table_slot * dirtab_slot)
550 s64 lblock;
551 struct metapage *mp = NULL;
552 struct dir_table_slot *slot;
554 slot = find_index(ip, index, &mp, &lblock);
555 if (slot == 0) {
556 return -EIO;
559 memcpy(dirtab_slot, slot, sizeof(struct dir_table_slot));
561 if (mp)
562 release_metapage(mp);
564 return 0;
568 * dtSearch()
570 * function:
571 * Search for the entry with specified key
573 * parameter:
/*
 * ip      - directory inode
 * key     - component name to search for
 * data    - in: expected inumber (JFS_REMOVE/JFS_RENAME);
 *           out: inumber found (hit) or 0 (miss on CREATE/FINDDIR)
 * btstack - receives the search path; top frame holds the pinned leaf
 * flag    - JFS_LOOKUP / JFS_CREATE / JFS_REMOVE / JFS_RENAME / JFS_FINDDIR
 */
575 * return: 0 - search result on stack, leaf page pinned;
576 * errno - I/O error
578 int dtSearch(struct inode *ip, struct component_name * key, ino_t * data,
579 struct btstack * btstack, int flag)
581 int rc = 0;
582 int cmp = 1; /* init for empty page */
583 s64 bn;
584 struct metapage *mp;
585 dtpage_t *p;
586 s8 *stbl;
587 int base, index, lim;
588 struct btframe *btsp;
589 pxd_t *pxd;
590 int psize = 288; /* initial in-line directory */
591 ino_t inumber;
592 struct component_name ciKey;	/* case-folded copy of the search key */
593 struct super_block *sb = ip->i_sb;
595 ciKey.name =
596 (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
597 GFP_NOFS);
598 if (ciKey.name == 0) {
599 rc = -ENOMEM;
600 goto dtSearch_Exit2;
604 /* uppercase search key for c-i directory */
605 UniStrcpy(ciKey.name, key->name);
606 ciKey.namlen = key->namlen;
608 /* only uppercase if case-insensitive support is on */
609 if ((JFS_SBI(sb)->mntflag & JFS_OS2) == JFS_OS2) {
610 ciToUpper(&ciKey);
612 BT_CLR(btstack); /* reset stack */
614 /* init level count for max pages to split */
615 btstack->nsplit = 1;
618 * search down tree from root:
620 * between two consecutive entries of <Ki, Pi> and <Kj, Pj> of
621 * internal page, child page Pi contains entry with k, Ki <= K < Kj.
623 * if entry with search key K is not found
624 * internal page search find the entry with largest key Ki
625 * less than K which point to the child page to search;
626 * leaf page search find the entry with smallest key Kj
627 * greater than K so that the returned index is the position of
628 * the entry to be shifted right for insertion of new entry.
629 * for empty tree, search key is greater than any key of the tree.
631 * by convention, root bn = 0.
633 for (bn = 0;;) {
634 /* get/pin the page to search */
635 DT_GETPAGE(ip, bn, mp, psize, p, rc);
636 if (rc)
637 goto dtSearch_Exit1;
639 /* get sorted entry table of the page */
640 stbl = DT_GETSTBL(p);
643 * binary search with search key K on the current page.
645 for (base = 0, lim = p->header.nextindex; lim; lim >>= 1) {
646 index = base + (lim >> 1);
648 if (p->header.flag & BT_LEAF) {
649 /* uppercase leaf name to compare */
650 cmp =
651 ciCompare(&ciKey, p, stbl[index],
652 JFS_SBI(sb)->mntflag);
653 } else {
654 /* router key is in uppercase */
656 cmp = dtCompare(&ciKey, p, stbl[index]);
660 if (cmp == 0) {
662 * search hit
664 /* search hit - leaf page:
665 * return the entry found
667 if (p->header.flag & BT_LEAF) {
668 inumber = le32_to_cpu(
669 ((struct ldtentry *) & p->slot[stbl[index]])->inumber);
672 * search for JFS_LOOKUP
674 if (flag == JFS_LOOKUP) {
675 *data = inumber;
676 rc = 0;
677 goto out;
681 * search for JFS_CREATE
683 if (flag == JFS_CREATE) {
684 *data = inumber;
685 rc = -EEXIST;
686 goto out;
690 * search for JFS_REMOVE or JFS_RENAME
/* caller passed the expected inumber in *data; mismatch = stale dentry */
692 if ((flag == JFS_REMOVE ||
693 flag == JFS_RENAME) &&
694 *data != inumber) {
695 rc = -ESTALE;
696 goto out;
700 * JFS_REMOVE|JFS_FINDDIR|JFS_RENAME
702 /* save search result */
703 *data = inumber;
704 btsp = btstack->top;
705 btsp->bn = bn;
706 btsp->index = index;
707 btsp->mp = mp;
/* leaf stays pinned for the caller - skip the unpin at "out" */
709 rc = 0;
710 goto dtSearch_Exit1;
713 /* search hit - internal page:
714 * descend/search its child page
716 goto getChild;
719 if (cmp > 0) {
720 base = index + 1;
721 --lim;
726 * search miss
728 * base is the smallest index with key (Kj) greater than
729 * search key (K) and may be zero or (maxindex + 1) index.
732 * search miss - leaf page
734 * return location of entry (base) where new entry with
735 * search key K is to be inserted.
737 if (p->header.flag & BT_LEAF) {
739 * search for JFS_LOOKUP, JFS_REMOVE, or JFS_RENAME
741 if (flag == JFS_LOOKUP || flag == JFS_REMOVE ||
742 flag == JFS_RENAME) {
743 rc = -ENOENT;
744 goto out;
748 * search for JFS_CREATE|JFS_FINDDIR:
750 * save search result
752 *data = 0;
753 btsp = btstack->top;
754 btsp->bn = bn;
755 btsp->index = base;
756 btsp->mp = mp;
758 rc = 0;
759 goto dtSearch_Exit1;
763 * search miss - internal page
765 * if base is non-zero, decrement base by one to get the parent
766 * entry of the child page to search.
768 index = base ? base - 1 : base;
771 * go down to child page
773 getChild:
774 /* update max. number of pages to split */
775 if (BT_STACK_FULL(btstack)) {
776 /* Something's corrupted, mark filesytem dirty so
777 * chkdsk will fix it.
779 jfs_error(sb, "stack overrun in dtSearch!");
780 BT_STACK_DUMP(btstack);
781 rc = -EIO;
782 goto out;
784 btstack->nsplit++;
786 /* push (bn, index) of the parent page/entry */
787 BT_PUSH(btstack, bn, index);
789 /* get the child page block number */
790 pxd = (pxd_t *) & p->slot[stbl[index]];
791 bn = addressPXD(pxd);
792 psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
794 /* unpin the parent page */
795 DT_PUTPAGE(mp);
/* error paths unpin the current page; success paths leave it pinned */
798 out:
799 DT_PUTPAGE(mp);
801 dtSearch_Exit1:
803 kfree(ciKey.name);
805 dtSearch_Exit2:
807 return rc;
812 * dtInsert()
814 * function: insert an entry to directory tree
816 * parameter:
/*
 * tid     - transaction id
 * ip      - directory inode
 * name    - component name of the new entry
 * fsn     - inode number of the new entry
 * btstack - search result from a prior dtSearch() (leaf pinned, index
 *           at which to insert on top)
 */
818 * return: 0 - success;
819 * errno - failure;
821 int dtInsert(tid_t tid, struct inode *ip,
822 struct component_name * name, ino_t * fsn, struct btstack * btstack)
824 int rc = 0;
825 struct metapage *mp; /* meta-page buffer */
826 dtpage_t *p; /* base B+-tree index page */
827 s64 bn;
828 int index;
829 struct dtsplit split; /* split information */
830 ddata_t data;
831 struct dt_lock *dtlck;
832 int n;
833 struct tlock *tlck;
834 struct lv *lv;
837 * retrieve search result
839 * dtSearch() returns (leaf page pinned, index at which to insert).
840 * n.b. dtSearch() may return index of (maxindex + 1) of
841 * the full page.
843 DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);
846 * insert entry for new key
848 if (DO_INDEX(ip)) {
/* persistent-index directory: cap link count at DIREND */
849 if (JFS_IP(ip)->next_index == DIREND) {
850 DT_PUTPAGE(mp);
851 return -EMLINK;
853 n = NDTLEAF(name->namlen);
854 data.leaf.tid = tid;
855 data.leaf.ip = ip;
856 } else {
857 n = NDTLEAF_LEGACY(name->namlen);
858 data.leaf.ip = NULL; /* signifies legacy directory format */
860 data.leaf.ino = *fsn;
863 * leaf page does not have enough room for new entry:
865 * extend/split the leaf page;
867 * dtSplitUp() will insert the entry and unpin the leaf page.
869 if (n > p->header.freecnt) {
870 split.mp = mp;
871 split.index = index;
872 split.nslot = n;
873 split.key = name;
874 split.data = &data;
875 rc = dtSplitUp(tid, ip, &split, btstack);
876 return rc;
880 * leaf page does have enough room for new entry:
882 * insert the new data entry into the leaf page;
884 BT_MARK_DIRTY(mp, ip);
886 * acquire a transaction lock on the leaf page
888 tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
889 dtlck = (struct dt_lock *) & tlck->lock;
890 ASSERT(dtlck->index == 0);
891 lv = & dtlck->lv[0];
893 /* linelock header */
894 lv->offset = 0;
895 lv->length = 1;
896 dtlck->index++;
898 dtInsertEntry(p, index, name, &data, &dtlck);
900 /* linelock stbl of non-root leaf page */
901 if (!(p->header.flag & BT_ROOT)) {
902 if (dtlck->index >= dtlck->maxcnt)
903 dtlck = (struct dt_lock *) txLinelock(dtlck);
904 lv = & dtlck->lv[dtlck->index];
/* lock the stbl lines touched by shifting entries right at "index" */
905 n = index >> L2DTSLOTSIZE;
906 lv->offset = p->header.stblindex + n;
907 lv->length =
908 ((p->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
909 dtlck->index++;
912 /* unpin the leaf page */
913 DT_PUTPAGE(mp);
915 return 0;
920 * dtSplitUp()
922 * function: propagate insertion bottom up;
924 * parameter:
/*
 * tid     - transaction id
 * ip      - directory inode
 * split   - split descriptor for the full leaf (page, index, key, data)
 * btstack - search path recorded by dtSearch(); popped while walking up
 *
 * On error, unused preallocated extents are freed and any quota
 * charged for page extension is rolled back.
 */
926 * return: 0 - success;
927 * errno - failure;
928 * leaf page unpinned;
930 static int dtSplitUp(tid_t tid,
931 struct inode *ip, struct dtsplit * split, struct btstack * btstack)
933 struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
934 int rc = 0;
935 struct metapage *smp;
936 dtpage_t *sp; /* split page */
937 struct metapage *rmp;
938 dtpage_t *rp; /* new right page split from sp */
939 pxd_t rpxd; /* new right page extent descriptor */
940 struct metapage *lmp;
941 dtpage_t *lp; /* left child page */
942 int skip; /* index of entry of insertion */
943 struct btframe *parent; /* parent page entry on traverse stack */
944 s64 xaddr, nxaddr;
945 int xlen, xsize;
946 struct pxdlist pxdlist;
947 pxd_t *pxd;
948 struct component_name key = { 0, NULL };
949 ddata_t *data = split->data;
950 int n;
951 struct dt_lock *dtlck;
952 struct tlock *tlck;
953 struct lv *lv;
954 int quota_allocation = 0;	/* blocks charged, rolled back on error */
956 /* get split page */
957 smp = split->mp;
958 sp = DT_PAGE(ip, smp);
960 key.name =
961 (wchar_t *) kmalloc((JFS_NAME_MAX + 2) * sizeof(wchar_t),
962 GFP_NOFS);
963 if (key.name == 0) {
964 DT_PUTPAGE(smp);
965 rc = -ENOMEM;
966 goto dtSplitUp_Exit;
970 * split leaf page
972 * The split routines insert the new entry, and
973 * acquire txLock as appropriate.
976 * split root leaf page:
978 if (sp->header.flag & BT_ROOT) {
980 * allocate a single extent child page
982 xlen = 1;
983 n = sbi->bsize >> L2DTSLOTSIZE;
984 n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
985 n -= DTROOTMAXSLOT - sp->header.freecnt; /* header + entries */
986 if (n <= split->nslot)
987 xlen++;
988 if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr))) {
989 DT_PUTPAGE(smp);
990 goto freeKeyName;
993 pxdlist.maxnpxd = 1;
994 pxdlist.npxd = 0;
995 pxd = &pxdlist.pxd[0];
996 PXDaddress(pxd, xaddr);
997 PXDlength(pxd, xlen);
998 split->pxdlist = &pxdlist;
999 rc = dtSplitRoot(tid, ip, split, &rmp);
1001 if (rc)
1002 dbFree(ip, xaddr, xlen);
1003 else
1004 DT_PUTPAGE(rmp);
1006 DT_PUTPAGE(smp);
1008 goto freeKeyName;
1012 * extend first leaf page
1014 * extend the 1st extent if less than buffer page size
1015 * (dtExtendPage() reurns leaf page unpinned)
1017 pxd = &sp->header.self;
1018 xlen = lengthPXD(pxd);
1019 xsize = xlen << sbi->l2bsize;
1020 if (xsize < PSIZE) {
1021 xaddr = addressPXD(pxd);
1022 n = xsize >> L2DTSLOTSIZE;
1023 n -= (n + 31) >> L2DTSLOTSIZE; /* stbl size */
/* double the extent if the new entry still would not fit */
1024 if ((n + sp->header.freecnt) <= split->nslot)
1025 n = xlen + (xlen << 1);
1026 else
1027 n = xlen;
1029 /* Allocate blocks to quota. */
1030 if (DQUOT_ALLOC_BLOCK(ip, n)) {
1031 rc = -EDQUOT;
1032 goto extendOut;
1034 quota_allocation += n;
1036 if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
1037 (s64) n, &nxaddr)))
1038 goto extendOut;
1040 pxdlist.maxnpxd = 1;
1041 pxdlist.npxd = 0;
1042 pxd = &pxdlist.pxd[0];
/* NOTE(review): ';' after PXDaddress(pxd, nxaddr) lost in extraction */
1043 PXDaddress(pxd, nxaddr)
1044 PXDlength(pxd, xlen + n);
1045 split->pxdlist = &pxdlist;
1046 if ((rc = dtExtendPage(tid, ip, split, btstack))) {
1047 nxaddr = addressPXD(pxd);
1048 if (xaddr != nxaddr) {
1049 /* free relocated extent */
1050 xlen = lengthPXD(pxd);
1051 dbFree(ip, nxaddr, (s64) xlen);
1052 } else {
1053 /* free extended delta */
1054 xlen = lengthPXD(pxd) - n;
1055 xaddr = addressPXD(pxd) + xlen;
1056 dbFree(ip, xaddr, (s64) n);
1060 extendOut:
1061 DT_PUTPAGE(smp);
1062 goto freeKeyName;
1066 * split leaf page <sp> into <sp> and a new right page <rp>.
1068 * return <rp> pinned and its extent descriptor <rpxd>
1071 * allocate new directory page extent and
1072 * new index page(s) to cover page split(s)
1074 * allocation hint: ?
1076 n = btstack->nsplit;
1077 pxdlist.maxnpxd = pxdlist.npxd = 0;
1078 xlen = sbi->nbperpage;
1079 for (pxd = pxdlist.pxd; n > 0; n--, pxd++) {
1080 if ((rc = dbAlloc(ip, 0, (s64) xlen, &xaddr)) == 0) {
1081 PXDaddress(pxd, xaddr);
1082 PXDlength(pxd, xlen);
1083 pxdlist.maxnpxd++;
1084 continue;
1087 DT_PUTPAGE(smp);
1089 /* undo allocation */
1090 goto splitOut;
1093 split->pxdlist = &pxdlist;
1094 if ((rc = dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd))) {
1095 DT_PUTPAGE(smp);
1097 /* undo allocation */
1098 goto splitOut;
1102 * propagate up the router entry for the leaf page just split
1104 * insert a router entry for the new page into the parent page,
1105 * propagate the insert/split up the tree by walking back the stack
1106 * of (bn of parent page, index of child page entry in parent page)
1107 * that were traversed during the search for the page that split.
1109 * the propagation of insert/split up the tree stops if the root
1110 * splits or the page inserted into doesn't have to split to hold
1111 * the new entry.
1113 * the parent entry for the split page remains the same, and
1114 * a new entry is inserted at its right with the first key and
1115 * block number of the new right page.
1117 * There are a maximum of 4 pages pinned at any time:
1118 * two children, left parent and right parent (when the parent splits).
1119 * keep the child pages pinned while working on the parent.
1120 * make sure that all pins are released at exit.
1122 while ((parent = BT_POP(btstack)) != NULL) {
1123 /* parent page specified by stack frame <parent> */
1125 /* keep current child pages (<lp>, <rp>) pinned */
1126 lmp = smp;
1127 lp = sp;
1130 * insert router entry in parent for new right child page <rp>
1132 /* get the parent page <sp> */
1133 DT_GETPAGE(ip, parent->bn, smp, PSIZE, sp, rc);
1134 if (rc) {
1135 DT_PUTPAGE(lmp);
1136 DT_PUTPAGE(rmp);
1137 goto splitOut;
1141 * The new key entry goes ONE AFTER the index of parent entry,
1142 * because the split was to the right.
1144 skip = parent->index + 1;
1147 * compute the key for the router entry
1149 * key suffix compression:
1150 * for internal pages that have leaf pages as children,
1151 * retain only what's needed to distinguish between
1152 * the new entry and the entry on the page to its left.
1153 * If the keys compare equal, retain the entire key.
1155 * note that compression is performed only at computing
1156 * router key at the lowest internal level.
1157 * further compression of the key between pairs of higher
1158 * level internal pages loses too much information and
1159 * the search may fail.
1160 * (e.g., two adjacent leaf pages of {a, ..., x} {xx, ...,}
1161 * results in two adjacent parent entries (a)(xx).
1162 * if split occurs between these two entries, and
1163 * if compression is applied, the router key of parent entry
1164 * of right page (x) will divert search for x into right
1165 * subtree and miss x in the left subtree.)
1167 * the entire key must be retained for the next-to-leftmost
1168 * internal key at any level of the tree, or search may fail
1169 * (e.g., ?)
1171 switch (rp->header.flag & BT_TYPE) {
1172 case BT_LEAF:
1174 * compute the length of prefix for suffix compression
1175 * between last entry of left page and first entry
1176 * of right page
1178 if ((sp->header.flag & BT_ROOT && skip > 1) ||
1179 sp->header.prev != 0 || skip > 1) {
1180 /* compute uppercase router prefix key */
1181 rc = ciGetLeafPrefixKey(lp,
1182 lp->header.nextindex-1,
1183 rp, 0, &key,
1184 sbi->mntflag);
1185 if (rc) {
1186 DT_PUTPAGE(lmp);
1187 DT_PUTPAGE(rmp);
1188 DT_PUTPAGE(smp);
1189 goto splitOut;
1191 } else {
1192 /* next to leftmost entry of
1193 lowest internal level */
1195 /* compute uppercase router key */
1196 dtGetKey(rp, 0, &key, sbi->mntflag);
1197 key.name[key.namlen] = 0;
1199 if ((sbi->mntflag & JFS_OS2) == JFS_OS2)
1200 ciToUpper(&key);
1203 n = NDTINTERNAL(key.namlen);
1204 break;
1206 case BT_INTERNAL:
1207 dtGetKey(rp, 0, &key, sbi->mntflag);
1208 n = NDTINTERNAL(key.namlen);
1209 break;
1211 default:
1212 jfs_err("dtSplitUp(): UFO!");
1213 break;
1216 /* unpin left child page */
1217 DT_PUTPAGE(lmp);
1220 * compute the data for the router entry
1222 data->xd = rpxd; /* child page xd */
1225 * parent page is full - split the parent page
1227 if (n > sp->header.freecnt) {
1228 /* init for parent page split */
1229 split->mp = smp;
1230 split->index = skip; /* index at insert */
1231 split->nslot = n;
1232 split->key = &key;
1233 /* split->data = data; */
1235 /* unpin right child page */
1236 DT_PUTPAGE(rmp);
1238 /* The split routines insert the new entry,
1239 * acquire txLock as appropriate.
1240 * return <rp> pinned and its block number <rbn>.
1242 rc = (sp->header.flag & BT_ROOT) ?
1243 dtSplitRoot(tid, ip, split, &rmp) :
1244 dtSplitPage(tid, ip, split, &rmp, &rp, &rpxd);
1245 if (rc) {
1246 DT_PUTPAGE(smp);
1247 goto splitOut;
1250 /* smp and rmp are pinned */
1253 * parent page is not full - insert router entry in parent page
1255 else {
1256 BT_MARK_DIRTY(smp, ip);
1258 * acquire a transaction lock on the parent page
1260 tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
1261 dtlck = (struct dt_lock *) & tlck->lock;
1262 ASSERT(dtlck->index == 0);
1263 lv = & dtlck->lv[0];
1265 /* linelock header */
1266 lv->offset = 0;
1267 lv->length = 1;
1268 dtlck->index++;
1270 /* linelock stbl of non-root parent page */
1271 if (!(sp->header.flag & BT_ROOT)) {
1272 lv++;
1273 n = skip >> L2DTSLOTSIZE;
1274 lv->offset = sp->header.stblindex + n;
1275 lv->length =
1276 ((sp->header.nextindex -
1277 1) >> L2DTSLOTSIZE) - n + 1;
1278 dtlck->index++;
1281 dtInsertEntry(sp, skip, &key, data, &dtlck);
1283 /* exit propagate up */
1284 break;
1288 /* unpin current split and its right page */
1289 DT_PUTPAGE(smp);
1290 DT_PUTPAGE(rmp);
1293 * free remaining extents allocated for split
1295 splitOut:
1296 n = pxdlist.npxd;
1297 pxd = &pxdlist.pxd[n];
1298 for (; n < pxdlist.maxnpxd; n++, pxd++)
1299 dbFree(ip, addressPXD(pxd), (s64) lengthPXD(pxd));
1301 freeKeyName:
1302 kfree(key.name);
1304 /* Rollback quota allocation */
1305 if (rc && quota_allocation)
1306 DQUOT_FREE_BLOCK(ip, quota_allocation);
1308 dtSplitUp_Exit:
1310 return rc;
/*
 *	dtSplitPage()
 *
 * function: Split a non-root page of a btree.
 *
 * parameter:
 *	tid	- transaction id;
 *	ip	- directory inode;
 *	split	- split descriptor: page to split (split->mp), insertion
 *		  index (split->index), slot count of the new entry
 *		  (split->nslot), the new key/data, and the list of
 *		  pre-allocated extents (split->pxdlist) from which the
 *		  right page is carved;
 *	rmpp	- out: metapage of the new right page, returned pinned;
 *	rpp	- out: the new right dtpage itself;
 *	rpxdp	- out: extent descriptor of the new right page;
 *
 * return: 0 - success;
 *	errno - failure;
 *	return split and new page pinned;
 */
static int dtSplitPage(tid_t tid, struct inode *ip, struct dtsplit * split,
	    struct metapage ** rmpp, dtpage_t ** rpp, pxd_t * rpxdp)
{
	int rc = 0;
	struct metapage *smp;
	dtpage_t *sp;
	struct metapage *rmp;
	dtpage_t *rp;		/* new right page allocated */
	s64 rbn;		/* new right page block number */
	struct metapage *mp;
	dtpage_t *p;
	s64 nextbn;
	struct pxdlist *pxdlist;
	pxd_t *pxd;
	int skip, nextindex, half, left, nxt, off, si;
	struct ldtentry *ldtentry;
	struct idtentry *idtentry;
	u8 *stbl;
	struct dtslot *f;
	int fsi, stblsize;
	int n;
	struct dt_lock *sdtlck, *rdtlck;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct lv *slv, *rlv, *lv;

	/* get split page */
	smp = split->mp;
	sp = DT_PAGE(ip, smp);

	/*
	 * allocate the new right page for the split
	 *
	 * the extent was pre-allocated by the caller and handed in
	 * via split->pxdlist; consume the next descriptor from it.
	 */
	pxdlist = split->pxdlist;
	pxd = &pxdlist->pxd[pxdlist->npxd];
	pxdlist->npxd++;
	rbn = addressPXD(pxd);
	rmp = get_metapage(ip, rbn, PSIZE, 1);
	if (rmp == NULL)
		return -EIO;

	/* Allocate blocks to quota. */
	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
		release_metapage(rmp);
		return -EDQUOT;
	}

	jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);

	BT_MARK_DIRTY(rmp, ip);
	/*
	 * acquire a transaction lock on the new right page
	 */
	tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
	rdtlck = (struct dt_lock *) & tlck->lock;

	rp = (dtpage_t *) rmp->data;
	*rpp = rp;
	rp->header.self = *pxd;

	BT_MARK_DIRTY(smp, ip);
	/*
	 * acquire a transaction lock on the split page
	 *
	 * action:
	 */
	tlck = txLock(tid, ip, smp, tlckDTREE | tlckENTRY);
	sdtlck = (struct dt_lock *) & tlck->lock;

	/* linelock header of split page */
	ASSERT(sdtlck->index == 0);
	slv = & sdtlck->lv[0];
	slv->offset = 0;
	slv->length = 1;
	sdtlck->index++;

	/*
	 * initialize/update sibling pointers between sp and rp:
	 * rp is chained between sp and sp's old right sibling (nextbn).
	 */
	nextbn = le64_to_cpu(sp->header.next);
	rp->header.next = cpu_to_le64(nextbn);
	rp->header.prev = cpu_to_le64(addressPXD(&sp->header.self));
	sp->header.next = cpu_to_le64(rbn);

	/*
	 * initialize new right page
	 */
	rp->header.flag = sp->header.flag;

	/* compute sorted entry table at start of extent data area */
	rp->header.nextindex = 0;
	rp->header.stblindex = 1;

	n = PSIZE >> L2DTSLOTSIZE;
	rp->header.maxslot = n;
	stblsize = (n + 31) >> L2DTSLOTSIZE;	/* in unit of slot */

	/* init freelist: everything after the stbl region is free */
	fsi = rp->header.stblindex + stblsize;
	rp->header.freelist = fsi;
	rp->header.freecnt = rp->header.maxslot - fsi;

	/*
	 *	sequential append at tail: append without split
	 *
	 * If splitting the last page on a level because of appending
	 * a entry to it (skip is maxentry), it's likely that the access is
	 * sequential. Adding an empty page on the side of the level is less
	 * work and can push the fill factor much higher than normal.
	 * If we're wrong it's no big deal, we'll just do the split the right
	 * way next time.
	 * (It may look like it's equally easy to do a similar hack for
	 * reverse sorted data, that is, split the tree left,
	 * but it's not. Be my guest.)
	 */
	if (nextbn == 0 && split->index == sp->header.nextindex) {
		/* linelock header + stbl (first slot) of new page */
		rlv = & rdtlck->lv[rdtlck->index];
		rlv->offset = 0;
		rlv->length = 2;
		rdtlck->index++;

		/*
		 * initialize freelist of new right page
		 */
		f = &rp->slot[fsi];
		for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
			f->next = fsi;
		f->next = -1;

		/* insert entry at the first entry of the new right page */
		dtInsertEntry(rp, 0, split->key, split->data, &rdtlck);

		goto out;
	}

	/*
	 *	non-sequential insert (at possibly middle page)
	 */

	/*
	 * update prev pointer of previous right sibling page;
	 */
	if (nextbn != 0) {
		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
		if (rc) {
			discard_metapage(rmp);
			return rc;
		}

		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the next page
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
		jfs_info("dtSplitPage: tlck = 0x%p, ip = 0x%p, mp=0x%p",
			tlck, ip, mp);
		dtlck = (struct dt_lock *) & tlck->lock;

		/* linelock header of previous right sibling page */
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		p->header.prev = cpu_to_le64(rbn);

		DT_PUTPAGE(mp);
	}

	/*
	 * split the data between the split and right pages.
	 */
	skip = split->index;
	half = (PSIZE >> L2DTSLOTSIZE) >> 1;	/* swag */
	left = 0;

	/*
	 *	compute fill factor for split pages
	 *
	 * <nxt> traces the next entry to move to rp
	 * <off> traces the next entry to stay in sp
	 *
	 * accumulate slot counts until roughly half the page's slots
	 * would stay on the left; the new entry's size (split->nslot)
	 * is counted at its insertion position <skip>.
	 */
	stbl = (u8 *) & sp->slot[sp->header.stblindex];
	nextindex = sp->header.nextindex;
	for (nxt = off = 0; nxt < nextindex; ++off) {
		if (off == skip)
			/* check for fill factor with new entry size */
			n = split->nslot;
		else {
			si = stbl[nxt];
			switch (sp->header.flag & BT_TYPE) {
			case BT_LEAF:
				ldtentry = (struct ldtentry *) & sp->slot[si];
				if (DO_INDEX(ip))
					n = NDTLEAF(ldtentry->namlen);
				else
					n = NDTLEAF_LEGACY(ldtentry->
							   namlen);
				break;

			case BT_INTERNAL:
				idtentry = (struct idtentry *) & sp->slot[si];
				n = NDTINTERNAL(idtentry->namlen);
				break;

			default:
				break;
			}

			++nxt;	/* advance to next entry to move in sp */
		}

		left += n;
		if (left >= half)
			break;
	}

	/* <nxt> points to the 1st entry to move */

	/*
	 *	move entries to right page
	 *
	 * dtMoveEntry() initializes rp and reserves entry for insertion
	 *
	 * split page moved out entries are linelocked;
	 * new/right page moved in entries are linelocked;
	 */
	/* linelock header + stbl of new right page */
	rlv = & rdtlck->lv[rdtlck->index];
	rlv->offset = 0;
	rlv->length = 5;
	rdtlck->index++;

	dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));

	sp->header.nextindex = nxt;

	/*
	 * finalize freelist of new right page
	 */
	fsi = rp->header.freelist;
	f = &rp->slot[fsi];
	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	/*
	 * Update directory index table for entries now in right page
	 */
	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
		s64 lblock;

		mp = NULL;
		stbl = DT_GETSTBL(rp);
		for (n = 0; n < rp->header.nextindex; n++) {
			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
			modify_index(tid, ip, le32_to_cpu(ldtentry->index),
				     rbn, n, &mp, &lblock);
		}
		if (mp)
			release_metapage(mp);
	}

	/*
	 * the skipped index was on the left page,
	 */
	if (skip <= off) {
		/* insert the new entry in the split page */
		dtInsertEntry(sp, skip, split->key, split->data, &sdtlck);

		/* linelock stbl of split page */
		if (sdtlck->index >= sdtlck->maxcnt)
			sdtlck = (struct dt_lock *) txLinelock(sdtlck);
		slv = & sdtlck->lv[sdtlck->index];
		n = skip >> L2DTSLOTSIZE;
		slv->offset = sp->header.stblindex + n;
		slv->length =
		    ((sp->header.nextindex - 1) >> L2DTSLOTSIZE) - n + 1;
		sdtlck->index++;
	}
	/*
	 * the skipped index was on the right page,
	 */
	else {
		/* adjust the skip index to reflect the new position */
		skip -= nxt;

		/* insert the new entry in the right page */
		dtInsertEntry(rp, skip, split->key, split->data, &rdtlck);
	}

      out:
	*rmpp = rmp;
	*rpxdp = *pxd;

	return rc;
}
/*
 *	dtExtendPage()
 *
 * function: extend 1st/only directory leaf page
 *
 * parameter:
 *	tid	- transaction id;
 *	ip	- directory inode;
 *	split	- split descriptor: page to extend (split->mp), index and
 *		  key/data of the entry to insert, and the pxdlist holding
 *		  the pre-allocated extent that replaces/extends the page;
 *	btstack	- traverse stack; its top frame locates the parent/root
 *		  page whose router pxd must be updated;
 *
 * return: 0 - success;
 *	errno - failure;
 *	return extended page pinned;
 */
static int dtExtendPage(tid_t tid,
	     struct inode *ip, struct dtsplit * split, struct btstack * btstack)
{
	struct super_block *sb = ip->i_sb;
	int rc;
	struct metapage *smp, *pmp, *mp;
	dtpage_t *sp, *pp;
	struct pxdlist *pxdlist;
	pxd_t *pxd, *tpxd;
	int xlen, xsize;
	int newstblindex, newstblsize;
	int oldstblindex, oldstblsize;
	int fsi, last;
	struct dtslot *f;
	struct btframe *parent;
	int n;
	struct dt_lock *dtlck;
	s64 xaddr, txaddr;
	struct tlock *tlck;
	struct pxd_lock *pxdlock;
	struct lv *lv;
	uint type;
	struct ldtentry *ldtentry;
	u8 *stbl;

	/* get page to extend */
	smp = split->mp;
	sp = DT_PAGE(ip, smp);

	/* get parent/root page */
	parent = BT_POP(btstack);
	DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc);
	if (rc)
		return (rc);

	/*
	 *	extend the extent
	 */
	pxdlist = split->pxdlist;
	pxd = &pxdlist->pxd[pxdlist->npxd];
	pxdlist->npxd++;

	xaddr = addressPXD(pxd);
	tpxd = &sp->header.self;
	txaddr = addressPXD(tpxd);
	/* in-place extension: new extent starts at the same address */
	if (xaddr == txaddr) {
		type = tlckEXTEND;
	}
	/* relocation: page moves to a new address */
	else {
		type = tlckNEW;

		/* save moved extent descriptor for later free */
		tlck = txMaplock(tid, ip, tlckDTREE | tlckRELOCATE);
		pxdlock = (struct pxd_lock *) & tlck->lock;
		pxdlock->flag = mlckFREEPXD;
		pxdlock->pxd = sp->header.self;
		pxdlock->index = 1;

		/*
		 * Update directory index table to reflect new page address
		 */
		if (DO_INDEX(ip)) {
			s64 lblock;

			mp = NULL;
			stbl = DT_GETSTBL(sp);
			for (n = 0; n < sp->header.nextindex; n++) {
				ldtentry =
				    (struct ldtentry *) & sp->slot[stbl[n]];
				modify_index(tid, ip,
					     le32_to_cpu(ldtentry->index),
					     xaddr, n, &mp, &lblock);
			}
			if (mp)
				release_metapage(mp);
		}
	}

	/*
	 *	extend the page
	 */
	sp->header.self = *pxd;

	jfs_info("dtExtendPage: ip:0x%p smp:0x%p sp:0x%p", ip, smp, sp);

	BT_MARK_DIRTY(smp, ip);
	/*
	 * acquire a transaction lock on the extended/leaf page
	 */
	tlck = txLock(tid, ip, smp, tlckDTREE | type);
	dtlck = (struct dt_lock *) & tlck->lock;
	lv = & dtlck->lv[0];

	/* update buffer extent descriptor of extended page */
	xlen = lengthPXD(pxd);
	xsize = xlen << JFS_SBI(sb)->l2bsize;
#ifdef _STILL_TO_PORT
	bmSetXD(smp, xaddr, xsize);
#endif				/* _STILL_TO_PORT */

	/*
	 * copy old stbl to new stbl at start of extended area
	 *
	 * the new stbl begins at the first slot past the old page
	 * (old maxslot); only nextindex bytes are live.
	 */
	oldstblindex = sp->header.stblindex;
	oldstblsize = (sp->header.maxslot + 31) >> L2DTSLOTSIZE;
	newstblindex = sp->header.maxslot;
	n = xsize >> L2DTSLOTSIZE;
	newstblsize = (n + 31) >> L2DTSLOTSIZE;
	memcpy(&sp->slot[newstblindex], &sp->slot[oldstblindex],
	       sp->header.nextindex);

	/*
	 * in-line extension: linelock old area of extended page
	 */
	if (type == tlckEXTEND) {
		/* linelock header */
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;
		lv++;

		/* linelock new stbl of extended page */
		lv->offset = newstblindex;
		lv->length = newstblsize;
	}
	/*
	 * relocation: linelock whole relocated area
	 */
	else {
		lv->offset = 0;
		lv->length = sp->header.maxslot + newstblsize;
	}

	dtlck->index++;

	sp->header.maxslot = n;
	sp->header.stblindex = newstblindex;
	/* sp->header.nextindex remains the same */

	/*
	 * add old stbl region at head of freelist
	 */
	fsi = oldstblindex;
	f = &sp->slot[fsi];
	last = sp->header.freelist;
	for (n = 0; n < oldstblsize; n++, fsi++, f++) {
		f->next = last;
		last = fsi;
	}
	sp->header.freelist = last;
	sp->header.freecnt += oldstblsize;

	/*
	 * append free region of newly extended area at tail of freelist
	 */
	/* init free region of newly extended area */
	fsi = n = newstblindex + newstblsize;
	f = &sp->slot[fsi];
	for (fsi++; fsi < sp->header.maxslot; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	/* append new free region at tail of old freelist */
	fsi = sp->header.freelist;
	if (fsi == -1)
		sp->header.freelist = n;
	else {
		/* walk to the tail of the existing freelist */
		do {
			f = &sp->slot[fsi];
			fsi = f->next;
		} while (fsi != -1);

		f->next = n;
	}

	sp->header.freecnt += sp->header.maxslot - n;

	/*
	 * insert the new entry
	 */
	dtInsertEntry(sp, split->index, split->key, split->data, &dtlck);

	BT_MARK_DIRTY(pmp, ip);
	/*
	 * linelock any freeslots residing in old extent
	 */
	if (type == tlckEXTEND) {
		n = sp->header.maxslot >> 2;
		if (sp->header.freelist < n)
			dtLinelockFreelist(sp, n, &dtlck);
	}

	/*
	 *	update parent entry on the parent/root page
	 */
	/*
	 * acquire a transaction lock on the parent/root page
	 */
	tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
	dtlck = (struct dt_lock *) & tlck->lock;
	lv = & dtlck->lv[dtlck->index];

	/* linelock parent entry - 1st slot */
	lv->offset = 1;
	lv->length = 1;
	dtlck->index++;

	/* update the parent pxd for page extension */
	tpxd = (pxd_t *) & pp->slot[1];
	*tpxd = *pxd;

	DT_PUTPAGE(pmp);
	return 0;
}
/*
 *	dtSplitRoot()
 *
 * function:
 *	split the full root page into
 *	original/root/split page and new right page
 *	i.e., root remains fixed in tree anchor (inode) and
 *	the root is copied to a single new right child page
 *	since root page << non-root page, and
 *	the split root page contains a single entry for the
 *	new right child page.
 *
 * parameter:
 *	tid	- transaction id;
 *	ip	- directory inode (root lives in its in-memory i_dtroot);
 *	split	- split descriptor: insertion index, key/data of the new
 *		  entry, and the pxdlist supplying the child page extent;
 *	rmpp	- out: metapage of the new right child page, pinned;
 *
 * return: 0 - success;
 *	errno - failure;
 *	return new page pinned;
 */
static int dtSplitRoot(tid_t tid,
	    struct inode *ip, struct dtsplit * split, struct metapage ** rmpp)
{
	struct super_block *sb = ip->i_sb;
	struct metapage *smp;
	dtroot_t *sp;
	struct metapage *rmp;
	dtpage_t *rp;
	s64 rbn;
	int xlen;
	int xsize;
	struct dtslot *f;
	s8 *stbl;
	int fsi, stblsize, n;
	struct idtentry *s;
	pxd_t *ppxd;
	struct pxdlist *pxdlist;
	pxd_t *pxd;
	struct dt_lock *dtlck;
	struct tlock *tlck;
	struct lv *lv;

	/* get split root page */
	smp = split->mp;
	sp = &JFS_IP(ip)->i_dtroot;

	/*
	 *	allocate/initialize a single (right) child page
	 *
	 * N.B. at first split, a one (or two) block to fit new entry
	 * is allocated; at subsequent split, a full page is allocated;
	 */
	pxdlist = split->pxdlist;
	pxd = &pxdlist->pxd[pxdlist->npxd];
	pxdlist->npxd++;
	rbn = addressPXD(pxd);
	xlen = lengthPXD(pxd);
	xsize = xlen << JFS_SBI(sb)->l2bsize;
	rmp = get_metapage(ip, rbn, xsize, 1);
	if (!rmp)
		return -EIO;

	rp = rmp->data;

	/* Allocate blocks to quota. */
	if (DQUOT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
		release_metapage(rmp);
		return -EDQUOT;
	}

	BT_MARK_DIRTY(rmp, ip);
	/*
	 * acquire a transaction lock on the new right page
	 */
	tlck = txLock(tid, ip, rmp, tlckDTREE | tlckNEW);
	dtlck = (struct dt_lock *) & tlck->lock;

	rp->header.flag =
	    (sp->header.flag & BT_LEAF) ? BT_LEAF : BT_INTERNAL;
	rp->header.self = *pxd;

	/* initialize sibling pointers: only page on its level */
	rp->header.next = 0;
	rp->header.prev = 0;

	/*
	 *	move in-line root page into new right page extent
	 */
	/* linelock header + copied entries + new stbl (1st slot) in new page */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];
	lv->offset = 0;
	lv->length = 10;	/* 1 + 8 + 1 */
	dtlck->index++;

	n = xsize >> L2DTSLOTSIZE;
	rp->header.maxslot = n;
	stblsize = (n + 31) >> L2DTSLOTSIZE;

	/* copy old stbl to new stbl at start of extended area */
	rp->header.stblindex = DTROOTMAXSLOT;
	stbl = (s8 *) & rp->slot[DTROOTMAXSLOT];
	memcpy(stbl, sp->header.stbl, sp->header.nextindex);
	rp->header.nextindex = sp->header.nextindex;

	/* copy old data area to start of new data area */
	memcpy(&rp->slot[1], &sp->slot[1], IDATASIZE);

	/*
	 * append free region of newly extended area at tail of freelist
	 */
	/* init free region of newly extended area */
	fsi = n = DTROOTMAXSLOT + stblsize;
	f = &rp->slot[fsi];
	for (fsi++; fsi < rp->header.maxslot; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	/* append new free region at tail of old freelist */
	fsi = sp->header.freelist;
	if (fsi == -1)
		rp->header.freelist = n;
	else {
		rp->header.freelist = fsi;

		/* walk to the tail of the copied freelist */
		do {
			f = &rp->slot[fsi];
			fsi = f->next;
		} while (fsi != -1);

		f->next = n;
	}

	rp->header.freecnt = sp->header.freecnt + rp->header.maxslot - n;

	/*
	 * Update directory index table for entries now in right page
	 */
	if ((rp->header.flag & BT_LEAF) && DO_INDEX(ip)) {
		s64 lblock;
		struct metapage *mp = NULL;
		struct ldtentry *ldtentry;

		stbl = DT_GETSTBL(rp);
		for (n = 0; n < rp->header.nextindex; n++) {
			ldtentry = (struct ldtentry *) & rp->slot[stbl[n]];
			modify_index(tid, ip, le32_to_cpu(ldtentry->index),
				     rbn, n, &mp, &lblock);
		}
		if (mp)
			release_metapage(mp);
	}

	/*
	 * insert the new entry into the new right/child page
	 * (skip index in the new right page will not change)
	 */
	dtInsertEntry(rp, split->index, split->key, split->data, &dtlck);

	/*
	 *	reset parent/root page
	 *
	 * set the 1st entry offset to 0, which force the left-most key
	 * at any level of the tree to be less than any search key.
	 *
	 * The btree comparison code guarantees that the left-most key on any
	 * level of the tree is never used, so it doesn't need to be filled in.
	 */
	BT_MARK_DIRTY(smp, ip);
	/*
	 * acquire a transaction lock on the root page (in-memory inode)
	 */
	tlck = txLock(tid, ip, smp, tlckDTREE | tlckNEW | tlckBTROOT);
	dtlck = (struct dt_lock *) & tlck->lock;

	/* linelock root */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];
	lv->offset = 0;
	lv->length = DTROOTMAXSLOT;
	dtlck->index++;

	/* update page header of root: a split leaf root becomes internal */
	if (sp->header.flag & BT_LEAF) {
		sp->header.flag &= ~BT_LEAF;
		sp->header.flag |= BT_INTERNAL;
	}

	/* init the first entry: router to the new child page */
	s = (struct idtentry *) & sp->slot[DTENTRYSTART];
	ppxd = (pxd_t *) s;
	*ppxd = *pxd;
	s->next = -1;
	s->namlen = 0;

	stbl = sp->header.stbl;
	stbl[0] = DTENTRYSTART;
	sp->header.nextindex = 1;

	/* init freelist */
	fsi = DTENTRYSTART + 1;
	f = &sp->slot[fsi];

	/* init free region of remaining area */
	for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	sp->header.freelist = DTENTRYSTART + 1;
	sp->header.freecnt = DTROOTMAXSLOT - (DTENTRYSTART + 1);

	*rmpp = rmp;

	return 0;
}
/*
 *	dtDelete()
 *
 * function: delete the entry(s) referenced by a key.
 *
 * parameter:
 *	tid	- transaction id;
 *	ip	- directory inode;
 *	key	- name of the entry to delete;
 *	ino	- in/out inode number argument handed to dtSearch()
 *		  (presumably the inode number of the entry — verify
 *		  against dtSearch()'s contract);
 *	flag	- search flag passed through to dtSearch();
 *
 * return:
 *	0 on success, errno on failure;
 */
int dtDelete(tid_t tid,
	 struct inode *ip, struct component_name * key, ino_t * ino, int flag)
{
	int rc = 0;
	s64 bn;
	struct metapage *mp, *imp;
	dtpage_t *p;
	int index;
	struct btstack btstack;
	struct dt_lock *dtlck;
	struct tlock *tlck;
	struct lv *lv;
	int i;
	struct ldtentry *ldtentry;
	u8 *stbl;
	u32 table_index, next_index;
	struct metapage *nmp;
	dtpage_t *np;

	/*
	 *	search for the entry to delete:
	 *
	 * dtSearch() returns (leaf page pinned, index at which to delete).
	 */
	if ((rc = dtSearch(ip, key, ino, &btstack, flag)))
		return rc;

	/* retrieve search result */
	DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

	/*
	 * We need to find the index of the next entry into the
	 * directory index table in order to resume a readdir from this
	 * entry.
	 */
	if (DO_INDEX(ip)) {
		stbl = DT_GETSTBL(p);
		ldtentry = (struct ldtentry *) & p->slot[stbl[index]];
		table_index = le32_to_cpu(ldtentry->index);
		if (index == (p->header.nextindex - 1)) {
			/*
			 *	Last entry in this leaf page
			 */
			if ((p->header.flag & BT_ROOT)
			    || (p->header.next == 0))
				next_index = -1;
			else {
				/* Read next leaf page */
				DT_GETPAGE(ip, le64_to_cpu(p->header.next),
					   nmp, PSIZE, np, rc);
				if (rc)
					next_index = -1;
				else {
					/* first entry of the next leaf */
					stbl = DT_GETSTBL(np);
					ldtentry =
					    (struct ldtentry *) & np->
					    slot[stbl[0]];
					next_index =
					    le32_to_cpu(ldtentry->index);
					DT_PUTPAGE(nmp);
				}
			}
		} else {
			ldtentry =
			    (struct ldtentry *) & p->slot[stbl[index + 1]];
			next_index = le32_to_cpu(ldtentry->index);
		}
		free_index(tid, ip, table_index, next_index);
	}
	/*
	 * the leaf page becomes empty, delete the page
	 */
	if (p->header.nextindex == 1) {
		/* delete empty page */
		rc = dtDeleteUp(tid, ip, mp, p, &btstack);
	}
	/*
	 * the leaf page has other entries remaining:
	 *
	 * delete the entry from the leaf page.
	 */
	else {
		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the leaf page
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
		dtlck = (struct dt_lock *) & tlck->lock;

		/*
		 * Do not assume that dtlck->index will be zero.  During a
		 * rename within a directory, this transaction may have
		 * modified this page already when adding the new entry.
		 */

		/* linelock header */
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		/* linelock stbl of non-root leaf page */
		if (!(p->header.flag & BT_ROOT)) {
			if (dtlck->index >= dtlck->maxcnt)
				dtlck = (struct dt_lock *) txLinelock(dtlck);
			lv = & dtlck->lv[dtlck->index];
			i = index >> L2DTSLOTSIZE;
			lv->offset = p->header.stblindex + i;
			lv->length =
			    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
			    i + 1;
			dtlck->index++;
		}

		/* free the leaf entry */
		dtDeleteEntry(p, index, &dtlck);

		/*
		 * Update directory index table for entries moved in stbl
		 */
		if (DO_INDEX(ip) && index < p->header.nextindex) {
			s64 lblock;

			imp = NULL;
			stbl = DT_GETSTBL(p);
			for (i = index; i < p->header.nextindex; i++) {
				ldtentry =
				    (struct ldtentry *) & p->slot[stbl[i]];
				modify_index(tid, ip,
					     le32_to_cpu(ldtentry->index),
					     bn, i, &imp, &lblock);
			}
			if (imp)
				release_metapage(imp);
		}

		DT_PUTPAGE(mp);
	}

	return rc;
}
/*
 *	dtDeleteUp()
 *
 * function:
 *	free empty pages as propagating deletion up the tree
 *
 * parameter:
 *	tid	- transaction id;
 *	ip	- directory inode;
 *	fmp	- metapage of the empty leaf page, pinned on entry;
 *	fp	- the empty leaf dtpage itself;
 *	btstack	- traverse stack recording the parent chain to pop;
 *
 * return:
 *	0 on success, errno on failure;
 */
static int dtDeleteUp(tid_t tid, struct inode *ip,
	   struct metapage * fmp, dtpage_t * fp, struct btstack * btstack)
{
	int rc = 0;
	struct metapage *mp;
	dtpage_t *p;
	int index, nextindex;
	int xlen;
	struct btframe *parent;
	struct dt_lock *dtlck;
	struct tlock *tlck;
	struct lv *lv;
	struct pxd_lock *pxdlock;
	int i;

	/*
	 *	keep the root leaf page which has become empty
	 */
	if (BT_IS_ROOT(fmp)) {
		/*
		 * reset the root
		 *
		 * dtInitRoot() acquires txlock on the root
		 */
		dtInitRoot(tid, ip, PARENT(ip));

		DT_PUTPAGE(fmp);

		return 0;
	}

	/*
	 *	free the non-root leaf page
	 */
	/*
	 * acquire a transaction lock on the page
	 *
	 * write FREEXTENT|NOREDOPAGE log record
	 * N.B. linelock is overlaid as freed extent descriptor, and
	 * the buffer page is freed;
	 */
	tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
	pxdlock = (struct pxd_lock *) & tlck->lock;
	pxdlock->flag = mlckFREEPXD;
	pxdlock->pxd = fp->header.self;
	pxdlock->index = 1;

	/* update sibling pointers */
	if ((rc = dtRelink(tid, ip, fp))) {
		BT_PUTPAGE(fmp);
		return rc;
	}

	xlen = lengthPXD(&fp->header.self);

	/* Free quota allocation. */
	DQUOT_FREE_BLOCK(ip, xlen);

	/* free/invalidate its buffer page */
	discard_metapage(fmp);

	/*
	 *	propagate page deletion up the directory tree
	 *
	 * If the delete from the parent page makes it empty,
	 * continue all the way up the tree.
	 * stop if the root page is reached (which is never deleted) or
	 * if the entry deletion does not empty the page.
	 */
	while ((parent = BT_POP(btstack)) != NULL) {
		/* pin the parent page <sp> */
		DT_GETPAGE(ip, parent->bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		/*
		 * free the extent of the child page deleted
		 */
		index = parent->index;

		/*
		 * delete the entry for the child page from parent
		 */
		nextindex = p->header.nextindex;

		/*
		 * the parent has the single entry being deleted:
		 *
		 * free the parent page which has become empty.
		 */
		if (nextindex == 1) {
			/*
			 * keep the root internal page which has become empty
			 */
			if (p->header.flag & BT_ROOT) {
				/*
				 * reset the root
				 *
				 * dtInitRoot() acquires txlock on the root
				 */
				dtInitRoot(tid, ip, PARENT(ip));

				DT_PUTPAGE(mp);

				return 0;
			}
			/*
			 * free the parent page
			 */
			else {
				/*
				 * acquire a transaction lock on the page
				 *
				 * write FREEXTENT|NOREDOPAGE log record
				 */
				tlck =
				    txMaplock(tid, ip,
					      tlckDTREE | tlckFREE);
				pxdlock = (struct pxd_lock *) & tlck->lock;
				pxdlock->flag = mlckFREEPXD;
				pxdlock->pxd = p->header.self;
				pxdlock->index = 1;

				/* update sibling pointers */
				if ((rc = dtRelink(tid, ip, p))) {
					DT_PUTPAGE(mp);
					return rc;
				}

				xlen = lengthPXD(&p->header.self);

				/* Free quota allocation */
				DQUOT_FREE_BLOCK(ip, xlen);

				/* free/invalidate its buffer page */
				discard_metapage(mp);

				/* propagate up */
				continue;
			}
		}

		/*
		 * the parent has other entries remaining:
		 *
		 * delete the router entry from the parent page.
		 */
		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the page
		 *
		 * action: router entry deletion
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
		dtlck = (struct dt_lock *) & tlck->lock;

		/* linelock header */
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		/* linelock stbl of non-root page */
		if (!(p->header.flag & BT_ROOT)) {
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}
			i = index >> L2DTSLOTSIZE;
			lv->offset = p->header.stblindex + i;
			lv->length =
			    ((p->header.nextindex - 1) >> L2DTSLOTSIZE) -
			    i + 1;
			dtlck->index++;
		}

		/* free the router entry */
		dtDeleteEntry(p, index, &dtlck);

		/* reset key of new leftmost entry of level (for consistency) */
		if (index == 0 &&
		    ((p->header.flag & BT_ROOT) || p->header.prev == 0))
			dtTruncateEntry(p, 0, &dtlck);

		/* unpin the parent page */
		DT_PUTPAGE(mp);

		/* exit propagation up */
		break;
	}

	return 0;
}
2430 #ifdef _NOTYET
/*
 * NAME:	dtRelocate()
 *
 * FUNCTION:	relocate dtpage (internal or leaf) of directory;
 *		This function is mainly used by defragfs utility.
 *
 * PARAMETERS:
 *	tid	- transaction id;
 *	ip	- directory inode;
 *	lmxaddr	- block address of the leftmost page of the dtree level
 *		  containing the page to relocate (used by dtSearchNode());
 *	opxd	- extent descriptor of the page at its old location;
 *	nxaddr	- new block address to move the page to;
 *
 * RETURNS:	0 on success, errno on failure;
 */
int dtRelocate(tid_t tid, struct inode *ip, s64 lmxaddr, pxd_t * opxd,
	       s64 nxaddr)
{
	int rc = 0;
	struct metapage *mp, *pmp, *lmp, *rmp;
	dtpage_t *p, *pp, *rp = 0, *lp= 0;
	s64 bn;
	int index;
	struct btstack btstack;
	pxd_t *pxd;
	s64 oxaddr, nextbn, prevbn;
	int xlen, xsize;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct pxd_lock *pxdlock;
	s8 *stbl;
	struct lv *lv;

	oxaddr = addressPXD(opxd);
	xlen = lengthPXD(opxd);

	jfs_info("dtRelocate: lmxaddr:%Ld xaddr:%Ld:%Ld xlen:%d",
		   (long long)lmxaddr, (long long)oxaddr, (long long)nxaddr,
		   xlen);

	/*
	 *	1. get the internal parent dtpage covering
	 *	router entry for the target page to be relocated;
	 */
	rc = dtSearchNode(ip, lmxaddr, opxd, &btstack);
	if (rc)
		return rc;

	/* retrieve search result */
	DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index);
	jfs_info("dtRelocate: parent router entry validated.");

	/*
	 *	2. relocate the target dtpage
	 */
	/* read in the target page from src extent */
	DT_GETPAGE(ip, oxaddr, mp, PSIZE, p, rc);
	if (rc) {
		/* release the pinned parent page */
		DT_PUTPAGE(pmp);
		return rc;
	}

	/*
	 * read in sibling pages if any to update sibling pointers;
	 */
	rmp = NULL;
	if (p->header.next) {
		nextbn = le64_to_cpu(p->header.next);
		DT_GETPAGE(ip, nextbn, rmp, PSIZE, rp, rc);
		if (rc) {
			DT_PUTPAGE(mp);
			DT_PUTPAGE(pmp);
			return (rc);
		}
	}

	lmp = NULL;
	if (p->header.prev) {
		prevbn = le64_to_cpu(p->header.prev);
		DT_GETPAGE(ip, prevbn, lmp, PSIZE, lp, rc);
		if (rc) {
			DT_PUTPAGE(mp);
			DT_PUTPAGE(pmp);
			if (rmp)
				DT_PUTPAGE(rmp);
			return (rc);
		}
	}

	/* at this point, all xtpages to be updated are in memory */

	/*
	 * update sibling pointers of sibling dtpages if any;
	 */
	if (lmp) {
		tlck = txLock(tid, ip, lmp, tlckDTREE | tlckRELINK);
		dtlck = (struct dt_lock *) & tlck->lock;
		/* linelock header */
		ASSERT(dtlck->index == 0);
		lv = & dtlck->lv[0];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		lp->header.next = cpu_to_le64(nxaddr);
		DT_PUTPAGE(lmp);
	}

	if (rmp) {
		tlck = txLock(tid, ip, rmp, tlckDTREE | tlckRELINK);
		dtlck = (struct dt_lock *) & tlck->lock;
		/* linelock header */
		ASSERT(dtlck->index == 0);
		lv = & dtlck->lv[0];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		rp->header.prev = cpu_to_le64(nxaddr);
		DT_PUTPAGE(rmp);
	}

	/*
	 * update the target dtpage to be relocated
	 *
	 * write LOG_REDOPAGE of LOG_NEW type for dst page
	 * for the whole target page (logredo() will apply
	 * after image and update bmap for allocation of the
	 * dst extent), and update bmap for allocation of
	 * the dst extent;
	 */
	tlck = txLock(tid, ip, mp, tlckDTREE | tlckNEW);
	dtlck = (struct dt_lock *) & tlck->lock;
	/* linelock header */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];

	/* update the self address in the dtpage header */
	pxd = &p->header.self;
	PXDaddress(pxd, nxaddr);

	/* the dst page is the same as the src page, i.e.,
	 * linelock for afterimage of the whole page;
	 */
	lv->offset = 0;
	lv->length = p->header.maxslot;
	dtlck->index++;

	/* update the buffer extent descriptor of the dtpage */
	xsize = xlen << JFS_SBI(ip->i_sb)->l2bsize;
#ifdef _STILL_TO_PORT
	bmSetXD(mp, nxaddr, xsize);
#endif /* _STILL_TO_PORT */
	/* unpin the relocated page */
	DT_PUTPAGE(mp);
	jfs_info("dtRelocate: target dtpage relocated.");

	/* the moved extent is dtpage, then a LOG_NOREDOPAGE log rec
	 * needs to be written (in logredo(), the LOG_NOREDOPAGE log rec
	 * will also force a bmap update ).
	 */

	/*
	 *	3. acquire maplock for the source extent to be freed;
	 */
	/* for dtpage relocation, write a LOG_NOREDOPAGE record
	 * for the source dtpage (logredo() will init NoRedoPage
	 * filter and will also update bmap for free of the source
	 * dtpage), and update bmap for free of the source dtpage;
	 */
	tlck = txMaplock(tid, ip, tlckDTREE | tlckFREE);
	pxdlock = (struct pxd_lock *) & tlck->lock;
	pxdlock->flag = mlckFREEPXD;
	PXDaddress(&pxdlock->pxd, oxaddr);
	PXDlength(&pxdlock->pxd, xlen);
	pxdlock->index = 1;

	/*
	 *	4. update the parent router entry for relocation;
	 *
	 * acquire tlck for the parent entry covering the target dtpage;
	 * write LOG_REDOPAGE to apply after image only;
	 */
	jfs_info("dtRelocate: update parent router entry.");
	tlck = txLock(tid, ip, pmp, tlckDTREE | tlckENTRY);
	dtlck = (struct dt_lock *) & tlck->lock;
	lv = & dtlck->lv[dtlck->index];

	/* update the PXD with the new address */
	stbl = DT_GETSTBL(pp);
	pxd = (pxd_t *) & pp->slot[stbl[index]];
	PXDaddress(pxd, nxaddr);
	lv->offset = stbl[index];
	lv->length = 1;
	dtlck->index++;

	/* unpin the parent dtpage */
	DT_PUTPAGE(pmp);

	return rc;
}
2626 * NAME: dtSearchNode()
2628 * FUNCTION: Search for an dtpage containing a specified address
2629 * This function is mainly used by defragfs utility.
2631 * NOTE: Search result on stack, the found page is pinned at exit.
2632 * The result page must be an internal dtpage.
2633 * lmxaddr give the address of the left most page of the
2634 * dtree level, in which the required dtpage resides.
static int dtSearchNode(struct inode *ip, s64 lmxaddr, pxd_t * kpxd,
			struct btstack * btstack)
{
	int rc = 0;
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	int psize = 288;	/* initial in-line directory */
	s8 *stbl;
	int i;
	pxd_t *pxd;
	struct btframe *btsp;

	BT_CLR(btstack);	/* reset stack */

	/*
	 *	descend tree to the level with specified leftmost page
	 *
	 *  by convention, root bn = 0.
	 */
	for (bn = 0;;) {
		/* get/pin the page to search */
		DT_GETPAGE(ip, bn, mp, psize, p, rc);
		if (rc)
			return rc;

		/* does the xaddr of leftmost page of the level
		 * match the level search key ?
		 */
		if (p->header.flag & BT_ROOT) {
			if (lmxaddr == 0)
				break;
		} else if (addressPXD(&p->header.self) == lmxaddr)
			break;

		/*
		 * descend down to leftmost child page
		 */
		if (p->header.flag & BT_LEAF) {
			/* reached a leaf without matching lmxaddr:
			 * the caller's key is stale
			 */
			DT_PUTPAGE(mp);
			return -ESTALE;
		}

		/* get the leftmost entry */
		stbl = DT_GETSTBL(p);
		pxd = (pxd_t *) & p->slot[stbl[0]];

		/* get the child page block address */
		bn = addressPXD(pxd);
		psize = lengthPXD(pxd) << JFS_SBI(ip->i_sb)->l2bsize;
		/* unpin the parent page */
		DT_PUTPAGE(mp);
	}

	/*
	 *	search each page at the current level
	 */
      loop:
	stbl = DT_GETSTBL(p);
	for (i = 0; i < p->header.nextindex; i++) {
		pxd = (pxd_t *) & p->slot[stbl[i]];

		/* found the specified router entry: return with the
		 * containing page still pinned on the stack top
		 */
		if (addressPXD(pxd) == addressPXD(kpxd) &&
		    lengthPXD(pxd) == lengthPXD(kpxd)) {
			btsp = btstack->top;
			btsp->bn = bn;
			btsp->index = i;
			btsp->mp = mp;

			return 0;
		}
	}

	/* get the right sibling page if any */
	if (p->header.next)
		bn = le64_to_cpu(p->header.next);
	else {
		/* exhausted the level without finding kpxd */
		DT_PUTPAGE(mp);
		return -ESTALE;
	}

	/* unpin current page */
	DT_PUTPAGE(mp);

	/* get the right sibling page */
	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	goto loop;
}
2728 #endif /* _NOTYET */
2731 * dtRelink()
2733 * function:
2734 * link around a freed page.
2736 * parameter:
2737 * fp: page to be freed
2739 * return:
static int dtRelink(tid_t tid, struct inode *ip, dtpage_t * p)
{
	int rc;
	struct metapage *mp;
	s64 nextbn, prevbn;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct lv *lv;

	/* sibling block addresses of the page being freed */
	nextbn = le64_to_cpu(p->header.next);
	prevbn = le64_to_cpu(p->header.prev);

	/* update prev pointer of the next page */
	if (nextbn != 0) {
		/* NB: p is reused here to map the sibling page */
		DT_GETPAGE(ip, nextbn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the next page
		 *
		 * action: update prev pointer;
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
		jfs_info("dtRelink nextbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
			tlck, ip, mp);
		dtlck = (struct dt_lock *) & tlck->lock;

		/* linelock header */
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		p->header.prev = cpu_to_le64(prevbn);
		DT_PUTPAGE(mp);
	}

	/* update next pointer of the previous page */
	if (prevbn != 0) {
		DT_GETPAGE(ip, prevbn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		BT_MARK_DIRTY(mp, ip);
		/*
		 * acquire a transaction lock on the prev page
		 *
		 * action: update next pointer;
		 */
		tlck = txLock(tid, ip, mp, tlckDTREE | tlckRELINK);
		jfs_info("dtRelink prevbn: tlck = 0x%p, ip = 0x%p, mp=0x%p",
			tlck, ip, mp);
		dtlck = (struct dt_lock *) & tlck->lock;

		/* linelock header */
		if (dtlck->index >= dtlck->maxcnt)
			dtlck = (struct dt_lock *) txLinelock(dtlck);
		lv = & dtlck->lv[dtlck->index];
		lv->offset = 0;
		lv->length = 1;
		dtlck->index++;

		p->header.next = cpu_to_le64(nextbn);
		DT_PUTPAGE(mp);
	}

	return 0;
}
2816 * dtInitRoot()
2818 * initialize directory root (inline in inode)
void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
{
	struct jfs_inode_info *jfs_ip = JFS_IP(ip);
	dtroot_t *p;
	int fsi;
	struct dtslot *f;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct lv *lv;
	u16 xflag_save;

	/*
	 * If this was previously an non-empty directory, we need to remove
	 * the old directory table.
	 */
	if (DO_INDEX(ip)) {
		if (!jfs_dirtable_inline(ip)) {
			struct tblock *tblk = tid_to_tblock(tid);
			/*
			 * We're playing games with the tid's xflag.  If
			 * we're removing a regular file, the file's xtree
			 * is committed with COMMIT_PMAP, but we always
			 * commit the directories xtree with COMMIT_PWMAP.
			 */
			xflag_save = tblk->xflag;
			tblk->xflag = 0;
			/*
			 * xtTruncate isn't guaranteed to fully truncate
			 * the xtree.  The caller needs to check i_size
			 * after committing the transaction to see if
			 * additional truncation is needed.  The
			 * COMMIT_Stale flag tells caller that we
			 * initiated the truncation.
			 */
			xtTruncate(tid, ip, 0, COMMIT_PWMAP);
			set_cflag(COMMIT_Stale, ip);

			tblk->xflag = xflag_save;
		} else
			ip->i_size = 1;

		jfs_ip->next_index = 2;
	} else
		ip->i_size = IDATASIZE;

	/*
	 *	acquire a transaction lock on the root
	 *
	 * action: directory initialization;
	 */
	tlck = txLock(tid, ip, (struct metapage *) & jfs_ip->bxflag,
		      tlckDTREE | tlckENTRY | tlckBTROOT);
	dtlck = (struct dt_lock *) & tlck->lock;

	/* linelock root */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];
	lv->offset = 0;
	lv->length = DTROOTMAXSLOT;
	dtlck->index++;

	p = &jfs_ip->i_dtroot;

	p->header.flag = DXD_INDEX | BT_ROOT | BT_LEAF;

	p->header.nextindex = 0;

	/* init freelist: chain every data slot after the header */
	fsi = 1;
	f = &p->slot[fsi];

	/* init data area of root */
	for (fsi++; fsi < DTROOTMAXSLOT; f++, fsi++)
		f->next = fsi;
	f->next = -1;

	p->header.freelist = 1;
	p->header.freecnt = 8;	/* presumably DTROOTMAXSLOT - 1 — confirm */

	/* init '..' entry */
	p->header.idotdot = cpu_to_le32(idotdot);

	return;
}
2906 * add_missing_indices()
2908 * function: Fix dtree page in which one or more entries has an invalid index.
2909 * fsck.jfs should really fix this, but it currently does not.
2910 * Called from jfs_readdir when bad index is detected.
static void add_missing_indices(struct inode *inode, s64 bn)
{
	struct ldtentry *d;
	struct dt_lock *dtlck;
	int i;
	uint index;
	struct lv *lv;
	struct metapage *mp;
	dtpage_t *p;
	int rc;
	s8 *stbl;
	tid_t tid;
	struct tlock *tlck;

	/* repair runs in its own transaction */
	tid = txBegin(inode->i_sb, 0);

	DT_GETPAGE(inode, bn, mp, PSIZE, p, rc);

	if (rc) {
		printk(KERN_ERR "DT_GETPAGE failed!\n");
		goto end;
	}
	BT_MARK_DIRTY(mp, inode);

	ASSERT(p->header.flag & BT_LEAF);

	tlck = txLock(tid, inode, mp, tlckDTREE | tlckENTRY);
	if (BT_IS_ROOT(mp))
		tlck->type |= tlckBTROOT;

	dtlck = (struct dt_lock *) &tlck->lock;

	stbl = DT_GETSTBL(p);
	for (i = 0; i < p->header.nextindex; i++) {
		d = (struct ldtentry *) &p->slot[stbl[i]];
		index = le32_to_cpu(d->index);
		/* valid indices are in [2, next_index); anything else
		 * gets a fresh slot from the directory index table
		 */
		if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
			d->index = cpu_to_le32(add_index(tid, inode, bn, i));
			if (dtlck->index >= dtlck->maxcnt)
				dtlck = (struct dt_lock *) txLinelock(dtlck);
			lv = &dtlck->lv[dtlck->index];
			lv->offset = stbl[i];
			lv->length = 1;
			dtlck->index++;
		}
	}

	DT_PUTPAGE(mp);
	(void) txCommit(tid, 1, &inode, 0);
      end:
	txEnd(tid);
}
2966 * Buffer to hold directory entry info while traversing a dtree page
2967 * before being fed to the filldir function
2969 struct jfs_dirent {
2970 loff_t position;
2971 int ino;
2972 u16 name_len;
2973 char name[0];
2977 * function to determine next variable-sized jfs_dirent in buffer
2979 static inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
2981 return (struct jfs_dirent *)
2982 ((char *)dirent +
2983 ((sizeof (struct jfs_dirent) + dirent->name_len + 1 +
2984 sizeof (loff_t) - 1) &
2985 ~(sizeof (loff_t) - 1)));
2989 * jfs_readdir()
2991 * function: read directory entries sequentially
2992 * from the specified entry offset
2994 * parameter:
2996 * return: offset = (pn, index) of start entry
2997 * of next jfs_readdir()/dtRead()
int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *ip = filp->f_dentry->d_inode;
	struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
	int rc = 0;
	loff_t dtpos;	/* legacy OS/2 style position */
	struct dtoffset {
		s16 pn;
		s16 index;
		s32 unused;
	} *dtoffset = (struct dtoffset *) &dtpos;
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	int index;
	s8 *stbl;
	struct btstack btstack;
	int i, next;
	struct ldtentry *d;
	struct dtslot *t;
	int d_namleft, len, outlen;
	unsigned long dirent_buf;
	char *name_ptr;
	u32 dir_index;
	int do_index = 0;
	uint loop_count = 0;
	struct jfs_dirent *jfs_dirent;
	int jfs_dirents;
	int overflow, fix_page, page_fixed = 0;
	static int unique_pos = 2;	/* If we can't fix broken index */

	if (filp->f_pos == DIREND)
		return 0;

	if (DO_INDEX(ip)) {
		/*
		 * persistent index is stored in directory entries.
		 * Special cases:	 0 = .
		 *			 1 = ..
		 *			-1 = End of directory
		 */
		do_index = 1;

		dir_index = (u32) filp->f_pos;

		if (dir_index > 1) {
			struct dir_table_slot dirtab_slot;

			if (dtEmpty(ip) ||
			    (dir_index >= JFS_IP(ip)->next_index)) {
				/* Stale position. Directory has shrunk */
				filp->f_pos = DIREND;
				return 0;
			}
		      repeat:
			rc = read_index(ip, dir_index, &dirtab_slot);
			if (rc) {
				filp->f_pos = DIREND;
				return rc;
			}
			if (dirtab_slot.flag == DIR_INDEX_FREE) {
				/* freed slot: follow addr2 to the next
				 * live index, bounded to avoid cycles
				 */
				if (loop_count++ > JFS_IP(ip)->next_index) {
					jfs_err("jfs_readdir detected "
						"infinite loop!");
					filp->f_pos = DIREND;
					return 0;
				}
				dir_index = le32_to_cpu(dirtab_slot.addr2);
				if (dir_index == -1) {
					filp->f_pos = DIREND;
					return 0;
				}
				goto repeat;
			}
			bn = addressDTS(&dirtab_slot);
			index = dirtab_slot.slot;
			DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
			if (rc) {
				filp->f_pos = DIREND;
				return 0;
			}
			if (p->header.flag & BT_INTERNAL) {
				jfs_err("jfs_readdir: bad index table");
				DT_PUTPAGE(mp);
				filp->f_pos = -1;
				return 0;
			}
		} else {
			if (dir_index == 0) {
				/*
				 * self "."
				 */
				filp->f_pos = 0;
				if (filldir(dirent, ".", 1, 0, ip->i_ino,
					    DT_DIR))
					return 0;
			}
			/*
			 * parent ".."
			 */
			filp->f_pos = 1;
			if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR))
				return 0;

			/*
			 * Find first entry of left-most leaf
			 */
			if (dtEmpty(ip)) {
				filp->f_pos = DIREND;
				return 0;
			}

			if ((rc = dtReadFirst(ip, &btstack)))
				return rc;

			DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
		}
	} else {
		/*
		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
		 *
		 * pn = index = 0:	First entry "."
		 * pn = 0; index = 1:	Second entry ".."
		 * pn > 0:		Real entries, pn=1 -> leftmost page
		 * pn = index = -1:	No more entries
		 */
		dtpos = filp->f_pos;
		if (dtpos == 0) {
			/* build "." entry */

			if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
				    DT_DIR))
				return 0;
			dtoffset->index = 1;
			filp->f_pos = dtpos;
		}

		if (dtoffset->pn == 0) {
			if (dtoffset->index == 1) {
				/* build ".." entry */

				if (filldir(dirent, "..", 2, filp->f_pos,
					    PARENT(ip), DT_DIR))
					return 0;
			} else {
				jfs_err("jfs_readdir called with "
					"invalid offset!");
			}
			dtoffset->pn = 1;
			dtoffset->index = 0;
			filp->f_pos = dtpos;
		}

		if (dtEmpty(ip)) {
			filp->f_pos = DIREND;
			return 0;
		}

		if ((rc = dtReadNext(ip, &filp->f_pos, &btstack))) {
			jfs_err("jfs_readdir: unexpected rc = %d "
				"from dtReadNext", rc);
			filp->f_pos = DIREND;
			return 0;
		}
		/* get start leaf page and index */
		DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

		/* offset beyond directory eof ? */
		if (bn < 0) {
			filp->f_pos = DIREND;
			return 0;
		}
	}

	/* scratch page: entries are staged here before filldir,
	 * so the leaf page can be unpinned before calling back
	 */
	dirent_buf = __get_free_page(GFP_KERNEL);
	if (dirent_buf == 0) {
		DT_PUTPAGE(mp);
		jfs_warn("jfs_readdir: __get_free_page failed!");
		filp->f_pos = DIREND;
		return -ENOMEM;
	}

	while (1) {
		jfs_dirent = (struct jfs_dirent *) dirent_buf;
		jfs_dirents = 0;
		overflow = fix_page = 0;

		stbl = DT_GETSTBL(p);

		for (i = index; i < p->header.nextindex; i++) {
			d = (struct ldtentry *) & p->slot[stbl[i]];

			if (((long) jfs_dirent + d->namlen + 1) >
			    (dirent_buf + PAGE_SIZE)) {
				/* DBCS codepages could overrun dirent_buf */
				index = i;
				overflow = 1;
				break;
			}

			d_namleft = d->namlen;
			name_ptr = jfs_dirent->name;
			jfs_dirent->ino = le32_to_cpu(d->inumber);

			if (do_index) {
				len = min(d_namleft, DTLHDRDATALEN);
				jfs_dirent->position = le32_to_cpu(d->index);
				/*
				 * d->index should always be valid, but it
				 * isn't.  fsck.jfs doesn't create the
				 * directory index for the lost+found
				 * directory.  Rather than let it go,
				 * we can try to fix it.
				 */
				if ((jfs_dirent->position < 2) ||
				    (jfs_dirent->position >=
				     JFS_IP(ip)->next_index)) {
					if (!page_fixed && !isReadOnly(ip)) {
						fix_page = 1;
						/*
						 * setting overflow and setting
						 * index to i will cause the
						 * same page to be processed
						 * again starting here
						 */
						overflow = 1;
						index = i;
						break;
					}
					jfs_dirent->position = unique_pos++;
				}
			} else {
				jfs_dirent->position = dtpos;
				len = min(d_namleft, DTLHDRDATALEN_LEGACY);
			}

			/* copy the name of head/only segment */
			outlen = jfs_strfromUCS_le(name_ptr, d->name, len,
						   codepage);
			jfs_dirent->name_len = outlen;

			/* copy name in the additional segment(s) */
			next = d->next;
			while (next >= 0) {
				t = (struct dtslot *) & p->slot[next];
				name_ptr += outlen;
				d_namleft -= len;
				/* Sanity Check */
				if (d_namleft == 0) {
					jfs_error(ip->i_sb,
						  "JFS:Dtree error: ino = "
						  "%ld, bn=%Ld, index = %d",
						  (long) ip->i_ino,
						  (long long) bn,
						  i);
					goto skip_one;
				}
				len = min(d_namleft, DTSLOTDATALEN);
				outlen = jfs_strfromUCS_le(name_ptr, t->name,
							   len, codepage);
				jfs_dirent->name_len += outlen;

				next = t->next;
			}

			jfs_dirents++;
			jfs_dirent = next_jfs_dirent(jfs_dirent);
		      skip_one:
			if (!do_index)
				dtoffset->index++;
		}

		if (!overflow) {
			/* Point to next leaf page */
			if (p->header.flag & BT_ROOT)
				bn = 0;
			else {
				bn = le64_to_cpu(p->header.next);
				index = 0;
				/* update offset (pn:index) for new page */
				if (!do_index) {
					dtoffset->pn++;
					dtoffset->index = 0;
				}
			}
			page_fixed = 0;
		}

		/* unpin previous leaf page */
		DT_PUTPAGE(mp);

		/* drain the staged entries into filldir */
		jfs_dirent = (struct jfs_dirent *) dirent_buf;
		while (jfs_dirents--) {
			filp->f_pos = jfs_dirent->position;
			if (filldir(dirent, jfs_dirent->name,
				    jfs_dirent->name_len, filp->f_pos,
				    jfs_dirent->ino, DT_UNKNOWN))
				goto out;
			jfs_dirent = next_jfs_dirent(jfs_dirent);
		}

		if (fix_page) {
			/* repair invalid d->index values, then re-scan
			 * this same page (overflow was set above)
			 */
			add_missing_indices(ip, bn);
			page_fixed = 1;
		}

		if (!overflow && (bn == 0)) {
			filp->f_pos = DIREND;
			break;
		}

		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc) {
			free_page(dirent_buf);
			return rc;
		}
	}

      out:
	free_page(dirent_buf);

	return rc;
}
3325 * dtReadFirst()
3327 * function: get the leftmost page of the directory
static int dtReadFirst(struct inode *ip, struct btstack * btstack)
{
	int rc = 0;
	s64 bn;
	int psize = 288;	/* initial in-line directory */
	struct metapage *mp;
	dtpage_t *p;
	s8 *stbl;
	struct btframe *btsp;
	pxd_t *xd;

	BT_CLR(btstack);	/* reset stack */

	/*
	 *	descend leftmost path of the tree
	 *
	 * by convention, root bn = 0.
	 */
	for (bn = 0;;) {
		DT_GETPAGE(ip, bn, mp, psize, p, rc);
		if (rc)
			return rc;

		/*
		 *	leftmost leaf page
		 */
		if (p->header.flag & BT_LEAF) {
			/* return leftmost entry, page left pinned */
			btsp = btstack->top;
			btsp->bn = bn;
			btsp->index = 0;
			btsp->mp = mp;

			return 0;
		}

		/*
		 *	descend down to leftmost child page
		 */
		if (BT_STACK_FULL(btstack)) {
			DT_PUTPAGE(mp);
			jfs_error(ip->i_sb, "dtReadFirst: btstack overrun");
			BT_STACK_DUMP(btstack);
			return -EIO;
		}
		/* push (bn, index) of the parent page/entry */
		BT_PUSH(btstack, bn, 0);

		/* get the leftmost entry */
		stbl = DT_GETSTBL(p);
		xd = (pxd_t *) & p->slot[stbl[0]];

		/* get the child page block address */
		bn = addressPXD(xd);
		psize = lengthPXD(xd) << JFS_SBI(ip->i_sb)->l2bsize;

		/* unpin the parent page */
		DT_PUTPAGE(mp);
	}
}
3392 * dtReadNext()
3394 * function: get the page of the specified offset (pn:index)
3396 * return: if (offset > eof), bn = -1;
3398 * note: if index > nextindex of the target leaf page,
3399 * start with 1st entry of next leaf page;
static int dtReadNext(struct inode *ip, loff_t * offset,
		      struct btstack * btstack)
{
	int rc = 0;
	struct dtoffset {
		s16 pn;
		s16 index;
		s32 unused;
	} *dtoffset = (struct dtoffset *) offset;
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	int index;
	int pn;
	s8 *stbl;
	struct btframe *btsp, *parent;
	pxd_t *xd;

	/*
	 *	get leftmost leaf page pinned
	 */
	if ((rc = dtReadFirst(ip, btstack)))
		return rc;

	/* get leaf page */
	DT_GETSEARCH(ip, btstack->top, bn, mp, p, index);

	/* get the start offset (pn:index) */
	pn = dtoffset->pn - 1;	/* Now pn = 0 represents leftmost leaf */
	index = dtoffset->index;

	/* start at leftmost page ? */
	if (pn == 0) {
		/* offset beyond eof ? */
		if (index < p->header.nextindex)
			goto out;

		if (p->header.flag & BT_ROOT) {
			bn = -1;
			goto out;
		}

		/* start with 1st entry of next leaf page */
		dtoffset->pn++;
		dtoffset->index = index = 0;
		goto a;
	}

	/* start at non-leftmost page: scan parent pages for large pn */
	if (p->header.flag & BT_ROOT) {
		bn = -1;
		goto out;
	}

	/* start after next leaf page ? */
	if (pn > 1)
		goto b;

	/* get leaf page pn = 1 */
      a:
	bn = le64_to_cpu(p->header.next);

	/* unpin leaf page */
	DT_PUTPAGE(mp);

	/* offset beyond eof ? */
	if (bn == 0) {
		bn = -1;
		goto out;
	}

	goto c;

	/*
	 * scan last internal page level to get target leaf page
	 */
      b:
	/* unpin leftmost leaf page */
	DT_PUTPAGE(mp);

	/* get left most parent page */
	btsp = btstack->top;
	parent = btsp - 1;
	bn = parent->bn;
	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/* scan parent pages at last internal page level,
	 * consuming pn entries per page until pn indexes into
	 * the current parent page
	 */
	while (pn >= p->header.nextindex) {
		pn -= p->header.nextindex;

		/* get next parent page address */
		bn = le64_to_cpu(p->header.next);

		/* unpin current parent page */
		DT_PUTPAGE(mp);

		/* offset beyond eof ? */
		if (bn == 0) {
			bn = -1;
			goto out;
		}

		/* get next parent page */
		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		/* update parent page stack frame */
		parent->bn = bn;
	}

	/* get leaf page address */
	stbl = DT_GETSTBL(p);
	xd = (pxd_t *) & p->slot[stbl[pn]];
	bn = addressPXD(xd);

	/* unpin parent page */
	DT_PUTPAGE(mp);

	/*
	 *	get target leaf page
	 */
      c:
	DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
	if (rc)
		return rc;

	/*
	 * leaf page has been completed:
	 * start with 1st entry of next leaf page
	 */
	if (index >= p->header.nextindex) {
		bn = le64_to_cpu(p->header.next);

		/* unpin leaf page */
		DT_PUTPAGE(mp);

		/* offset beyond eof ? */
		if (bn == 0) {
			bn = -1;
			goto out;
		}

		/* get next leaf page */
		DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
		if (rc)
			return rc;

		/* start with 1st entry of next leaf page */
		dtoffset->pn++;
		dtoffset->index = 0;
	}

      out:
	/* return target leaf page pinned (bn == -1 marks eof) */
	btsp = btstack->top;
	btsp->bn = bn;
	btsp->index = dtoffset->index;
	btsp->mp = mp;

	return 0;
}
3568 * dtCompare()
3570 * function: compare search key with an internal entry
3572 * return:
3573 * < 0 if k is < record
3574 * = 0 if k is = record
3575 * > 0 if k is > record
3577 static int dtCompare(struct component_name * key, /* search key */
3578 dtpage_t * p, /* directory page */
3579 int si)
3580 { /* entry slot index */
3581 wchar_t *kname;
3582 __le16 *name;
3583 int klen, namlen, len, rc;
3584 struct idtentry *ih;
3585 struct dtslot *t;
3588 * force the left-most key on internal pages, at any level of
3589 * the tree, to be less than any search key.
3590 * this obviates having to update the leftmost key on an internal
3591 * page when the user inserts a new key in the tree smaller than
3592 * anything that has been stored.
3594 * (? if/when dtSearch() narrows down to 1st entry (index = 0),
3595 * at any internal page at any level of the tree,
3596 * it descends to child of the entry anyway -
3597 * ? make the entry as min size dummy entry)
3599 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
3600 * return (1);
3603 kname = key->name;
3604 klen = key->namlen;
3606 ih = (struct idtentry *) & p->slot[si];
3607 si = ih->next;
3608 name = ih->name;
3609 namlen = ih->namlen;
3610 len = min(namlen, DTIHDRDATALEN);
3612 /* compare with head/only segment */
3613 len = min(klen, len);
3614 if ((rc = UniStrncmp_le(kname, name, len)))
3615 return rc;
3617 klen -= len;
3618 namlen -= len;
3620 /* compare with additional segment(s) */
3621 kname += len;
3622 while (klen > 0 && namlen > 0) {
3623 /* compare with next name segment */
3624 t = (struct dtslot *) & p->slot[si];
3625 len = min(namlen, DTSLOTDATALEN);
3626 len = min(klen, len);
3627 name = t->name;
3628 if ((rc = UniStrncmp_le(kname, name, len)))
3629 return rc;
3631 klen -= len;
3632 namlen -= len;
3633 kname += len;
3634 si = t->next;
3637 return (klen - namlen);
3644 * ciCompare()
3646 * function: compare search key with an (leaf/internal) entry
3648 * return:
3649 * < 0 if k is < record
3650 * = 0 if k is = record
3651 * > 0 if k is > record
static int ciCompare(struct component_name * key,	/* search key */
		     dtpage_t * p,	/* directory page */
		     int si,	/* entry slot index */
		     int flag)
{
	wchar_t *kname, x;
	__le16 *name;
	int klen, namlen, len, rc;
	struct ldtentry *lh;
	struct idtentry *ih;
	struct dtslot *t;
	int i;

	/*
	 * force the left-most key on internal pages, at any level of
	 * the tree, to be less than any search key.
	 * this obviates having to update the leftmost key on an internal
	 * page when the user inserts a new key in the tree smaller than
	 * anything that has been stored.
	 *
	 * (? if/when dtSearch() narrows down to 1st entry (index = 0),
	 * at any internal page at any level of the tree,
	 * it descends to child of the entry anyway -
	 * ? make the entry as min size dummy entry)
	 *
	 * if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & BT_LEAF))
	 * return (1);
	 */
	kname = key->name;
	klen = key->namlen;

	/*
	 *	leaf page entry
	 */
	if (p->header.flag & BT_LEAF) {
		lh = (struct ldtentry *) & p->slot[si];
		si = lh->next;
		name = lh->name;
		namlen = lh->namlen;
		/* indexed directories store a 4-byte index in the head
		 * segment, so it holds fewer name characters
		 */
		if (flag & JFS_DIR_INDEX)
			len = min(namlen, DTLHDRDATALEN);
		else
			len = min(namlen, DTLHDRDATALEN_LEGACY);
	}
	/*
	 *	internal page entry
	 */
	else {
		ih = (struct idtentry *) & p->slot[si];
		si = ih->next;
		name = ih->name;
		namlen = ih->namlen;
		len = min(namlen, DTIHDRDATALEN);
	}

	/* compare with head/only segment */
	len = min(klen, len);
	for (i = 0; i < len; i++, kname++, name++) {
		/* only uppercase if case-insensitive support is on */
		if ((flag & JFS_OS2) == JFS_OS2)
			x = UniToupper(le16_to_cpu(*name));
		else
			x = le16_to_cpu(*name);
		if ((rc = *kname - x))
			return rc;
	}

	klen -= len;
	namlen -= len;

	/* compare with additional segment(s) */
	while (klen > 0 && namlen > 0) {
		/* compare with next name segment */
		t = (struct dtslot *) & p->slot[si];
		len = min(namlen, DTSLOTDATALEN);
		len = min(klen, len);
		name = t->name;
		for (i = 0; i < len; i++, kname++, name++) {
			/* only uppercase if case-insensitive support is on */
			if ((flag & JFS_OS2) == JFS_OS2)
				x = UniToupper(le16_to_cpu(*name));
			else
				x = le16_to_cpu(*name);

			if ((rc = *kname - x))
				return rc;
		}

		klen -= len;
		namlen -= len;
		si = t->next;
	}

	/* equal prefix: longer name sorts greater */
	return (klen - namlen);
}
3752 * ciGetLeafPrefixKey()
3754 * function: compute prefix of suffix compression
3755 * from two adjacent leaf entries
3756 * across page boundary
3758 * return: non-zero on error
static int ciGetLeafPrefixKey(dtpage_t * lp, int li, dtpage_t * rp,
			      int ri, struct component_name * key, int flag)
{
	int klen, namlen;
	wchar_t *pl, *pr, *kname;
	struct component_name lkey;
	struct component_name rkey;

	/* temporary buffers for the two boundary keys */
	lkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
					GFP_KERNEL);
	if (lkey.name == NULL)
		return -ENOSPC;

	rkey.name = (wchar_t *) kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t),
					GFP_KERNEL);
	if (rkey.name == NULL) {
		kfree(lkey.name);
		return -ENOSPC;
	}

	/* get left and right key */
	dtGetKey(lp, li, &lkey, flag);
	lkey.name[lkey.namlen] = 0;

	/* fold case before comparing when OS/2 semantics apply */
	if ((flag & JFS_OS2) == JFS_OS2)
		ciToUpper(&lkey);

	dtGetKey(rp, ri, &rkey, flag);
	rkey.name[rkey.namlen] = 0;

	if ((flag & JFS_OS2) == JFS_OS2)
		ciToUpper(&rkey);

	/* compute prefix: shortest right-key prefix that still
	 * distinguishes it from the left key
	 */
	klen = 0;
	kname = key->name;
	namlen = min(lkey.namlen, rkey.namlen);
	for (pl = lkey.name, pr = rkey.name;
	     namlen; pl++, pr++, namlen--, klen++, kname++) {
		*kname = *pr;
		if (*pl != *pr) {
			key->namlen = klen + 1;
			goto free_names;
		}
	}

	/* l->namlen <= r->namlen since l <= r */
	if (lkey.namlen < rkey.namlen) {
		*kname = *pr;
		key->namlen = klen + 1;
	} else			/* l->namelen == r->namelen */
		key->namlen = klen;

      free_names:
	kfree(lkey.name);
	kfree(rkey.name);
	return 0;
}
3824 * dtGetKey()
3826 * function: get key of the entry
static void dtGetKey(dtpage_t * p, int i,	/* entry index */
		     struct component_name * key, int flag)
{
	int si;
	s8 *stbl;
	struct ldtentry *lh;
	struct idtentry *ih;
	struct dtslot *t;
	int namlen, len;
	wchar_t *kname;
	__le16 *name;

	/* get entry */
	stbl = DT_GETSTBL(p);
	si = stbl[i];
	if (p->header.flag & BT_LEAF) {
		lh = (struct ldtentry *) & p->slot[si];
		si = lh->next;
		namlen = lh->namlen;
		name = lh->name;
		/* indexed format stores fewer chars in the head segment */
		if (flag & JFS_DIR_INDEX)
			len = min(namlen, DTLHDRDATALEN);
		else
			len = min(namlen, DTLHDRDATALEN_LEGACY);
	} else {
		ih = (struct idtentry *) & p->slot[si];
		si = ih->next;
		namlen = ih->namlen;
		name = ih->name;
		len = min(namlen, DTIHDRDATALEN);
	}

	key->namlen = namlen;
	kname = key->name;

	/*
	 * move head/only segment
	 */
	UniStrncpy_from_le(kname, name, len);

	/*
	 * move additional segment(s)
	 */
	while (si >= 0) {
		/* get next segment */
		t = &p->slot[si];
		kname += len;
		namlen -= len;
		len = min(namlen, DTSLOTDATALEN);
		UniStrncpy_from_le(kname, t->name, len);

		si = t->next;
	}
}
3885 * dtInsertEntry()
3887 * function: allocate free slot(s) and
3888 * write a leaf/internal entry
3890 * return: entry slot index
static void dtInsertEntry(dtpage_t * p, int index, struct component_name * key,
			  ddata_t * data, struct dt_lock ** dtlock)
{
	struct dtslot *h, *t;
	struct ldtentry *lh = NULL;
	struct idtentry *ih = NULL;
	int hsi, fsi, klen, len, nextindex;
	wchar_t *kname;
	__le16 *name;
	s8 *stbl;
	pxd_t *xd;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int xsi, n;
	s64 bn = 0;
	struct metapage *mp = NULL;

	klen = key->namlen;
	kname = key->name;

	/* allocate a free slot (head of the entry) */
	hsi = fsi = p->header.freelist;
	h = &p->slot[fsi];
	p->header.freelist = h->next;
	--p->header.freecnt;

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);

	lv = & dtlck->lv[dtlck->index];
	lv->offset = hsi;

	/* write head/only segment */
	if (p->header.flag & BT_LEAF) {
		lh = (struct ldtentry *) h;
		lh->next = h->next;
		lh->inumber = cpu_to_le32(data->leaf.ino);
		lh->namlen = klen;
		name = lh->name;
		if (data->leaf.ip) {
			/* indexed directory: register this (bn, index)
			 * in the directory index table
			 */
			len = min(klen, DTLHDRDATALEN);
			if (!(p->header.flag & BT_ROOT))
				bn = addressPXD(&p->header.self);
			lh->index = cpu_to_le32(add_index(data->leaf.tid,
							  data->leaf.ip,
							  bn, index));
		} else
			len = min(klen, DTLHDRDATALEN_LEGACY);
	} else {
		ih = (struct idtentry *) h;
		ih->next = h->next;
		xd = (pxd_t *) ih;
		*xd = data->xd;
		ih->namlen = klen;
		name = ih->name;
		len = min(klen, DTIHDRDATALEN);
	}

	UniStrncpy_to_le(name, kname, len);

	n = 1;
	xsi = hsi;

	/* write additional segment(s) */
	t = h;
	klen -= len;
	while (klen) {
		/* get free slot */
		fsi = p->header.freelist;
		t = &p->slot[fsi];
		p->header.freelist = t->next;
		--p->header.freecnt;

		/* is next slot contiguous ? */
		if (fsi != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = fsi;
			n = 0;
		}

		kname += len;
		len = min(klen, DTSLOTDATALEN);
		UniStrncpy_to_le(t->name, kname, len);

		n++;
		xsi = fsi;
		klen -= len;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	*dtlock = dtlck;

	/* terminate last/only segment */
	if (h == t) {
		/* single segment entry */
		if (p->header.flag & BT_LEAF)
			lh->next = -1;
		else
			ih->next = -1;
	} else
		/* multi-segment entry */
		t->next = -1;

	/* if insert into middle, shift right succeeding entries in stbl */
	stbl = DT_GETSTBL(p);
	nextindex = p->header.nextindex;
	if (index < nextindex) {
		memmove(stbl + index + 1, stbl + index, nextindex - index);

		if ((p->header.flag & BT_LEAF) && data->leaf.ip) {
			s64 lblock;

			/*
			 * Need to update slot number for entries that moved
			 * in the stbl
			 */
			mp = NULL;
			for (n = index + 1; n <= nextindex; n++) {
				lh = (struct ldtentry *) & (p->slot[stbl[n]]);
				modify_index(data->leaf.tid, data->leaf.ip,
					     le32_to_cpu(lh->index), bn, n,
					     &mp, &lblock);
			}
			if (mp)
				release_metapage(mp);
		}
	}

	stbl[index] = hsi;

	/* advance next available entry index of stbl */
	++p->header.nextindex;
}
/*
 *	dtMoveEntry()
 *
 * function: move entries from split/left page to new/right page
 *
 *	nextindex of dst page and freelist/freecnt of both pages
 *	are updated.
 *
 * parameters:
 *	sp	- source (split/left) directory page
 *	si	- index in sp's sorted table of the first entry to move;
 *		  all entries from si to nextindex-1 are moved
 *	dp	- destination (new/right) directory page
 *	sdtlock	- in/out: source page linelock; may be replaced by an
 *		  overflow linelock from txLinelock()
 *	ddtlock	- in/out: destination page linelock (same convention)
 *	do_index- nonzero when leaf entries carry the persistent index
 *		  field (directory-index feature) that must be copied
 */
static void dtMoveEntry(dtpage_t * sp, int si, dtpage_t * dp,
			struct dt_lock ** sdtlock, struct dt_lock ** ddtlock,
			int do_index)
{
	int ssi, next;		/* src slot index */
	int di;			/* dst entry index */
	int dsi;		/* dst slot index */
	s8 *sstbl, *dstbl;	/* sorted entry table */
	int snamlen, len;
	struct ldtentry *slh, *dlh = NULL;
	struct idtentry *sih, *dih = NULL;
	struct dtslot *h, *s, *d;
	struct dt_lock *sdtlck = *sdtlock, *ddtlck = *ddtlock;
	struct lv *slv, *dlv;
	int xssi, ns, nd;	/* prev src slot; src/dst run lengths */
	int sfsi;		/* head of source page freelist */

	sstbl = (s8 *) & sp->slot[sp->header.stblindex];
	dstbl = (s8 *) & dp->slot[dp->header.stblindex];

	dsi = dp->header.freelist;	/* first (whole page) free slot */
	sfsi = sp->header.freelist;

	/* linelock destination entry slot */
	dlv = & ddtlck->lv[ddtlck->index];
	dlv->offset = dsi;

	/* linelock source entry slot */
	slv = & sdtlck->lv[sdtlck->index];
	slv->offset = sstbl[si];
	xssi = slv->offset - 1;

	/*
	 *	move entries
	 */
	ns = nd = 0;
	for (di = 0; si < sp->header.nextindex; si++, di++) {
		ssi = sstbl[si];
		dstbl[di] = dsi;

		/* is next slot contiguous ? */
		if (ssi != xssi + 1) {
			/* close current linelock */
			slv->length = ns;
			sdtlck->index++;

			/* open new linelock */
			if (sdtlck->index < sdtlck->maxcnt)
				slv++;
			else {
				sdtlck = (struct dt_lock *) txLinelock(sdtlck);
				slv = & sdtlck->lv[0];
			}

			slv->offset = ssi;
			ns = 0;
		}

		/*
		 *	move head/only segment of an entry
		 */
		/* get dst slot */
		h = d = &dp->slot[dsi];

		/* get src slot and move */
		s = &sp->slot[ssi];
		if (sp->header.flag & BT_LEAF) {
			/* get source entry */
			slh = (struct ldtentry *) s;
			dlh = (struct ldtentry *) h;
			snamlen = slh->namlen;

			if (do_index) {
				len = min(snamlen, DTLHDRDATALEN);
				dlh->index = slh->index; /* little-endian */
			} else
				len = min(snamlen, DTLHDRDATALEN_LEGACY);

			/* 6 = fixed head (inumber+next+namlen/etc.) before
			 * the name; name chars are 2 bytes each (UniChar)
			 */
			memcpy(dlh, slh, 6 + len * 2);

			next = slh->next;

			/* update dst head/only segment next field */
			dsi++;
			dlh->next = dsi;
		} else {
			/* internal/router entry */
			sih = (struct idtentry *) s;
			snamlen = sih->namlen;

			len = min(snamlen, DTIHDRDATALEN);
			dih = (struct idtentry *) h;
			memcpy(dih, sih, 10 + len * 2);
			next = sih->next;

			dsi++;
			dih->next = dsi;
		}

		/* free src head/only segment */
		s->next = sfsi;
		s->cnt = 1;
		sfsi = ssi;

		ns++;
		nd++;
		xssi = ssi;

		/*
		 *	move additional segment(s) of the entry
		 */
		snamlen -= len;
		while ((ssi = next) >= 0) {
			/* is next slot contiguous ? */
			if (ssi != xssi + 1) {
				/* close current linelock */
				slv->length = ns;
				sdtlck->index++;

				/* open new linelock */
				if (sdtlck->index < sdtlck->maxcnt)
					slv++;
				else {
					sdtlck =
					    (struct dt_lock *)
					    txLinelock(sdtlck);
					slv = & sdtlck->lv[0];
				}

				slv->offset = ssi;
				ns = 0;
			}

			/* get next source segment */
			s = &sp->slot[ssi];

			/* get next destination free slot */
			d++;

			len = min(snamlen, DTSLOTDATALEN);
			UniStrncpy_le(d->name, s->name, len);

			ns++;
			nd++;
			xssi = ssi;

			/* chain dst segment to the following free slot;
			 * overwritten with -1 below if this is the last one
			 */
			dsi++;
			d->next = dsi;

			/* free source segment */
			next = s->next;
			s->next = sfsi;
			s->cnt = 1;
			sfsi = ssi;

			snamlen -= len;
		}		/* end while */

		/* terminate dst last/only segment */
		if (h == d) {
			/* single segment entry */
			if (dp->header.flag & BT_LEAF)
				dlh->next = -1;
			else
				dih->next = -1;
		} else
			/* multi-segment entry */
			d->next = -1;
	}			/* end for */

	/* close current linelock */
	slv->length = ns;
	sdtlck->index++;
	*sdtlock = sdtlck;

	dlv->length = nd;
	ddtlck->index++;
	*ddtlock = ddtlck;

	/* update source header: moved slots go back on its freelist */
	sp->header.freelist = sfsi;
	sp->header.freecnt += nd;

	/* update destination header */
	dp->header.nextindex = di;

	dp->header.freelist = dsi;
	dp->header.freecnt -= nd;
}
/*
 *	dtDeleteEntry()
 *
 * function: free a (leaf/internal) entry
 *
 * log freelist header, stbl, and each segment slot of entry
 * (even though last/only segment next field is modified,
 * physical image logging requires all segment slots of
 * the entry logged to avoid applying previous updates
 * to the same slots)
 *
 * parameters:
 *	p	- directory page
 *	fi	- index (in the sorted table) of the entry to delete
 *	dtlock	- in/out: current transaction linelock; may be replaced
 *		  by an overflow linelock allocated via txLinelock()
 */
static void dtDeleteEntry(dtpage_t * p, int fi, struct dt_lock ** dtlock)
{
	int fsi;		/* free entry slot index */
	s8 *stbl;
	struct dtslot *t;
	int si, freecnt;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int xsi, n;		/* prev slot index; current run length */

	/* get free entry slot index */
	stbl = DT_GETSTBL(p);
	fsi = stbl[fi];

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);
	lv = & dtlck->lv[dtlck->index];

	lv->offset = fsi;

	/* get the head/only segment */
	t = &p->slot[fsi];
	if (p->header.flag & BT_LEAF)
		si = ((struct ldtentry *) t)->next;
	else
		si = ((struct idtentry *) t)->next;
	/* turn the head into a plain 1-slot freelist element */
	t->next = si;
	t->cnt = 1;

	n = freecnt = 1;
	xsi = fsi;

	/* find the last/only segment */
	while (si >= 0) {
		/* is next slot contiguous ? */
		if (si != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = si;
			n = 0;
		}

		n++;
		xsi = si;
		freecnt++;

		t = &p->slot[si];
		t->cnt = 1;
		si = t->next;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	*dtlock = dtlck;

	/* update freelist: entry's segment chain (already linked via the
	 * slots' next fields) becomes the new head of the freelist
	 */
	t->next = p->header.freelist;
	p->header.freelist = fsi;
	p->header.freecnt += freecnt;

	/* if delete from middle,
	 * shift left the succeeding entries in the stbl
	 */
	si = p->header.nextindex;
	if (fi < si - 1)
		memmove(&stbl[fi], &stbl[fi + 1], si - fi - 1);

	p->header.nextindex--;
}
/*
 *	dtTruncateEntry()
 *
 * function: truncate a (leaf/internal) entry
 *
 * log freelist header, stbl, and each segment slot of entry
 * (even though last/only segment next field is modified,
 * physical image logging requires all segment slots of
 * the entry logged to avoid applying previous updates
 * to the same slots)
 *
 * note: only called for internal (router) entries — see the ASSERT
 * below; the key is truncated to zero length and all continuation
 * segments are returned to the page freelist.
 */
static void dtTruncateEntry(dtpage_t * p, int ti, struct dt_lock ** dtlock)
{
	int tsi;		/* truncate entry slot index */
	s8 *stbl;
	struct dtslot *t;
	int si, freecnt;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int fsi, xsi, n;	/* freed-chain head; prev slot; run length */

	/* get free entry slot index */
	stbl = DT_GETSTBL(p);
	tsi = stbl[ti];

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);
	lv = & dtlck->lv[dtlck->index];

	lv->offset = tsi;

	/* get the head/only segment: zero the key length and detach
	 * any continuation segments
	 */
	t = &p->slot[tsi];
	ASSERT(p->header.flag & BT_INTERNAL);
	((struct idtentry *) t)->namlen = 0;
	si = ((struct idtentry *) t)->next;
	((struct idtentry *) t)->next = -1;

	n = 1;
	freecnt = 0;
	fsi = si;
	xsi = tsi;

	/* find the last/only segment */
	while (si >= 0) {
		/* is next slot contiguous ? */
		if (si != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = si;
			n = 0;
		}

		n++;
		xsi = si;
		freecnt++;

		t = &p->slot[si];
		t->cnt = 1;
		si = t->next;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	*dtlock = dtlck;

	/* update freelist: nothing to free for a single-segment entry */
	if (freecnt == 0)
		return;
	t->next = p->header.freelist;
	p->header.freelist = fsi;
	p->header.freecnt += freecnt;
}
/*
 *	dtLinelockFreelist()
 *
 * function: linelock the page's free-slot chain (slots below index m)
 * so that the freelist links are captured in the transaction log.
 */
static void dtLinelockFreelist(dtpage_t * p,	/* directory page */
			       int m,	/* max slot index */
			       struct dt_lock ** dtlock)
{
	int fsi;		/* free entry slot index */
	struct dtslot *t;
	int si;
	struct dt_lock *dtlck = *dtlock;
	struct lv *lv;
	int xsi, n;		/* prev slot index; current run length */

	/* get free entry slot index */
	fsi = p->header.freelist;

	/* open new linelock */
	if (dtlck->index >= dtlck->maxcnt)
		dtlck = (struct dt_lock *) txLinelock(dtlck);
	lv = & dtlck->lv[dtlck->index];

	lv->offset = fsi;

	n = 1;
	xsi = fsi;

	t = &p->slot[fsi];
	si = t->next;

	/* find the last/only segment; stop at the first slot >= m */
	while (si < m && si >= 0) {
		/* is next slot contiguous ? */
		if (si != xsi + 1) {
			/* close current linelock */
			lv->length = n;
			dtlck->index++;

			/* open new linelock */
			if (dtlck->index < dtlck->maxcnt)
				lv++;
			else {
				dtlck = (struct dt_lock *) txLinelock(dtlck);
				lv = & dtlck->lv[0];
			}

			lv->offset = si;
			n = 0;
		}

		n++;
		xsi = si;

		t = &p->slot[si];
		si = t->next;
	}

	/* close current linelock */
	lv->length = n;
	dtlck->index++;

	*dtlock = dtlck;
}
/*
 *	NAME: dtModify
 *
 *	FUNCTION: Modify the inode number part of a directory entry
 *
 *	PARAMETERS:
 *		tid	- Transaction id
 *		ip	- Inode of parent directory
 *		key	- Name of entry to be modified
 *		orig_ino - Original inode number expected in entry
 *		new_ino	- New inode number to put into entry
 *		flag	- JFS_RENAME
 *
 *	RETURNS:
 *		-ESTALE	- If entry found does not match orig_ino passed in
 *		-ENOENT	- If no entry can be found to match key
 *		0	- If successfully modified entry
 */
int dtModify(tid_t tid, struct inode *ip,
	 struct component_name * key, ino_t * orig_ino, ino_t new_ino, int flag)
{
	int rc;
	s64 bn;
	struct metapage *mp;
	dtpage_t *p;
	int index;
	struct btstack btstack;
	struct tlock *tlck;
	struct dt_lock *dtlck;
	struct lv *lv;
	s8 *stbl;
	int entry_si;		/* entry slot index */
	struct ldtentry *entry;

	/*
	 *	search for the entry to modify:
	 *
	 * dtSearch() returns (leaf page pinned, index at which to modify).
	 */
	if ((rc = dtSearch(ip, key, orig_ino, &btstack, flag)))
		return rc;

	/* retrieve search result */
	DT_GETSEARCH(ip, btstack.top, bn, mp, p, index);

	BT_MARK_DIRTY(mp, ip);
	/*
	 * acquire a transaction lock on the leaf page of named entry
	 */
	tlck = txLock(tid, ip, mp, tlckDTREE | tlckENTRY);
	dtlck = (struct dt_lock *) & tlck->lock;

	/* get slot index of the entry */
	stbl = DT_GETSTBL(p);
	entry_si = stbl[index];

	/* linelock entry: only the head slot changes (inumber lives
	 * there), so a single line-vector of length 1 suffices
	 */
	ASSERT(dtlck->index == 0);
	lv = & dtlck->lv[0];
	lv->offset = entry_si;
	lv->length = 1;
	dtlck->index++;

	/* get the head/only segment */
	entry = (struct ldtentry *) & p->slot[entry_si];

	/* substitute the inode number of the entry (on-disk little-endian) */
	entry->inumber = cpu_to_le32(new_ino);

	/* unpin the leaf page */
	DT_PUTPAGE(mp);

	return 0;
}