MOXA linux-2.6.x / linux-2.6.9-uc0 (from sdlinux-moxaart.tgz) -- fs/ntfs/index.c
/*
 * index.c - NTFS kernel index handling.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004 Anton Altaparmakov
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ntfs.h"
#include "collate.h"
#include "index.h"

/**
 * ntfs_index_ctx_get - allocate and initialize a new index context
 * @idx_ni:	ntfs index inode with which to initialize the context
 *
 * Allocate a new index context, initialize it with @idx_ni and return it.
 * Return NULL if allocation failed.
 *
 * Locking:  Caller must hold i_sem on the index inode.
 */
ntfs_index_context *ntfs_index_ctx_get(ntfs_inode *idx_ni)
{
	ntfs_index_context *ictx;

	ictx = kmem_cache_alloc(ntfs_index_ctx_cache, SLAB_NOFS);
	if (ictx) {
		ictx->idx_ni = idx_ni;
		ictx->entry = NULL;
		ictx->data = NULL;
		ictx->data_len = 0;
		ictx->is_in_root = 0;
		ictx->ir = NULL;
		ictx->actx = NULL;
		ictx->base_ni = NULL;
		ictx->ia = NULL;
		ictx->page = NULL;
	}
	return ictx;
}

/**
 * ntfs_index_ctx_put - release an index context
 * @ictx:	index context to free
 *
 * Release the index context @ictx, releasing all associated resources.
 *
 * Locking:  Caller must hold i_sem on the index inode.
 */
void ntfs_index_ctx_put(ntfs_index_context *ictx)
{
	if (ictx->entry) {
		if (ictx->is_in_root) {
			if (ictx->actx)
				ntfs_attr_put_search_ctx(ictx->actx);
			if (ictx->base_ni)
				unmap_mft_record(ictx->base_ni);
		} else {
			struct page *page = ictx->page;
			if (page) {
				BUG_ON(!PageLocked(page));
				unlock_page(page);
				ntfs_unmap_page(page);
			}
		}
	}
	kmem_cache_free(ntfs_index_ctx_cache, ictx);
	return;
}

/**
 * ntfs_index_lookup - find a key in an index and return its index entry
 * @key:	[IN] key for which to search in the index
 * @key_len:	[IN] length of @key in bytes
 * @ictx:	[IN/OUT] context describing the index and the returned entry
 *
 * Before calling ntfs_index_lookup(), @ictx must have been obtained from a
 * call to ntfs_index_ctx_get().
 *
 * Look for the @key in the index specified by the index lookup context @ictx.
 * ntfs_index_lookup() walks the contents of the index looking for the @key.
 *
 * If the @key is found in the index, 0 is returned and @ictx is setup to
 * describe the index entry containing the matching @key.  @ictx->entry is the
 * index entry and @ictx->data and @ictx->data_len are the index entry data and
 * its length in bytes, respectively.
 *
 * If the @key is not found in the index, -ENOENT is returned and @ictx is
 * setup to describe the index entry whose key collates immediately after the
 * search @key, i.e. this is the position in the index at which an index entry
 * with a key of @key would need to be inserted.
 *
 * If an error occurs, the negative error code is returned and @ictx is left
 * untouched.
 *
 * When finished with the entry and its data, call ntfs_index_ctx_put() to free
 * the context and other associated resources.
 *
 * If the index entry was modified, call flush_dcache_index_entry_page()
 * immediately after the modification and either ntfs_index_entry_mark_dirty()
 * or ntfs_index_entry_write() before the call to ntfs_index_ctx_put() to
 * ensure that the changes are written to disk.
 *
 * Locking:  - Caller must hold i_sem on the index inode.
 *	     - Each page cache page in the index allocation mapping must be
 *	       locked whilst being accessed otherwise we may find a corrupt
 *	       page due to it being under ->writepage at the moment which
 *	       applies the mst protection fixups before writing out and then
 *	       removes them again after the write is complete after which it
 *	       unlocks the page.
 */
int ntfs_index_lookup(const void *key, const int key_len,
		ntfs_index_context *ictx)
{
	VCN vcn, old_vcn;
	ntfs_inode *idx_ni = ictx->idx_ni;
	ntfs_volume *vol = idx_ni->vol;
	struct super_block *sb = vol->sb;
	ntfs_inode *base_ni = idx_ni->ext.base_ntfs_ino;
	MFT_RECORD *m;
	INDEX_ROOT *ir;
	INDEX_ENTRY *ie;
	INDEX_ALLOCATION *ia;
	u8 *index_end, *kaddr;
	ntfs_attr_search_ctx *actx;
	struct address_space *ia_mapping;
	struct page *page;
	int rc, err = 0;

	ntfs_debug("Entering.");
	BUG_ON(!NInoAttr(idx_ni));
	BUG_ON(idx_ni->type != AT_INDEX_ALLOCATION);
	BUG_ON(idx_ni->nr_extents != -1);
	BUG_ON(!base_ni);
	BUG_ON(!key);
	BUG_ON(key_len <= 0);
	if (!ntfs_is_collation_rule_supported(
			idx_ni->itype.index.collation_rule)) {
		ntfs_error(sb, "Index uses unsupported collation rule 0x%x.  "
				"Aborting lookup.", le32_to_cpu(
				idx_ni->itype.index.collation_rule));
		return -EOPNOTSUPP;
	}
	/* Get hold of the mft record for the index inode. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		ntfs_error(sb, "map_mft_record() failed with error code %ld.",
				-PTR_ERR(m));
		return PTR_ERR(m);
	}
	actx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!actx)) {
		err = -ENOMEM;
		goto err_out;
	}
	/* Find the index root attribute in the mft record. */
	err = ntfs_attr_lookup(AT_INDEX_ROOT, idx_ni->name, idx_ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, actx);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			ntfs_error(sb, "Index root attribute missing in inode "
					"0x%lx.", idx_ni->mft_no);
			err = -EIO;
		}
		goto err_out;
	}
	/* Get to the index root value (it has been verified in read_inode). */
	ir = (INDEX_ROOT*)((u8*)actx->attr +
			le16_to_cpu(actx->attr->data.resident.value_offset));
	index_end = (u8*)&ir->index + le32_to_cpu(ir->index.index_length);
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ir->index +
			le32_to_cpu(ir->index.entries_offset));
	/*
	 * Loop until we exceed valid memory (corruption case) or until we
	 * reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)actx->mrec || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->length) > index_end)
			goto idx_err_out;
		/*
		 * The last entry cannot contain a key.  It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Further bounds checks. */
		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
				le16_to_cpu(ie->key_length) >
				le16_to_cpu(ie->data.vi.data_offset) ||
				(u32)le16_to_cpu(ie->data.vi.data_offset) +
				le16_to_cpu(ie->data.vi.data_length) >
				le16_to_cpu(ie->length))
			goto idx_err_out;
		/* If the keys match perfectly, we setup @ictx and return 0. */
		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
				&ie->key, key_len)) {
ir_done:
			ictx->is_in_root = TRUE;
			ictx->actx = actx;
			ictx->base_ni = base_ni;
			ictx->ia = NULL;
			ictx->page = NULL;
done:
			ictx->entry = ie;
			ictx->data = (u8*)ie +
					le16_to_cpu(ie->data.vi.data_offset);
			ictx->data_len = le16_to_cpu(ie->data.vi.data_length);
			ntfs_debug("Done.");
			return err;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
				key_len, &ie->key, le16_to_cpu(ie->key_length));
		/*
		 * If @key collates before the key of the current entry, there
		 * is definitely no such key in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/*
		 * A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
		 */
		if (!rc)
			goto ir_done;
		/* The keys are not equal, continue the search. */
	}
	/*
	 * We have finished with this index without success.  Check for the
	 * presence of a child node and if not present setup @ictx and return
	 * -ENOENT.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto ir_done;
	} /* Child node present, descend into it. */
	/* Consistency check: Verify that an index allocation exists. */
	if (!NInoIndexAllocPresent(idx_ni)) {
		ntfs_error(sb, "No index allocation attribute but index entry "
				"requires one.  Inode 0x%lx is corrupt or "
				"driver bug.", idx_ni->mft_no);
		err = -EIO;
		goto err_out;
	}
	/* Get the starting vcn of the index_block holding the child node. */
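	/*
	 * (Descriptive note added for clarity, not in the original source:
	 * for entries with the INDEX_ENTRY_NODE flag set, the VCN of the
	 * sub-node is stored in the last eight bytes of the index entry,
	 * which is why it is read from "ie + ie->length - 8" below.)
	 */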
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	ia_mapping = VFS_I(idx_ni)->i_mapping;
	/*
	 * We are done with the index root and the mft record.  Release them,
	 * otherwise we deadlock with ntfs_map_page().
	 */
	ntfs_attr_put_search_ctx(actx);
	unmap_mft_record(base_ni);
	m = NULL;
	actx = NULL;
descend_into_child_node:
	/*
	 * Convert vcn to index into the index allocation attribute in units
	 * of PAGE_CACHE_SIZE and map the page cache page, reading it from
	 * disk if necessary.
	 */
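	/*
	 * (Illustrative example added here, not part of the original source;
	 * the numbers are assumptions: with vcn_size_bits == 12, i.e.
	 * 4096-byte VCN units, and 4096-byte pages, VCN 3 becomes byte offset
	 * 3 << 12 == 12288, i.e. page index 12288 >> PAGE_CACHE_SHIFT == 3,
	 * and the index block then starts at offset 0 within that page.)
	 */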
	page = ntfs_map_page(ia_mapping, vcn <<
			idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT);
	if (IS_ERR(page)) {
		ntfs_error(sb, "Failed to map index page, error %ld.",
				-PTR_ERR(page));
		err = PTR_ERR(page);
		goto err_out;
	}
	lock_page(page);
	kaddr = (u8*)page_address(page);
fast_descend_into_child_node:
	/* Get to the index allocation block. */
	ia = (INDEX_ALLOCATION*)(kaddr + ((vcn <<
			idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK));
	/* Bounds checks. */
	if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Out of bounds check failed.  Corrupt inode "
				"0x%lx or driver bug.", idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	if (sle64_to_cpu(ia->index_block_vcn) != vcn) {
		ntfs_error(sb, "Actual VCN (0x%llx) of index buffer is "
				"different from expected VCN (0x%llx).  Inode "
				"0x%lx is corrupt or driver bug.",
				(unsigned long long)
				sle64_to_cpu(ia->index_block_vcn),
				(unsigned long long)vcn, idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
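	/*
	 * (Descriptive note added for clarity, not in the original source:
	 * allocated_size is counted from the start of the index header, which
	 * begins 0x18 bytes into the index block, hence the "+ 0x18" in the
	 * consistency check below.)
	 */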
	if (le32_to_cpu(ia->index.allocated_size) + 0x18 !=
			idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx has "
				"a size (%u) differing from the index "
				"specified size (%u).  Inode is corrupt or "
				"driver bug.", (unsigned long long)vcn,
				idx_ni->mft_no,
				le32_to_cpu(ia->index.allocated_size) + 0x18,
				idx_ni->itype.index.block_size);
		err = -EIO;
		goto unm_err_out;
	}
	index_end = (u8*)ia + idx_ni->itype.index.block_size;
	if (index_end > kaddr + PAGE_CACHE_SIZE) {
		ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx "
				"crosses page boundary.  Impossible!  Cannot "
				"access!  This is probably a bug in the "
				"driver.", (unsigned long long)vcn,
				idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	index_end = (u8*)&ia->index + le32_to_cpu(ia->index.index_length);
	if (index_end > (u8*)ia + idx_ni->itype.index.block_size) {
		ntfs_error(sb, "Size of index buffer (VCN 0x%llx) of inode "
				"0x%lx exceeds maximum size.",
				(unsigned long long)vcn, idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	/* The first index entry. */
	ie = (INDEX_ENTRY*)((u8*)&ia->index +
			le32_to_cpu(ia->index.entries_offset));
	/*
	 * Iterate similarly to the big loop above, but applied to the index
	 * buffer, thus loop until we exceed valid memory (corruption case) or
	 * until we reach the last entry.
	 */
	for (;; ie = (INDEX_ENTRY*)((u8*)ie + le16_to_cpu(ie->length))) {
		/* Bounds checks. */
		if ((u8*)ie < (u8*)ia || (u8*)ie +
				sizeof(INDEX_ENTRY_HEADER) > index_end ||
				(u8*)ie + le16_to_cpu(ie->length) > index_end) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			err = -EIO;
			goto unm_err_out;
		}
		/*
		 * The last entry cannot contain a key.  It can however contain
		 * a pointer to a child node in the B+tree so we just break out.
		 */
		if (ie->flags & INDEX_ENTRY_END)
			break;
		/* Further bounds checks. */
		if ((u32)sizeof(INDEX_ENTRY_HEADER) +
				le16_to_cpu(ie->key_length) >
				le16_to_cpu(ie->data.vi.data_offset) ||
				(u32)le16_to_cpu(ie->data.vi.data_offset) +
				le16_to_cpu(ie->data.vi.data_length) >
				le16_to_cpu(ie->length)) {
			ntfs_error(sb, "Index entry out of bounds in inode "
					"0x%lx.", idx_ni->mft_no);
			err = -EIO;
			goto unm_err_out;
		}
		/* If the keys match perfectly, we setup @ictx and return 0. */
		if ((key_len == le16_to_cpu(ie->key_length)) && !memcmp(key,
				&ie->key, key_len)) {
ia_done:
			ictx->is_in_root = FALSE;
			ictx->actx = NULL;
			ictx->base_ni = NULL;
			ictx->ia = ia;
			ictx->page = page;
			goto done;
		}
		/*
		 * Not a perfect match, need to do full blown collation so we
		 * know which way in the B+tree we have to go.
		 */
		rc = ntfs_collate(vol, idx_ni->itype.index.collation_rule, key,
				key_len, &ie->key, le16_to_cpu(ie->key_length));
		/*
		 * If @key collates before the key of the current entry, there
		 * is definitely no such key in this index but we might need to
		 * descend into the B+tree so we just break out of the loop.
		 */
		if (rc == -1)
			break;
		/*
		 * A match should never happen as the memcmp() call should have
		 * caught it, but we still treat it correctly.
		 */
		if (!rc)
			goto ia_done;
		/* The keys are not equal, continue the search. */
	}
	/*
	 * We have finished with this index buffer without success.  Check for
	 * the presence of a child node and if not present return -ENOENT.
	 */
	if (!(ie->flags & INDEX_ENTRY_NODE)) {
		ntfs_debug("Entry not found.");
		err = -ENOENT;
		goto ia_done;
	}
	if ((ia->index.flags & NODE_MASK) == LEAF_NODE) {
		ntfs_error(sb, "Index entry with child node found in a leaf "
				"node in inode 0x%lx.", idx_ni->mft_no);
		err = -EIO;
		goto unm_err_out;
	}
	/* Child node present, descend into it. */
	old_vcn = vcn;
	vcn = sle64_to_cpup((sle64*)((u8*)ie + le16_to_cpu(ie->length) - 8));
	if (vcn >= 0) {
		/*
		 * If vcn is in the same page cache page as old_vcn we recycle
		 * the mapped page.
		 */
		if (old_vcn << vol->cluster_size_bits >>
				PAGE_CACHE_SHIFT == vcn <<
				vol->cluster_size_bits >>
				PAGE_CACHE_SHIFT)
			goto fast_descend_into_child_node;
		unlock_page(page);
		ntfs_unmap_page(page);
		goto descend_into_child_node;
	}
	ntfs_error(sb, "Negative child node vcn in inode 0x%lx.",
			idx_ni->mft_no);
	err = -EIO;
unm_err_out:
	unlock_page(page);
	ntfs_unmap_page(page);
err_out:
	if (actx)
		ntfs_attr_put_search_ctx(actx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
idx_err_out:
	ntfs_error(sb, "Corrupt index.  Aborting lookup.");
	err = -EIO;
	goto err_out;
}

#ifdef NTFS_RW

/**
 * __ntfs_index_entry_mark_dirty - mark an index allocation entry dirty
 * @ictx:	ntfs index context describing the index entry
 *
 * NOTE: You want to use fs/ntfs/index.h::ntfs_index_entry_mark_dirty()
 * instead!
 *
 * Mark the index allocation entry described by the index entry context @ictx
 * dirty.
 *
 * The index entry must be in an index block belonging to the index allocation
 * attribute.  Mark the buffers belonging to the index record as well as the
 * page cache page the index block is in dirty.  This automatically marks the
 * VFS inode of the ntfs index inode to which the index entry belongs dirty,
 * too (I_DIRTY_PAGES) and this in turn ensures the page buffers, and hence the
 * dirty index block, will be written out to disk later.
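 *
 * Worked example (added for illustration, not part of the original source;
 * the sizes are assumptions): with PAGE_CACHE_SIZE == 4096, 512-byte buffer
 * heads and a 2048-byte index block starting at byte 2048 of the page, the
 * loop below computes rec_start == 2048 and rec_end == 4096 and therefore
 * sets only the last four buffers of the page dirty, leaving the first four
 * untouched.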
 */
void __ntfs_index_entry_mark_dirty(ntfs_index_context *ictx)
{
	ntfs_inode *ni;
	struct page *page;
	struct buffer_head *bh, *head;
	unsigned int rec_start, rec_end, bh_size, bh_start, bh_end;

	BUG_ON(ictx->is_in_root);
	ni = ictx->idx_ni;
	page = ictx->page;
	BUG_ON(!page_has_buffers(page));
	/*
	 * If the index block is the same size as the page cache page, set all
	 * the buffers in the page, as well as the page itself, dirty.
	 */
	if (ni->itype.index.block_size == PAGE_CACHE_SIZE) {
		__set_page_dirty_buffers(page);
		return;
	}
	/* Set only the buffers in which the index block is located dirty. */
	rec_start = (unsigned int)((u8*)ictx->ia - (u8*)page_address(page));
	rec_end = rec_start + ni->itype.index.block_size;
	bh_size = ni->vol->sb->s_blocksize;
	bh_start = 0;
	bh = head = page_buffers(page);
	do {
		bh_end = bh_start + bh_size;
		if ((bh_start >= rec_start) && (bh_end <= rec_end))
			set_buffer_dirty(bh);
		bh_start = bh_end;
	} while ((bh = bh->b_this_page) != head);
	/* Finally, set the page itself dirty, too. */
	__set_page_dirty_nobuffers(page);
}

#endif /* NTFS_RW */