1 /**
2 * attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
4 * Copyright (c) 2001-2007 Anton Altaparmakov
5 * Copyright (c) 2002 Richard Russon
7 * This program/include file is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program/include file is distributed in the hope that it will be
13 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program (in the main directory of the Linux-NTFS
19 * distribution in the file COPYING); if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/buffer_head.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/swap.h>
27 #include <linux/writeback.h>
29 #include "attrib.h"
30 #include "debug.h"
31 #include "layout.h"
32 #include "lcnalloc.h"
33 #include "malloc.h"
34 #include "mft.h"
35 #include "ntfs.h"
36 #include "types.h"
38 /**
39 * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
40 * @ni: ntfs inode for which to map (part of) a runlist
41 * @vcn: map runlist part containing this vcn
42 * @ctx: active attribute search context if present or NULL if not
44 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
46 * If @ctx is specified, it is an active search context of @ni and its base mft
47 * record. This is needed when ntfs_map_runlist_nolock() encounters unmapped
48 * runlist fragments and allows their mapping. If you do not have the mft
49 * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
50 * will perform the necessary mapping and unmapping.
52 * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
53 * restores it before returning. Thus, @ctx will be left pointing to the same
54 * attribute on return as on entry. However, the actual pointers in @ctx may
55 * point to different memory locations on return, so you must remember to reset
56 * any cached pointers from the @ctx, i.e. after the call to
57 * ntfs_map_runlist_nolock(), you will probably want to do:
58 * m = ctx->mrec;
59 * a = ctx->attr;
60 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
61 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
63 * Return 0 on success and -errno on error. There is one special error code
64 * which is not an error as such. This is -ENOENT. It means that @vcn is out
65 * of bounds of the runlist.
67 * Note the runlist can be NULL after this function returns if @vcn is zero and
68 * the attribute has zero allocated size, i.e. there simply is no runlist.
70 * WARNING: If @ctx is supplied, regardless of whether success or failure is
71 * returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
72 * is no longer valid, i.e. you need to either call
73 * ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
74 * In that case PTR_ERR(@ctx->mrec) will give you the error code for
75 * why the mapping of the old inode failed.
77 * Locking: - The runlist described by @ni must be locked for writing on entry
78 * and is locked on return. Note the runlist will be modified.
79 * - If @ctx is NULL, the base mft record of @ni must not be mapped on
80 * entry and it will be left unmapped on return.
81 * - If @ctx is not NULL, the base mft record must be mapped on entry
82 * and it will be left mapped on return.
84 int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
86 VCN end_vcn;
87 unsigned long flags;
88 ntfs_inode *base_ni;
89 MFT_RECORD *m;
90 ATTR_RECORD *a;
91 runlist_element *rl;
92 struct page *put_this_page = NULL;
93 int err = 0;
94 bool ctx_is_temporary, ctx_needs_reset;
95 ntfs_attr_search_ctx old_ctx = { NULL, };
97 ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
98 (unsigned long long)vcn);
99 if (!NInoAttr(ni))
100 base_ni = ni;
101 else
102 base_ni = ni->ext.base_ntfs_ino;
103 if (!ctx) {
104 ctx_is_temporary = ctx_needs_reset = true;
105 m = map_mft_record(base_ni);
106 if (IS_ERR(m))
107 return PTR_ERR(m);
108 ctx = ntfs_attr_get_search_ctx(base_ni, m);
109 if (unlikely(!ctx)) {
110 err = -ENOMEM;
111 goto err_out;
113 } else {
114 VCN allocated_size_vcn;
116 BUG_ON(IS_ERR(ctx->mrec));
117 a = ctx->attr;
118 BUG_ON(!a->non_resident);
119 ctx_is_temporary = false;
120 end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
121 read_lock_irqsave(&ni->size_lock, flags);
122 allocated_size_vcn = ni->allocated_size >>
123 ni->vol->cluster_size_bits;
124 read_unlock_irqrestore(&ni->size_lock, flags);
125 if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
126 end_vcn = allocated_size_vcn - 1;
128 * If we already have the attribute extent containing @vcn in
129 * @ctx, no need to look it up again. We slightly cheat in
130 * that if vcn exceeds the allocated size, we will refuse to
131 * map the runlist below, so there is definitely no need to get
132 * the right attribute extent.
134 if (vcn >= allocated_size_vcn || (a->type == ni->type &&
135 a->name_length == ni->name_len &&
136 !memcmp((u8*)a + le16_to_cpu(a->name_offset),
137 ni->name, ni->name_len) &&
138 sle64_to_cpu(a->data.non_resident.lowest_vcn)
139 <= vcn && end_vcn >= vcn))
140 ctx_needs_reset = false;
141 else {
142 /* Save the old search context. */
143 old_ctx = *ctx;
145 * If the currently mapped (extent) inode is not the
146 * base inode we will unmap it when we reinitialize the
147 * search context which means we need to get a
148 * reference to the page containing the mapped mft
149 * record so we do not accidentally drop changes to the
150 * mft record when it has not been marked dirty yet.
152 if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
153 old_ctx.base_ntfs_ino) {
154 put_this_page = old_ctx.ntfs_ino->page;
155 page_cache_get(put_this_page);
158 * Reinitialize the search context so we can lookup the
159 * needed attribute extent.
161 ntfs_attr_reinit_search_ctx(ctx);
162 ctx_needs_reset = true;
165 if (ctx_needs_reset) {
166 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
167 CASE_SENSITIVE, vcn, NULL, 0, ctx);
168 if (unlikely(err)) {
169 if (err == -ENOENT)
170 err = -EIO;
171 goto err_out;
173 BUG_ON(!ctx->attr->non_resident);
175 a = ctx->attr;
177 * Only decompress the mapping pairs if @vcn is inside it. Otherwise
178 * we get into problems when we try to map an out of bounds vcn because
179 * we then try to map the already mapped runlist fragment and
180 * ntfs_mapping_pairs_decompress() fails.
182 end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
183 if (unlikely(vcn && vcn >= end_vcn)) {
184 err = -ENOENT;
185 goto err_out;
187 rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
188 if (IS_ERR(rl))
189 err = PTR_ERR(rl);
190 else
191 ni->runlist.rl = rl;
192 err_out:
193 if (ctx_is_temporary) {
194 if (likely(ctx))
195 ntfs_attr_put_search_ctx(ctx);
196 unmap_mft_record(base_ni);
197 } else if (ctx_needs_reset) {
199 * If there is no attribute list, restoring the search context
200 * is accomplished simply by copying the saved context back over
201 * the caller supplied context. If there is an attribute list,
202 * things are more complicated as we need to deal with mapping
203 * of mft records and resulting potential changes in pointers.
205 if (NInoAttrList(base_ni)) {
207 * If the currently mapped (extent) inode is not the
208 * one we had before, we need to unmap it and map the
209 * old one.
211 if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
213 * If the currently mapped inode is not the
214 * base inode, unmap it.
216 if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
217 ctx->base_ntfs_ino) {
218 unmap_extent_mft_record(ctx->ntfs_ino);
219 ctx->mrec = ctx->base_mrec;
220 BUG_ON(!ctx->mrec);
223 * If the old mapped inode is not the base
224 * inode, map it.
226 if (old_ctx.base_ntfs_ino &&
227 old_ctx.ntfs_ino !=
228 old_ctx.base_ntfs_ino) {
229 retry_map:
230 ctx->mrec = map_mft_record(
231 old_ctx.ntfs_ino);
233 * Something bad has happened. If out
234 * of memory, retry till it succeeds.
235 * Any other errors are fatal and we
236 * return the error code in ctx->mrec.
237 * Let the caller deal with it... We
238 * just need to fudge things so the
239 * caller can reinit and/or put the
240 * search context safely.
242 if (IS_ERR(ctx->mrec)) {
243 if (PTR_ERR(ctx->mrec) ==
244 -ENOMEM) {
245 schedule();
246 goto retry_map;
247 } else
248 old_ctx.ntfs_ino =
249 old_ctx.
250 base_ntfs_ino;
254 /* Update the changed pointers in the saved context. */
255 if (ctx->mrec != old_ctx.mrec) {
256 if (!IS_ERR(ctx->mrec))
257 old_ctx.attr = (ATTR_RECORD*)(
258 (u8*)ctx->mrec +
259 ((u8*)old_ctx.attr -
260 (u8*)old_ctx.mrec));
261 old_ctx.mrec = ctx->mrec;
264 /* Restore the search context to the saved one. */
265 *ctx = old_ctx;
267 * We drop the reference on the page we took earlier. In the
268 * case that IS_ERR(ctx->mrec) is true this means we might lose
269 * some changes to the mft record that had been made between
270 * the last time it was marked dirty/written out and now. This
271 * at this stage is not a problem as the mapping error is fatal
272 * enough that the mft record cannot be written out anyway and
273 * the caller is very likely to shutdown the whole inode
274 * immediately and mark the volume dirty for chkdsk to pick up
275 * the pieces anyway.
277 if (put_this_page)
278 page_cache_release(put_this_page);
280 return err;
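/*
 * Illustrative usage sketch (not part of the original file, never built):
 * how a caller that holds the runlist lock for writing and owns an active
 * search context might call ntfs_map_runlist_nolock() and then refresh its
 * cached pointers as described in the comment above.  All other setup and
 * teardown is assumed to happen elsewhere.
 */
#if 0
static int example_remap_and_refresh(ntfs_inode *ni, VCN vcn,
		ntfs_attr_search_ctx *ctx)
{
	MFT_RECORD *m;
	ATTR_RECORD *a;
	int err;

	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
	if (IS_ERR(ctx->mrec)) {
		/* As the warning above says, @ctx is no longer valid. */
		err = PTR_ERR(ctx->mrec);
		ntfs_attr_reinit_search_ctx(ctx);
		return err;
	}
	if (err)
		return err;	/* -ENOENT means @vcn is out of bounds. */
	/* Refresh any cached pointers; the old ones may be stale. */
	m = ctx->mrec;
	a = ctx->attr;
	/* ... continue working with @m and @a ... */
	return 0;
}
#endif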
284 * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
285 * @ni: ntfs inode for which to map (part of) a runlist
286 * @vcn: map runlist part containing this vcn
288 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
290 * Return 0 on success and -errno on error. There is one special error code
291 * which is not an error as such. This is -ENOENT. It means that @vcn is out
292 * of bounds of the runlist.
294 * Locking: - The runlist must be unlocked on entry and is unlocked on return.
295 * - This function takes the runlist lock for writing and may modify
296 * the runlist.
298 int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
300 int err = 0;
302 down_write(&ni->runlist.lock);
303 /* Make sure someone else didn't do the work while we were sleeping. */
304 if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
305 LCN_RL_NOT_MAPPED))
306 err = ntfs_map_runlist_nolock(ni, vcn, NULL);
307 up_write(&ni->runlist.lock);
308 return err;
312 * ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
313 * @ni: ntfs inode of the attribute whose runlist to search
314 * @vcn: vcn to convert
315 * @write_locked: true if the runlist is locked for writing
317 * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
318 * described by the ntfs inode @ni and return the corresponding logical cluster
319 * number (lcn).
321 * If the @vcn is not mapped yet, the attempt is made to map the attribute
322 * extent containing the @vcn and the vcn to lcn conversion is retried.
324 * If @write_locked is true the caller has locked the runlist for writing and
325 * if false for reading.
327 * Since lcns must be >= 0, we use negative return codes with special meaning:
329 * Return code Meaning / Description
330 * ==========================================
331 * LCN_HOLE Hole / not allocated on disk.
332 * LCN_ENOENT There is no such vcn in the runlist, i.e. @vcn is out of bounds.
333 * LCN_ENOMEM Not enough memory to map runlist.
334 * LCN_EIO Critical error (runlist/file is corrupt, i/o error, etc).
336 * Locking: - The runlist must be locked on entry and is left locked on return.
337 * - If @write_locked is 'false', i.e. the runlist is locked for reading,
338 * the lock may be dropped inside the function so you cannot rely on
339 * the runlist still being the same when this function returns.
341 LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
342 const bool write_locked)
344 LCN lcn;
345 unsigned long flags;
346 bool is_retry = false;
348 ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
349 ni->mft_no, (unsigned long long)vcn,
350 write_locked ? "write" : "read");
351 BUG_ON(!ni);
352 BUG_ON(!NInoNonResident(ni));
353 BUG_ON(vcn < 0);
354 if (!ni->runlist.rl) {
355 read_lock_irqsave(&ni->size_lock, flags);
356 if (!ni->allocated_size) {
357 read_unlock_irqrestore(&ni->size_lock, flags);
358 return LCN_ENOENT;
360 read_unlock_irqrestore(&ni->size_lock, flags);
362 retry_remap:
363 /* Convert vcn to lcn. If that fails map the runlist and retry once. */
364 lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
365 if (likely(lcn >= LCN_HOLE)) {
366 ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
367 return lcn;
369 if (lcn != LCN_RL_NOT_MAPPED) {
370 if (lcn != LCN_ENOENT)
371 lcn = LCN_EIO;
372 } else if (!is_retry) {
373 int err;
375 if (!write_locked) {
376 up_read(&ni->runlist.lock);
377 down_write(&ni->runlist.lock);
378 if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
379 LCN_RL_NOT_MAPPED)) {
380 up_write(&ni->runlist.lock);
381 down_read(&ni->runlist.lock);
382 goto retry_remap;
385 err = ntfs_map_runlist_nolock(ni, vcn, NULL);
386 if (!write_locked) {
387 up_write(&ni->runlist.lock);
388 down_read(&ni->runlist.lock);
390 if (likely(!err)) {
391 is_retry = true;
392 goto retry_remap;
394 if (err == -ENOENT)
395 lcn = LCN_ENOENT;
396 else if (err == -ENOMEM)
397 lcn = LCN_ENOMEM;
398 else
399 lcn = LCN_EIO;
401 if (lcn != LCN_ENOENT)
402 ntfs_error(ni->vol->sb, "Failed with error code %lli.",
403 (long long)lcn);
404 return lcn;
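/*
 * Illustrative usage sketch (not part of the original file, never built):
 * the usual calling pattern takes the runlist lock for reading, converts,
 * and then interprets the negative LCN_* codes documented above.
 */
#if 0
static int example_check_mapping(ntfs_inode *ni, const VCN vcn)
{
	LCN lcn;

	down_read(&ni->runlist.lock);
	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
	up_read(&ni->runlist.lock);
	if (lcn >= 0)
		return 0;	/* Allocated, starting at logical cluster @lcn. */
	if (lcn == LCN_HOLE)
		return 0;	/* Sparse region: reads return zeroes. */
	if (lcn == LCN_ENOENT)
		return -ENOENT;	/* @vcn is beyond the end of the runlist. */
	return lcn == LCN_ENOMEM ? -ENOMEM : -EIO;
}
#endif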
408 * ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
409 * @ni: ntfs inode describing the runlist to search
410 * @vcn: vcn to find
411 * @ctx: active attribute search context if present or NULL if not
413 * Find the virtual cluster number @vcn in the runlist described by the ntfs
414 * inode @ni and return the address of the runlist element containing the @vcn.
416 * If the @vcn is not mapped yet, the attempt is made to map the attribute
417 * extent containing the @vcn and the vcn to lcn conversion is retried.
419 * If @ctx is specified, it is an active search context of @ni and its base mft
420 * record. This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
421 * runlist fragments and allows their mapping. If you do not have the mft
422 * record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
423 * will perform the necessary mapping and unmapping.
425 * Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
426 * restores it before returning. Thus, @ctx will be left pointing to the same
427 * attribute on return as on entry. However, the actual pointers in @ctx may
428 * point to different memory locations on return, so you must remember to reset
429 * any cached pointers from the @ctx, i.e. after the call to
430 * ntfs_attr_find_vcn_nolock(), you will probably want to do:
431 * m = ctx->mrec;
432 * a = ctx->attr;
433 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
434 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
435 * Note you need to distinguish between the lcn of the returned runlist element
436 * being >= 0 and LCN_HOLE. In the latter case you have to return zeroes on
437 * read and allocate clusters on write.
439 * Return the runlist element containing the @vcn on success and
440 * ERR_PTR(-errno) on error. You need to test the return value with IS_ERR()
441 * to decide if the return is success or failure and PTR_ERR() to get to the
442 * error code if IS_ERR() is true.
444 * The possible error return codes are:
445 * -ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
446 * -ENOMEM - Not enough memory to map runlist.
447 * -EIO - Critical error (runlist/file is corrupt, i/o error, etc).
449 * WARNING: If @ctx is supplied, regardless of whether success or failure is
450 * returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
451 * is no longer valid, i.e. you need to either call
452 * ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
453 * In that case PTR_ERR(@ctx->mrec) will give you the error code for
454 * why the mapping of the old inode failed.
456 * Locking: - The runlist described by @ni must be locked for writing on entry
457 * and is locked on return. Note the runlist may be modified when
458 * needed runlist fragments need to be mapped.
459 * - If @ctx is NULL, the base mft record of @ni must not be mapped on
460 * entry and it will be left unmapped on return.
461 * - If @ctx is not NULL, the base mft record must be mapped on entry
462 * and it will be left mapped on return.
464 runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
465 ntfs_attr_search_ctx *ctx)
467 unsigned long flags;
468 runlist_element *rl;
469 int err = 0;
470 bool is_retry = false;
472 ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
473 ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
474 BUG_ON(!ni);
475 BUG_ON(!NInoNonResident(ni));
476 BUG_ON(vcn < 0);
477 if (!ni->runlist.rl) {
478 read_lock_irqsave(&ni->size_lock, flags);
479 if (!ni->allocated_size) {
480 read_unlock_irqrestore(&ni->size_lock, flags);
481 return ERR_PTR(-ENOENT);
483 read_unlock_irqrestore(&ni->size_lock, flags);
485 retry_remap:
486 rl = ni->runlist.rl;
487 if (likely(rl && vcn >= rl[0].vcn)) {
488 while (likely(rl->length)) {
489 if (unlikely(vcn < rl[1].vcn)) {
490 if (likely(rl->lcn >= LCN_HOLE)) {
491 ntfs_debug("Done.");
492 return rl;
494 break;
496 rl++;
498 if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
499 if (likely(rl->lcn == LCN_ENOENT))
500 err = -ENOENT;
501 else
502 err = -EIO;
505 if (!err && !is_retry) {
507 * If the search context is invalid we cannot map the unmapped
508 * region.
510 if (IS_ERR(ctx->mrec))
511 err = PTR_ERR(ctx->mrec);
512 else {
514 * The @vcn is in an unmapped region, map the runlist
515 * and retry.
517 err = ntfs_map_runlist_nolock(ni, vcn, ctx);
518 if (likely(!err)) {
519 is_retry = true;
520 goto retry_remap;
523 if (err == -EINVAL)
524 err = -EIO;
525 } else if (!err)
526 err = -EIO;
527 if (err != -ENOENT)
528 ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
529 return ERR_PTR(err);
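/*
 * Illustrative usage sketch (not part of the original file, never built):
 * find the runlist element for a vcn while holding the runlist lock for
 * writing, and distinguish a real lcn from a hole as the comment above
 * requires.  @ctx follows the same rules as for ntfs_map_runlist_nolock().
 */
#if 0
static LCN example_find_lcn(ntfs_inode *ni, const VCN vcn,
		ntfs_attr_search_ctx *ctx)
{
	runlist_element *rl;

	rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
	if (IS_ERR(rl))
		return PTR_ERR(rl);	/* -ENOENT, -ENOMEM or -EIO. */
	if (rl->lcn == LCN_HOLE)
		return LCN_HOLE;	/* Zeroes on read, allocate on write. */
	/* rl->lcn >= 0, so @vcn maps to the lcn below. */
	return rl->lcn + (vcn - rl->vcn);
}
#endif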
533 * ntfs_attr_find - find (next) attribute in mft record
534 * @type: attribute type to find
535 * @name: attribute name to find (optional, i.e. NULL means don't care)
536 * @name_len: attribute name length (only needed if @name present)
537 * @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
538 * @val: attribute value to find (optional, resident attributes only)
539 * @val_len: attribute value length
540 * @ctx: search context with mft record and attribute to search from
542 * You should not need to call this function directly. Use ntfs_attr_lookup()
543 * instead.
545 * ntfs_attr_find() takes a search context @ctx as parameter and searches the
546 * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
547 * attribute of @type, optionally @name and @val.
549 * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
550 * point to the found attribute.
552 * If the attribute is not found, ntfs_attr_find() returns -ENOENT and
553 * @ctx->attr will point to the attribute before which the attribute being
554 * searched for would need to be inserted if such an action were to be desired.
556 * On actual error, ntfs_attr_find() returns -EIO. In this case @ctx->attr is
557 * undefined and in particular do not rely on it not changing.
559 * If @ctx->is_first is 'true', the search begins with @ctx->attr itself. If it
560 * is 'false', the search begins after @ctx->attr.
562 * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
563 * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
564 * @ctx->mrec belongs. This is so we can get at the ntfs volume and hence at
565 * the upcase table. If @ic is CASE_SENSITIVE, the comparison is case
566 * sensitive. When @name is present, @name_len is the @name length in Unicode
567 * characters.
569 * If @name is not present (NULL), we assume that the unnamed attribute is
570 * being searched for.
572 * Finally, the resident attribute value @val is looked for, if present. If
573 * @val is not present (NULL), @val_len is ignored.
575 * ntfs_attr_find() only searches the specified mft record and it ignores the
576 * presence of an attribute list attribute (unless it is the one being searched
577 * for, obviously). If you need to take attribute lists into consideration,
578 * use ntfs_attr_lookup() instead (see below). This also means that you cannot
579 * use ntfs_attr_find() to search for extent records of non-resident
580 * attributes, as extents with lowest_vcn != 0 are usually described by the
581 * attribute list attribute only. - Note that it is possible that the first
582 * extent is only in the attribute list while the last extent is in the base
583 * mft record, so do not rely on being able to find the first extent in the
584 * base mft record.
586 * Warning: Never use @val when looking for attribute types which can be
587 * non-resident as this most likely will result in a crash!
589 static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
590 const u32 name_len, const IGNORE_CASE_BOOL ic,
591 const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
593 ATTR_RECORD *a;
594 ntfs_volume *vol = ctx->ntfs_ino->vol;
595 ntfschar *upcase = vol->upcase;
596 u32 upcase_len = vol->upcase_len;
599 * Iterate over attributes in mft record starting at @ctx->attr, or the
600 * attribute following that, if @ctx->is_first is 'true'.
602 if (ctx->is_first) {
603 a = ctx->attr;
604 ctx->is_first = false;
605 } else
606 a = (ATTR_RECORD*)((u8*)ctx->attr +
607 le32_to_cpu(ctx->attr->length));
608 for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
609 if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
610 le32_to_cpu(ctx->mrec->bytes_allocated))
611 break;
612 ctx->attr = a;
613 if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
614 a->type == AT_END))
615 return -ENOENT;
616 if (unlikely(!a->length))
617 break;
618 if (a->type != type)
619 continue;
621 * If @name is present, compare the two names. If @name is
622 * missing, assume we want an unnamed attribute.
624 if (!name) {
625 /* The search failed if the found attribute is named. */
626 if (a->name_length)
627 return -ENOENT;
628 } else if (!ntfs_are_names_equal(name, name_len,
629 (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
630 a->name_length, ic, upcase, upcase_len)) {
631 register int rc;
633 rc = ntfs_collate_names(name, name_len,
634 (ntfschar*)((u8*)a +
635 le16_to_cpu(a->name_offset)),
636 a->name_length, 1, IGNORE_CASE,
637 upcase, upcase_len);
639 * If @name collates before a->name, there is no
640 * matching attribute.
642 if (rc == -1)
643 return -ENOENT;
644 /* If the strings are not equal, continue search. */
645 if (rc)
646 continue;
647 rc = ntfs_collate_names(name, name_len,
648 (ntfschar*)((u8*)a +
649 le16_to_cpu(a->name_offset)),
650 a->name_length, 1, CASE_SENSITIVE,
651 upcase, upcase_len);
652 if (rc == -1)
653 return -ENOENT;
654 if (rc)
655 continue;
658 * The names match or @name not present and attribute is
659 * unnamed. If no @val specified, we have found the attribute
660 * and are done.
662 if (!val)
663 return 0;
664 /* @val is present; compare values. */
665 else {
666 register int rc;
668 rc = memcmp(val, (u8*)a + le16_to_cpu(
669 a->data.resident.value_offset),
670 min_t(u32, val_len, le32_to_cpu(
671 a->data.resident.value_length)));
673 * If @val collates before the current attribute's
674 * value, there is no matching attribute.
676 if (!rc) {
677 register u32 avl;
679 avl = le32_to_cpu(
680 a->data.resident.value_length);
681 if (val_len == avl)
682 return 0;
683 if (val_len < avl)
684 return -ENOENT;
685 } else if (rc < 0)
686 return -ENOENT;
689 ntfs_error(vol->sb, "Inode is corrupt. Run chkdsk.");
690 NVolSetErrors(vol);
691 return -EIO;
695 * load_attribute_list - load an attribute list into memory
696 * @vol: ntfs volume from which to read
697 * @runlist: runlist of the attribute list
698 * @al_start: destination buffer
699 * @size: size of the destination buffer in bytes
700 * @initialized_size: initialized size of the attribute list
702 * Walk the runlist @runlist and load all clusters from it copying them into
703 * the linear buffer @al. The maximum number of bytes copied to @al is @size
704 * bytes. Note, @size does not need to be a multiple of the cluster size. If
705 * @initialized_size is less than @size, the region in @al between
706 * @initialized_size and @size will be zeroed and not read from disk.
708 * Return 0 on success or -errno on error.
710 int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
711 const s64 size, const s64 initialized_size)
713 LCN lcn;
714 u8 *al = al_start;
715 u8 *al_end = al + initialized_size;
716 runlist_element *rl;
717 struct buffer_head *bh;
718 struct super_block *sb;
719 unsigned long block_size;
720 unsigned long block, max_block;
721 int err = 0;
722 unsigned char block_size_bits;
724 ntfs_debug("Entering.");
725 if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
726 initialized_size > size)
727 return -EINVAL;
728 if (!initialized_size) {
729 memset(al, 0, size);
730 return 0;
732 sb = vol->sb;
733 block_size = sb->s_blocksize;
734 block_size_bits = sb->s_blocksize_bits;
735 down_read(&runlist->lock);
736 rl = runlist->rl;
737 if (!rl) {
738 ntfs_error(sb, "Cannot read attribute list since runlist is "
739 "missing.");
740 goto err_out;
742 /* Read all clusters specified by the runlist one run at a time. */
743 while (rl->length) {
744 lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
745 ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
746 (unsigned long long)rl->vcn,
747 (unsigned long long)lcn);
748 /* The attribute list cannot be sparse. */
749 if (lcn < 0) {
750 ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed. Cannot "
751 "read attribute list.");
752 goto err_out;
754 block = lcn << vol->cluster_size_bits >> block_size_bits;
755 /* Read the run from device in chunks of block_size bytes. */
756 max_block = block + (rl->length << vol->cluster_size_bits >>
757 block_size_bits);
758 ntfs_debug("max_block = 0x%lx.", max_block);
759 do {
760 ntfs_debug("Reading block = 0x%lx.", block);
761 bh = sb_bread(sb, block);
762 if (!bh) {
763 ntfs_error(sb, "sb_bread() failed. Cannot "
764 "read attribute list.");
765 goto err_out;
767 if (al + block_size >= al_end)
768 goto do_final;
769 memcpy(al, bh->b_data, block_size);
770 brelse(bh);
771 al += block_size;
772 } while (++block < max_block);
773 rl++;
775 if (initialized_size < size) {
776 initialize:
777 memset(al_start + initialized_size, 0, size - initialized_size);
779 done:
780 up_read(&runlist->lock);
781 return err;
782 do_final:
783 if (al < al_end) {
785 * Partial block.
787 * Note: The attribute list can be smaller than its allocation
788 * by multiple clusters. This has been encountered by at least
789 * two people running Windows XP, thus we cannot do any
790 * truncation sanity checking here. (AIA)
792 memcpy(al, bh->b_data, al_end - al);
793 brelse(bh);
794 if (initialized_size < size)
795 goto initialize;
796 goto done;
798 brelse(bh);
799 /* Real overflow! */
800 ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
801 "is truncated.");
802 err_out:
803 err = -EIO;
804 goto done;
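/*
 * Illustrative usage sketch (not part of the original file, never built):
 * roughly how the attribute list of a base inode is read in, assuming the
 * destination buffer @base_ni->attr_list of @base_ni->attr_list_size bytes
 * and the runlist @base_ni->attr_list_rl have already been set up.
 */
#if 0
static int example_load_attr_list(ntfs_volume *vol, ntfs_inode *base_ni,
		const s64 initialized_size)
{
	return load_attribute_list(vol, &base_ni->attr_list_rl,
			base_ni->attr_list, base_ni->attr_list_size,
			initialized_size);
}
#endif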
808 * ntfs_external_attr_find - find an attribute in the attribute list of an inode
809 * @type: attribute type to find
810 * @name: attribute name to find (optional, i.e. NULL means don't care)
811 * @name_len: attribute name length (only needed if @name present)
812 * @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
813 * @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
814 * @val: attribute value to find (optional, resident attributes only)
815 * @val_len: attribute value length
816 * @ctx: search context with mft record and attribute to search from
818 * You should not need to call this function directly. Use ntfs_attr_lookup()
819 * instead.
821 * Find an attribute by searching the attribute list for the corresponding
822 * attribute list entry. Having found the entry, map the mft record if the
823 * attribute is in a different mft record/inode, ntfs_attr_find() the attribute
824 * in there and return it.
826 * On first search @ctx->ntfs_ino must be the base mft record and @ctx must
827 * have been obtained from a call to ntfs_attr_get_search_ctx(). On subsequent
828 * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
829 * then the base inode).
831 * After finishing with the attribute/mft record you need to call
832 * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
833 * mapped inodes, etc).
835 * If the attribute is found, ntfs_external_attr_find() returns 0 and
836 * @ctx->attr will point to the found attribute. @ctx->mrec will point to the
837 * mft record in which @ctx->attr is located and @ctx->al_entry will point to
838 * the attribute list entry for the attribute.
840 * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
841 * @ctx->attr will point to the attribute in the base mft record before which
842 * the attribute being searched for would need to be inserted if such an action
843 * were to be desired. @ctx->mrec will point to the mft record in which
844 * @ctx->attr is located and @ctx->al_entry will point to the attribute list
845 * entry of the attribute before which the attribute being searched for would
846 * need to be inserted if such an action were to be desired.
848 * Thus to insert the not found attribute, one wants to add the attribute to
849 * @ctx->mrec (the base mft record) and if there is not enough space, the
850 * attribute should be placed in a newly allocated extent mft record. The
851 * attribute list entry for the inserted attribute should be inserted in the
852 * attribute list attribute at @ctx->al_entry.
854 * On actual error, ntfs_external_attr_find() returns -EIO. In this case
855 * @ctx->attr is undefined and in particular do not rely on it not changing.
857 static int ntfs_external_attr_find(const ATTR_TYPE type,
858 const ntfschar *name, const u32 name_len,
859 const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
860 const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
862 ntfs_inode *base_ni, *ni;
863 ntfs_volume *vol;
864 ATTR_LIST_ENTRY *al_entry, *next_al_entry;
865 u8 *al_start, *al_end;
866 ATTR_RECORD *a;
867 ntfschar *al_name;
868 u32 al_name_len;
869 int err = 0;
870 static const char *es = " Unmount and run chkdsk.";
872 ni = ctx->ntfs_ino;
873 base_ni = ctx->base_ntfs_ino;
874 ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
875 if (!base_ni) {
876 /* First call happens with the base mft record. */
877 base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
878 ctx->base_mrec = ctx->mrec;
880 if (ni == base_ni)
881 ctx->base_attr = ctx->attr;
882 if (type == AT_END)
883 goto not_found;
884 vol = base_ni->vol;
885 al_start = base_ni->attr_list;
886 al_end = al_start + base_ni->attr_list_size;
887 if (!ctx->al_entry)
888 ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
890 * Iterate over entries in attribute list starting at @ctx->al_entry,
891 * or the entry following that, if @ctx->is_first is 'true'.
893 if (ctx->is_first) {
894 al_entry = ctx->al_entry;
895 ctx->is_first = false;
896 } else
897 al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
898 le16_to_cpu(ctx->al_entry->length));
899 for (;; al_entry = next_al_entry) {
900 /* Out of bounds check. */
901 if ((u8*)al_entry < base_ni->attr_list ||
902 (u8*)al_entry > al_end)
903 break; /* Inode is corrupt. */
904 ctx->al_entry = al_entry;
905 /* Catch the end of the attribute list. */
906 if ((u8*)al_entry == al_end)
907 goto not_found;
908 if (!al_entry->length)
909 break;
910 if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
911 le16_to_cpu(al_entry->length) > al_end)
912 break;
913 next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
914 le16_to_cpu(al_entry->length));
915 if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
916 goto not_found;
917 if (type != al_entry->type)
918 continue;
920 * If @name is present, compare the two names. If @name is
921 * missing, assume we want an unnamed attribute.
923 al_name_len = al_entry->name_length;
924 al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
925 if (!name) {
926 if (al_name_len)
927 goto not_found;
928 } else if (!ntfs_are_names_equal(al_name, al_name_len, name,
929 name_len, ic, vol->upcase, vol->upcase_len)) {
930 register int rc;
932 rc = ntfs_collate_names(name, name_len, al_name,
933 al_name_len, 1, IGNORE_CASE,
934 vol->upcase, vol->upcase_len);
936 * If @name collates before al_name, there is no
937 * matching attribute.
939 if (rc == -1)
940 goto not_found;
941 /* If the strings are not equal, continue search. */
942 if (rc)
943 continue;
944 rc = ntfs_collate_names(name, name_len, al_name,
945 al_name_len, 1, CASE_SENSITIVE,
946 vol->upcase, vol->upcase_len);
947 if (rc == -1)
948 goto not_found;
949 if (rc)
950 continue;
953 * The names match or @name not present and attribute is
954 * unnamed. Now check @lowest_vcn. Continue search if the
955 * next attribute list entry still fits @lowest_vcn. Otherwise
956 * we have reached the right one or the search has failed.
958 if (lowest_vcn && (u8*)next_al_entry >= al_start &&
959 (u8*)next_al_entry + 6 < al_end &&
960 (u8*)next_al_entry + le16_to_cpu(
961 next_al_entry->length) <= al_end &&
962 sle64_to_cpu(next_al_entry->lowest_vcn) <=
963 lowest_vcn &&
964 next_al_entry->type == al_entry->type &&
965 next_al_entry->name_length == al_name_len &&
966 ntfs_are_names_equal((ntfschar*)((u8*)
967 next_al_entry +
968 next_al_entry->name_offset),
969 next_al_entry->name_length,
970 al_name, al_name_len, CASE_SENSITIVE,
971 vol->upcase, vol->upcase_len))
972 continue;
973 if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
974 if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
975 ntfs_error(vol->sb, "Found stale mft "
976 "reference in attribute list "
977 "of base inode 0x%lx.%s",
978 base_ni->mft_no, es);
979 err = -EIO;
980 break;
982 } else { /* Mft references do not match. */
983 /* If there is a mapped record unmap it first. */
984 if (ni != base_ni)
985 unmap_extent_mft_record(ni);
986 /* Do we want the base record back? */
987 if (MREF_LE(al_entry->mft_reference) ==
988 base_ni->mft_no) {
989 ni = ctx->ntfs_ino = base_ni;
990 ctx->mrec = ctx->base_mrec;
991 } else {
992 /* We want an extent record. */
993 ctx->mrec = map_extent_mft_record(base_ni,
994 le64_to_cpu(
995 al_entry->mft_reference), &ni);
996 if (IS_ERR(ctx->mrec)) {
997 ntfs_error(vol->sb, "Failed to map "
998 "extent mft record "
999 "0x%lx of base inode "
1000 "0x%lx.%s",
1001 MREF_LE(al_entry->
1002 mft_reference),
1003 base_ni->mft_no, es);
1004 err = PTR_ERR(ctx->mrec);
1005 if (err == -ENOENT)
1006 err = -EIO;
1007 /* Cause @ctx to be sanitized below. */
1008 ni = NULL;
1009 break;
1011 ctx->ntfs_ino = ni;
1013 ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1014 le16_to_cpu(ctx->mrec->attrs_offset));
1017 * ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
1018 * mft record containing the attribute represented by the
1019 * current al_entry.
1022 * We could call into ntfs_attr_find() to find the right
1023 * attribute in this mft record but this would be less
1024 * efficient and not quite accurate as ntfs_attr_find() ignores
1025 * the attribute instance numbers for example which become
1026 * important when one plays with attribute lists. Also,
1027 * because a proper match has been found in the attribute list
1028 * entry above, the comparison can now be optimized. So it is
1029 * worth re-implementing a simplified ntfs_attr_find() here.
1031 a = ctx->attr;
1033 * Use a manual loop so we can still use break and continue
1034 * with the same meanings as above.
1036 do_next_attr_loop:
1037 if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
1038 le32_to_cpu(ctx->mrec->bytes_allocated))
1039 break;
1040 if (a->type == AT_END)
1041 break;
1042 if (!a->length)
1043 break;
1044 if (al_entry->instance != a->instance)
1045 goto do_next_attr;
1047 * If the type and/or the name are mismatched between the
1048 * attribute list entry and the attribute record, there is
1049 * corruption so we break and return error EIO.
1051 if (al_entry->type != a->type)
1052 break;
1053 if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
1054 le16_to_cpu(a->name_offset)), a->name_length,
1055 al_name, al_name_len, CASE_SENSITIVE,
1056 vol->upcase, vol->upcase_len))
1057 break;
1058 ctx->attr = a;
1060 * If no @val specified or @val specified and it matches, we
1061 * have found it!
1063 if (!val || (!a->non_resident && le32_to_cpu(
1064 a->data.resident.value_length) == val_len &&
1065 !memcmp((u8*)a +
1066 le16_to_cpu(a->data.resident.value_offset),
1067 val, val_len))) {
1068 ntfs_debug("Done, found.");
1069 return 0;
1071 do_next_attr:
1072 /* Proceed to the next attribute in the current mft record. */
1073 a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
1074 goto do_next_attr_loop;
1076 if (!err) {
1077 ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
1078 "attribute list attribute.%s", base_ni->mft_no,
1079 es);
1080 err = -EIO;
1082 if (ni != base_ni) {
1083 if (ni)
1084 unmap_extent_mft_record(ni);
1085 ctx->ntfs_ino = base_ni;
1086 ctx->mrec = ctx->base_mrec;
1087 ctx->attr = ctx->base_attr;
1089 if (err != -ENOMEM)
1090 NVolSetErrors(vol);
1091 return err;
1092 not_found:
1094 * If we were looking for AT_END, we reset the search context @ctx and
1095 * use ntfs_attr_find() to seek to the end of the base mft record.
1097 if (type == AT_END) {
1098 ntfs_attr_reinit_search_ctx(ctx);
1099 return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
1100 ctx);
1103 * The attribute was not found. Before we return, we want to ensure
1104 * @ctx->mrec and @ctx->attr indicate the position at which the
1105 * attribute should be inserted in the base mft record. Since we also
1106 * want to preserve @ctx->al_entry we cannot reinitialize the search
1107 * context using ntfs_attr_reinit_search_ctx() as this would set
1108 * @ctx->al_entry to NULL. Thus we do the necessary bits manually (see
1109 * ntfs_attr_init_search_ctx() below). Note, we _only_ preserve
1110 * @ctx->al_entry as the remaining fields (base_*) are identical to
1111 * their non base_ counterparts and we cannot set @ctx->base_attr
1112 * correctly yet as we do not know what @ctx->attr will be set to by
1113 * the call to ntfs_attr_find() below.
1115 if (ni != base_ni)
1116 unmap_extent_mft_record(ni);
1117 ctx->mrec = ctx->base_mrec;
1118 ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1119 le16_to_cpu(ctx->mrec->attrs_offset));
1120 ctx->is_first = true;
1121 ctx->ntfs_ino = base_ni;
1122 ctx->base_ntfs_ino = NULL;
1123 ctx->base_mrec = NULL;
1124 ctx->base_attr = NULL;
1126 * In case there are multiple matches in the base mft record, need to
1127 * keep enumerating until we get an attribute not found response (or
1128 * another error), otherwise we would keep returning the same attribute
1129 * over and over again and all programs using us for enumeration would
1130 * lock up in a tight loop.
1132 do {
1133 err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
1134 ctx);
1135 } while (!err);
1136 ntfs_debug("Done, not found.");
1137 return err;
1141 * ntfs_attr_lookup - find an attribute in an ntfs inode
1142 * @type: attribute type to find
1143 * @name: attribute name to find (optional, i.e. NULL means don't care)
1144 * @name_len: attribute name length (only needed if @name present)
1145 * @ic: IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
1146 * @lowest_vcn: lowest vcn to find (optional, non-resident attributes only)
1147 * @val: attribute value to find (optional, resident attributes only)
1148 * @val_len: attribute value length
1149 * @ctx: search context with mft record and attribute to search from
1151 * Find an attribute in an ntfs inode. On first search @ctx->ntfs_ino must
1152 * be the base mft record and @ctx must have been obtained from a call to
1153 * ntfs_attr_get_search_ctx().
1155 * This function transparently handles attribute lists and @ctx is used to
1156 * continue searches where they were left off at.
1158 * After finishing with the attribute/mft record you need to call
1159 * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
1160 * mapped inodes, etc).
1162 * Return 0 if the search was successful and -errno if not.
1164 * When 0, @ctx->attr is the found attribute and it is in mft record
1165 * @ctx->mrec. If an attribute list attribute is present, @ctx->al_entry is
1166 * the attribute list entry of the found attribute.
1168 * When -ENOENT, @ctx->attr is the attribute which collates just after the
1169 * attribute being searched for, i.e. if one wants to add the attribute to the
1170 * mft record this is the correct place to insert it into. If an attribute
1171 * list attribute is present, @ctx->al_entry is the attribute list entry which
1172 * collates just after the attribute list entry of the attribute being searched
1173 * for, i.e. if one wants to add the attribute to the mft record this is the
1174 * correct place to insert its attribute list entry into.
1176 * When -errno != -ENOENT, an error occurred during the lookup. @ctx->attr is
1177 * then undefined and in particular you should not rely on it not changing.
1179 int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
1180 const u32 name_len, const IGNORE_CASE_BOOL ic,
1181 const VCN lowest_vcn, const u8 *val, const u32 val_len,
1182 ntfs_attr_search_ctx *ctx)
1184 ntfs_inode *base_ni;
1186 ntfs_debug("Entering.");
1187 BUG_ON(IS_ERR(ctx->mrec));
1188 if (ctx->base_ntfs_ino)
1189 base_ni = ctx->base_ntfs_ino;
1190 else
1191 base_ni = ctx->ntfs_ino;
1192 /* Sanity check, just for debugging really. */
1193 BUG_ON(!base_ni);
1194 if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
1195 return ntfs_attr_find(type, name, name_len, ic, val, val_len,
1196 ctx);
1197 return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
1198 val, val_len, ctx);
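/*
 * Illustrative usage sketch (not part of the original file, never built):
 * the canonical lookup pattern used throughout the driver: map the base
 * mft record, get a search context, look the attribute up, use it, then
 * release everything in reverse order.
 */
#if 0
static int example_lookup_attr(ntfs_inode *ni)
{
	ntfs_inode *base_ni;
	MFT_RECORD *m;
	ntfs_attr_search_ctx *ctx;
	ATTR_RECORD *a;
	int err;

	base_ni = NInoAttr(ni) ? ni->ext.base_ntfs_ino : ni;
	m = map_mft_record(base_ni);
	if (IS_ERR(m))
		return PTR_ERR(m);
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (!ctx) {
		err = -ENOMEM;
		goto unm_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (err)
		goto put_err_out;
	a = ctx->attr;
	/* ... inspect or modify the attribute record @a ... */
put_err_out:
	ntfs_attr_put_search_ctx(ctx);
unm_err_out:
	unmap_mft_record(base_ni);
	return err;
}
#endif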
1202 * ntfs_attr_init_search_ctx - initialize an attribute search context
1203 * @ctx: attribute search context to initialize
1204 * @ni: ntfs inode with which to initialize the search context
1205 * @mrec: mft record with which to initialize the search context
1207 * Initialize the attribute search context @ctx with @ni and @mrec.
1209 static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
1210 ntfs_inode *ni, MFT_RECORD *mrec)
1212 *ctx = (ntfs_attr_search_ctx) {
1213 .mrec = mrec,
1214 /* Sanity checks are performed elsewhere. */
1215 .attr = (ATTR_RECORD*)((u8*)mrec +
1216 le16_to_cpu(mrec->attrs_offset)),
1217 .is_first = true,
1218 .ntfs_ino = ni,
1223 * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
1224 * @ctx: attribute search context to reinitialize
1226 * Reinitialize the attribute search context @ctx, unmapping an associated
1227 * extent mft record if present, and initialize the search context again.
1229 * This is used when a search for a new attribute is being started to reset
1230 * the search context to the beginning.
1232 void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
1234 if (likely(!ctx->base_ntfs_ino)) {
1235 /* No attribute list. */
1236 ctx->is_first = true;
1237 /* Sanity checks are performed elsewhere. */
1238 ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1239 le16_to_cpu(ctx->mrec->attrs_offset));
1241 * This needs resetting due to ntfs_external_attr_find() which
1242 * can leave it set despite having zeroed ctx->base_ntfs_ino.
1244 ctx->al_entry = NULL;
1245 return;
1246 } /* Attribute list. */
1247 if (ctx->ntfs_ino != ctx->base_ntfs_ino)
1248 unmap_extent_mft_record(ctx->ntfs_ino);
1249 ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
1250 return;
1254 * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
1255 * @ni: ntfs inode with which to initialize the search context
1256 * @mrec: mft record with which to initialize the search context
1258 * Allocate a new attribute search context, initialize it with @ni and @mrec,
1259 * and return it. Return NULL if allocation failed.
1261 ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
1263 ntfs_attr_search_ctx *ctx;
1265 ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
1266 if (ctx)
1267 ntfs_attr_init_search_ctx(ctx, ni, mrec);
1268 return ctx;
1272 * ntfs_attr_put_search_ctx - release an attribute search context
1273 * @ctx: attribute search context to free
1275 * Release the attribute search context @ctx, unmapping an associated extent
1276 * mft record if present.
1278 void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
1280 if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
1281 unmap_extent_mft_record(ctx->ntfs_ino);
1282 kmem_cache_free(ntfs_attr_ctx_cache, ctx);
1283 return;
1286 #ifdef NTFS_RW
1289 * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
1290 * @vol: ntfs volume to which the attribute belongs
1291 * @type: attribute type which to find
1293 * Search for the attribute definition record corresponding to the attribute
1294 * @type in the $AttrDef system file.
1296 * Return the attribute type definition record if found and NULL if not found.
1298 static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
1299 const ATTR_TYPE type)
1301 ATTR_DEF *ad;
1303 BUG_ON(!vol->attrdef);
1304 BUG_ON(!type);
1305 for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
1306 vol->attrdef_size && ad->type; ++ad) {
1307 /* We have not found it yet, carry on searching. */
1308 if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
1309 continue;
1310 /* We found the attribute; return it. */
1311 if (likely(ad->type == type))
1312 return ad;
1313 /* We have gone too far already. No point in continuing. */
1314 break;
1316 /* Attribute not found. */
1317 ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
1318 le32_to_cpu(type));
1319 return NULL;
1323 * ntfs_attr_size_bounds_check - check a size of an attribute type for validity
1324 * @vol: ntfs volume to which the attribute belongs
1325 * @type: attribute type which to check
1326 * @size: size which to check
1328 * Check whether the @size in bytes is valid for an attribute of @type on the
1329 * ntfs volume @vol. This information is obtained from $AttrDef system file.
1331 * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
1332 * listed in $AttrDef.
1334 int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
1335 const s64 size)
1337 ATTR_DEF *ad;
1339 BUG_ON(size < 0);
1341 * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
1342 * listed in $AttrDef.
1344 if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
1345 return -ERANGE;
1346 /* Get the $AttrDef entry for the attribute @type. */
1347 ad = ntfs_attr_find_in_attrdef(vol, type);
1348 if (unlikely(!ad))
1349 return -ENOENT;
1350 /* Do the bounds check. */
1351 if (((sle64_to_cpu(ad->min_size) > 0) &&
1352 size < sle64_to_cpu(ad->min_size)) ||
1353 ((sle64_to_cpu(ad->max_size) > 0) && size >
1354 sle64_to_cpu(ad->max_size)))
1355 return -ERANGE;
1356 return 0;
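/*
 * Illustrative usage sketch (not part of the original file, never built):
 * validate a proposed attribute size before attempting a resize and turn
 * the two failure codes into log messages.
 */
#if 0
static int example_check_new_size(const ntfs_volume *vol, const ATTR_TYPE type,
		const s64 new_size)
{
	int err = ntfs_attr_size_bounds_check(vol, type, new_size);

	if (err == -ERANGE)
		ntfs_error(vol->sb, "Size 0x%llx is out of bounds for "
				"attribute type 0x%x.", (long long)new_size,
				le32_to_cpu(type));
	else if (err == -ENOENT)
		ntfs_error(vol->sb, "Attribute type 0x%x is not listed in "
				"$AttrDef.", le32_to_cpu(type));
	return err;
}
#endif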
1360 * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
1361 * @vol: ntfs volume to which the attribute belongs
1362 * @type: attribute type which to check
1364 * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1365 * be non-resident. This information is obtained from $AttrDef system file.
1367 * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
1368 * -ENOENT if the attribute is not listed in $AttrDef.
1370 int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1372 ATTR_DEF *ad;
1374 /* Find the attribute definition record in $AttrDef. */
1375 ad = ntfs_attr_find_in_attrdef(vol, type);
1376 if (unlikely(!ad))
1377 return -ENOENT;
1378 /* Check the flags and return the result. */
1379 if (ad->flags & ATTR_DEF_RESIDENT)
1380 return -EPERM;
1381 return 0;
1385 * ntfs_attr_can_be_resident - check if an attribute can be resident
1386 * @vol: ntfs volume to which the attribute belongs
1387 * @type: attribute type which to check
1389 * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1390 * be resident. This information is derived from our ntfs knowledge and may
1391 * not be completely accurate, especially when user defined attributes are
1392 * present. Basically we allow everything to be resident except for index
1393 * allocation and $EA attributes.
1395 * Return 0 if the attribute is allowed to be non-resident and -EPERM if not.
1397 * Warning: In the system file $MFT the attribute $Bitmap must be non-resident
1398 * otherwise windows will not boot (blue screen of death)! We cannot
1399 * check for this here as we do not know which inode's $Bitmap is
1400 * being asked about so the caller needs to special case this.
1402 int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1404 if (type == AT_INDEX_ALLOCATION)
1405 return -EPERM;
1406 return 0;
1410 * ntfs_attr_record_resize - resize an attribute record
1411 * @m: mft record containing attribute record
1412 * @a: attribute record to resize
1413 * @new_size: new size in bytes to which to resize the attribute record @a
1415 * Resize the attribute record @a, i.e. the resident part of the attribute, in
1416 * the mft record @m to @new_size bytes.
1418 * Return 0 on success and -errno on error. The following error codes are
1419 * defined:
1420 * -ENOSPC - Not enough space in the mft record @m to perform the resize.
1422 * Note: On error, no modifications have been performed whatsoever.
1424 * Warning: If you make a record smaller without having copied all the data you
1425 * are interested in, the data may be overwritten.
1427 int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
1429 ntfs_debug("Entering for new_size %u.", new_size);
1430 /* Align to 8 bytes if it is not already done. */
1431 if (new_size & 7)
1432 new_size = (new_size + 7) & ~7;
1433 /* If the actual attribute length has changed, move things around. */
1434 if (new_size != le32_to_cpu(a->length)) {
1435 u32 new_muse = le32_to_cpu(m->bytes_in_use) -
1436 le32_to_cpu(a->length) + new_size;
1437 /* Not enough space in this mft record. */
1438 if (new_muse > le32_to_cpu(m->bytes_allocated))
1439 return -ENOSPC;
1440 /* Move attributes following @a to their new location. */
1441 memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
1442 le32_to_cpu(m->bytes_in_use) - ((u8*)a -
1443 (u8*)m) - le32_to_cpu(a->length));
1444 /* Adjust @m to reflect the change in used space. */
1445 m->bytes_in_use = cpu_to_le32(new_muse);
1446 /* Adjust @a to reflect the new size. */
1447 if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
1448 a->length = cpu_to_le32(new_size);
1450 return 0;
1454 * ntfs_resident_attr_value_resize - resize the value of a resident attribute
1455 * @m: mft record containing attribute record
1456 * @a: attribute record whose value to resize
1457 * @new_size: new size in bytes to which to resize the attribute value of @a
1459 * Resize the value of the attribute @a in the mft record @m to @new_size bytes.
1460 * If the value is made bigger, the newly allocated space is cleared.
1462 * Return 0 on success and -errno on error. The following error codes are
1463 * defined:
1464 * -ENOSPC - Not enough space in the mft record @m to perform the resize.
1466 * Note: On error, no modifications have been performed whatsoever.
1468 * Warning: If you make a record smaller without having copied all the data you
1469 * are interested in, the data may be overwritten.
1471 int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
1472 const u32 new_size)
1474 u32 old_size;
1476 /* Resize the resident part of the attribute record. */
1477 if (ntfs_attr_record_resize(m, a,
1478 le16_to_cpu(a->data.resident.value_offset) + new_size))
1479 return -ENOSPC;
1481 * The resize succeeded! If we made the attribute value bigger, clear
1482 * the area between the old size and @new_size.
1484 old_size = le32_to_cpu(a->data.resident.value_length);
1485 if (new_size > old_size)
1486 memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
1487 old_size, 0, new_size - old_size);
1488 /* Finally update the length of the attribute value. */
1489 a->data.resident.value_length = cpu_to_le32(new_size);
1490 return 0;
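/*
 * Illustrative usage sketch (not part of the original file, never built):
 * grow the value of a resident attribute and copy new data into it,
 * assuming @m and @a were obtained via ntfs_attr_lookup() and the caller
 * marks the mft record dirty afterwards.
 */
#if 0
static int example_set_resident_value(MFT_RECORD *m, ATTR_RECORD *a,
		const u8 *val, const u32 val_len)
{
	int err = ntfs_resident_attr_value_resize(m, a, val_len);

	if (err)
		return err;	/* -ENOSPC: it does not fit in this mft record. */
	memcpy((u8*)a + le16_to_cpu(a->data.resident.value_offset), val,
			val_len);
	return 0;
}
#endif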
1493 int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1495 s64 new_size;
1496 struct inode *vi = VFS_I(ni);
1497 ntfs_volume *vol = ni->vol;
1498 ntfs_inode *base_ni;
1499 MFT_RECORD *m;
1500 ATTR_RECORD *a;
1501 ntfs_attr_search_ctx *ctx;
1502 struct page *page;
1503 runlist_element *rl;
1504 u8 *kaddr;
1505 unsigned long flags;
1506 int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
1507 u32 attr_size;
1508 u8 old_res_attr_flags;
1510 /* Check that the attribute is allowed to be non-resident. */
1511 err = ntfs_attr_can_be_non_resident(vol, ni->type);
1512 if (unlikely(err)) {
1513 if (err == -EPERM)
1514 ntfs_debug("Attribute is not allowed to be "
1515 "non-resident.");
1516 else
1517 ntfs_debug("Attribute not defined on the NTFS "
1518 "volume!");
1519 return err;
1521 BUG_ON(NInoCompressed(ni));
1522 BUG_ON(NInoEncrypted(ni));
1524 * The size needs to be aligned to a cluster boundary for allocation
1525 * purposes.
1527 new_size = (data_size + vol->cluster_size - 1) &
1528 ~(vol->cluster_size - 1);
1529 if (new_size > 0) {
1531 * Will need the page later and since the page lock nests
1532 * outside all ntfs locks, we need to get the page now.
1534 page = find_or_create_page(vi->i_mapping, 0,
1535 mapping_gfp_mask(vi->i_mapping));
1536 if (unlikely(!page))
1537 return -ENOMEM;
1538 /* Start by allocating clusters to hold the attribute value. */
1539 rl = ntfs_cluster_alloc(vol, 0, new_size >>
1540 vol->cluster_size_bits, -1, DATA_ZONE, true);
1541 if (IS_ERR(rl)) {
1542 err = PTR_ERR(rl);
1543 ntfs_debug("Failed to allocate cluster%s, error code "
1544 "%i.", (new_size >>
1545 vol->cluster_size_bits) > 1 ? "s" : "",
1546 err);
1547 goto page_err_out;
1549 } else {
1550 rl = NULL;
1551 page = NULL;
1553 /* Determine the size of the mapping pairs array. */
1554 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
1555 if (unlikely(mp_size < 0)) {
1556 err = mp_size;
1557 ntfs_debug("Failed to get size for mapping pairs array, error "
1558 "code %i.", err);
1559 goto rl_err_out;
1561 down_write(&ni->runlist.lock);
1562 if (!NInoAttr(ni))
1563 base_ni = ni;
1564 else
1565 base_ni = ni->ext.base_ntfs_ino;
1566 m = map_mft_record(base_ni);
1567 if (IS_ERR(m)) {
1568 err = PTR_ERR(m);
1569 m = NULL;
1570 ctx = NULL;
1571 goto err_out;
1573 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1574 if (unlikely(!ctx)) {
1575 err = -ENOMEM;
1576 goto err_out;
1578 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1579 CASE_SENSITIVE, 0, NULL, 0, ctx);
1580 if (unlikely(err)) {
1581 if (err == -ENOENT)
1582 err = -EIO;
1583 goto err_out;
1585 m = ctx->mrec;
1586 a = ctx->attr;
1587 BUG_ON(NInoNonResident(ni));
1588 BUG_ON(a->non_resident);
1590 * Calculate new offsets for the name and the mapping pairs array.
1592 if (NInoSparse(ni) || NInoCompressed(ni))
1593 name_ofs = (offsetof(ATTR_REC,
1594 data.non_resident.compressed_size) +
1595 sizeof(a->data.non_resident.compressed_size) +
1596 7) & ~7;
1597 else
1598 name_ofs = (offsetof(ATTR_REC,
1599 data.non_resident.compressed_size) + 7) & ~7;
1600 mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1602 * Determine the size of the resident part of the now non-resident
1603 * attribute record.
1605 arec_size = (mp_ofs + mp_size + 7) & ~7;
1607 * If the page is not uptodate bring it uptodate by copying from the
1608 * attribute value.
1610 attr_size = le32_to_cpu(a->data.resident.value_length);
1611 BUG_ON(attr_size != data_size);
1612 if (page && !PageUptodate(page)) {
1613 kaddr = kmap_atomic(page, KM_USER0);
1614 memcpy(kaddr, (u8*)a +
1615 le16_to_cpu(a->data.resident.value_offset),
1616 attr_size);
1617 memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
1618 kunmap_atomic(kaddr, KM_USER0);
1619 flush_dcache_page(page);
1620 SetPageUptodate(page);
1621 }
1622 /* Backup the attribute flag. */
1623 old_res_attr_flags = a->data.resident.flags;
1624 /* Resize the resident part of the attribute record. */
1625 err = ntfs_attr_record_resize(m, a, arec_size);
1626 if (unlikely(err))
1627 goto err_out;
1628 /*
1629 * Convert the resident part of the attribute record to describe a
1630 * non-resident attribute.
1631 */
1632 a->non_resident = 1;
1633 /* Move the attribute name if it exists and update the offset. */
1634 if (a->name_length)
1635 memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1636 a->name_length * sizeof(ntfschar));
1637 a->name_offset = cpu_to_le16(name_ofs);
1638 /* Setup the fields specific to non-resident attributes. */
1639 a->data.non_resident.lowest_vcn = 0;
1640 a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
1641 vol->cluster_size_bits);
1642 a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
1643 memset(&a->data.non_resident.reserved, 0,
1644 sizeof(a->data.non_resident.reserved));
1645 a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
1646 a->data.non_resident.data_size =
1647 a->data.non_resident.initialized_size =
1648 cpu_to_sle64(attr_size);
1649 if (NInoSparse(ni) || NInoCompressed(ni)) {
1650 a->data.non_resident.compression_unit = 0;
1651 if (NInoCompressed(ni) || vol->major_ver < 3)
1652 a->data.non_resident.compression_unit = 4;
1653 a->data.non_resident.compressed_size =
1654 a->data.non_resident.allocated_size;
1655 } else
1656 a->data.non_resident.compression_unit = 0;
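/*
 * Illustrative note: a compression_unit of 4 means compression blocks of
 * 1 << 4 == 16 clusters, i.e. 64 KiB with the common 4 KiB cluster size;
 * block_size is derived from it below as
 * 1U << (compression_unit + cluster_size_bits).
 */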
1657 /* Generate the mapping pairs array into the attribute record. */
1658 err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
1659 arec_size - mp_ofs, rl, 0, -1, NULL);
1660 if (unlikely(err)) {
1661 ntfs_debug("Failed to build mapping pairs, error code %i.",
1662 err);
1663 goto undo_err_out;
1664 }
1665 /* Setup the in-memory attribute structure to be non-resident. */
1666 ni->runlist.rl = rl;
1667 write_lock_irqsave(&ni->size_lock, flags);
1668 ni->allocated_size = new_size;
1669 if (NInoSparse(ni) || NInoCompressed(ni)) {
1670 ni->itype.compressed.size = ni->allocated_size;
1671 if (a->data.non_resident.compression_unit) {
1672 ni->itype.compressed.block_size = 1U << (a->data.
1673 non_resident.compression_unit +
1674 vol->cluster_size_bits);
1675 ni->itype.compressed.block_size_bits =
1676 ffs(ni->itype.compressed.block_size) -
1677 1;
1678 ni->itype.compressed.block_clusters = 1U <<
1679 a->data.non_resident.compression_unit;
1680 } else {
1681 ni->itype.compressed.block_size = 0;
1682 ni->itype.compressed.block_size_bits = 0;
1683 ni->itype.compressed.block_clusters = 0;
1684 }
1685 vi->i_blocks = ni->itype.compressed.size >> 9;
1686 } else
1687 vi->i_blocks = ni->allocated_size >> 9;
1688 write_unlock_irqrestore(&ni->size_lock, flags);
1689 /*
1690 * This needs to be last since the address space operations ->readpage
1691 * and ->writepage can run concurrently with us as they are not
1692 * serialized on i_mutex. Note, we are not allowed to fail once we flip
1693 * this switch, which is another reason to do this last.
1694 */
1695 NInoSetNonResident(ni);
1696 /* Mark the mft record dirty, so it gets written back. */
1697 flush_dcache_mft_record_page(ctx->ntfs_ino);
1698 mark_mft_record_dirty(ctx->ntfs_ino);
1699 ntfs_attr_put_search_ctx(ctx);
1700 unmap_mft_record(base_ni);
1701 up_write(&ni->runlist.lock);
1702 if (page) {
1703 set_page_dirty(page);
1704 unlock_page(page);
1705 mark_page_accessed(page);
1706 page_cache_release(page);
1707 }
1708 ntfs_debug("Done.");
1709 return 0;
1710 undo_err_out:
1711 /* Convert the attribute back into a resident attribute. */
1712 a->non_resident = 0;
1713 /* Move the attribute name if it exists and update the offset. */
1714 name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
1715 sizeof(a->data.resident.reserved) + 7) & ~7;
1716 if (a->name_length)
1717 memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1718 a->name_length * sizeof(ntfschar));
1719 mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1720 a->name_offset = cpu_to_le16(name_ofs);
1721 arec_size = (mp_ofs + attr_size + 7) & ~7;
1722 /* Resize the resident part of the attribute record. */
1723 err2 = ntfs_attr_record_resize(m, a, arec_size);
1724 if (unlikely(err2)) {
1725 arec_size = le32_to_cpu(a->length);
1726 if ((mp_ofs + attr_size) > arec_size) {
1727 err2 = attr_size;
1728 attr_size = arec_size - mp_ofs;
1729 ntfs_error(vol->sb, "Failed to undo partial resident "
1730 "to non-resident attribute "
1731 "conversion. Truncating inode 0x%lx, "
1732 "attribute type 0x%x from %i bytes to "
1733 "%i bytes to maintain metadata "
1734 "consistency. THIS MEANS YOU ARE "
1735 "LOSING %i BYTES DATA FROM THIS %s.",
1736 vi->i_ino,
1737 (unsigned)le32_to_cpu(ni->type),
1738 err2, attr_size, err2 - attr_size,
1739 ((ni->type == AT_DATA) &&
1740 !ni->name_len) ? "FILE": "ATTRIBUTE");
1741 write_lock_irqsave(&ni->size_lock, flags);
1742 ni->initialized_size = attr_size;
1743 i_size_write(vi, attr_size);
1744 write_unlock_irqrestore(&ni->size_lock, flags);
1745 }
1746 }
1747 /* Setup the fields specific to resident attributes. */
1748 a->data.resident.value_length = cpu_to_le32(attr_size);
1749 a->data.resident.value_offset = cpu_to_le16(mp_ofs);
1750 a->data.resident.flags = old_res_attr_flags;
1751 memset(&a->data.resident.reserved, 0,
1752 sizeof(a->data.resident.reserved));
1753 /* Copy the data from the page back to the attribute value. */
1754 if (page) {
1755 kaddr = kmap_atomic(page, KM_USER0);
1756 memcpy((u8*)a + mp_ofs, kaddr, attr_size);
1757 kunmap_atomic(kaddr, KM_USER0);
1758 }
1759 /* Setup the allocated size in the ntfs inode in case it changed. */
1760 write_lock_irqsave(&ni->size_lock, flags);
1761 ni->allocated_size = arec_size - mp_ofs;
1762 write_unlock_irqrestore(&ni->size_lock, flags);
1763 /* Mark the mft record dirty, so it gets written back. */
1764 flush_dcache_mft_record_page(ctx->ntfs_ino);
1765 mark_mft_record_dirty(ctx->ntfs_ino);
1766 err_out:
1767 if (ctx)
1768 ntfs_attr_put_search_ctx(ctx);
1769 if (m)
1770 unmap_mft_record(base_ni);
1771 ni->runlist.rl = NULL;
1772 up_write(&ni->runlist.lock);
1773 rl_err_out:
1774 if (rl) {
1775 if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
1776 ntfs_error(vol->sb, "Failed to release allocated "
1777 "cluster(s) in error code path. Run "
1778 "chkdsk to recover the lost "
1779 "cluster(s).");
1780 NVolSetErrors(vol);
1781 }
1782 ntfs_free(rl);
1783 page_err_out:
1784 unlock_page(page);
1785 page_cache_release(page);
1786 }
1787 if (err == -EINVAL)
1788 err = -EIO;
1789 return err;
1790 }
1792 /**
1793 * ntfs_attr_extend_allocation - extend the allocated space of an attribute
1794 * @ni: ntfs inode of the attribute whose allocation to extend
1795 * @new_alloc_size: new size in bytes to which to extend the allocation
1796 * @new_data_size: new size in bytes to which to extend the data
1797 * @data_start: beginning of region which is required to be non-sparse
1799 * Extend the allocated space of an attribute described by the ntfs inode @ni
1800 * to @new_alloc_size bytes. If @data_start is -1, the whole extension may be
1801 * implemented as a hole in the file (as long as both the volume and the ntfs
1802 * inode @ni have sparse support enabled). If @data_start is >= 0, then the
1803 * region between the old allocated size and @data_start - 1 may be made sparse
1804 * but the region between @data_start and @new_alloc_size must be backed by
1805 * actual clusters.
1807 * If @new_data_size is -1, it is ignored. If it is >= 0, then the data size
1808 * of the attribute is extended to @new_data_size. Note that the i_size of the
1809 * vfs inode is not updated. Only the data size in the base attribute record
1810 * is updated. The caller has to update i_size separately if this is required.
1811 * WARNING: It is a BUG() for @new_data_size to be smaller than the old data
1812 * size as well as for @new_data_size to be greater than @new_alloc_size.
1814 * For resident attributes this involves resizing the attribute record and if
1815 * necessary moving it and/or other attributes into extent mft records and/or
1816 * converting the attribute to a non-resident attribute which in turn involves
1817 * extending the allocation of a non-resident attribute as described below.
1819 * For non-resident attributes this involves allocating clusters in the data
1820 * zone on the volume (except for regions that are being made sparse) and
1821 * extending the run list to describe the allocated clusters as well as
1822 * updating the mapping pairs array of the attribute. This in turn involves
1823 * resizing the attribute record and if necessary moving it and/or other
1824 * attributes into extent mft records and/or splitting the attribute record
1825 * into multiple extent attribute records.
1827 * Also, the attribute list attribute is updated if present and in some of the
1828 * above cases (the ones where extent mft records/attributes come into play),
1829 * an attribute list attribute is created if not already present.
1831 * Return the new allocated size on success and -errno on error. In the case
1832 * that an error is encountered but a partial extension at least up to
1833 * @data_start (if present) is possible, the allocation is partially extended
1834 * and this is returned. This means the caller must check the returned size to
1835 * determine if the extension was partial. If @data_start is -1 then partial
1836 * allocations are not performed.
1838 * WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
1840 * Locking: This function takes the runlist lock of @ni for writing as well as
1841 * locking the mft record of the base ntfs inode. These locks are maintained
1842 * throughout execution of the function. These locks are required so that the
1843 * attribute can be resized safely and so that it can for example be converted
1844 * from resident to non-resident safely.
1846 * TODO: At present attribute list attribute handling is not implemented.
1848 * TODO: At present it is not safe to call this function for anything other
1849 * than the $DATA attribute(s) of an uncompressed and unencrypted file.
1850 */
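/*
 * Illustrative usage sketch (hypothetical caller, not taken from the
 * driver): grow an attribute to new_size bytes, updating the data size in
 * the attribute record at the same time, and then update i_size, which
 * this function deliberately leaves to the caller:
 *
 *	s64 ret = ntfs_attr_extend_allocation(ni, new_size, new_size, -1);
 *	if (ret < 0)
 *		return ret;
 *	i_size_write(VFS_I(ni), new_size);
 */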
1851 s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
1852 const s64 new_data_size, const s64 data_start)
1853 {
1854 VCN vcn;
1855 s64 ll, allocated_size, start = data_start;
1856 struct inode *vi = VFS_I(ni);
1857 ntfs_volume *vol = ni->vol;
1858 ntfs_inode *base_ni;
1859 MFT_RECORD *m;
1860 ATTR_RECORD *a;
1861 ntfs_attr_search_ctx *ctx;
1862 runlist_element *rl, *rl2;
1863 unsigned long flags;
1864 int err, mp_size;
1865 u32 attr_len = 0; /* Silence stupid gcc warning. */
1866 bool mp_rebuilt;
1868 #ifdef DEBUG
1869 read_lock_irqsave(&ni->size_lock, flags);
1870 allocated_size = ni->allocated_size;
1871 read_unlock_irqrestore(&ni->size_lock, flags);
1872 ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1873 "old_allocated_size 0x%llx, "
1874 "new_allocated_size 0x%llx, new_data_size 0x%llx, "
1875 "data_start 0x%llx.", vi->i_ino,
1876 (unsigned)le32_to_cpu(ni->type),
1877 (unsigned long long)allocated_size,
1878 (unsigned long long)new_alloc_size,
1879 (unsigned long long)new_data_size,
1880 (unsigned long long)start);
1881 #endif
1882 retry_extend:
1883 /*
1884 * For non-resident attributes, @start and @new_alloc_size need to be
1885 * aligned to cluster boundaries for allocation purposes.
1886 */
1887 if (NInoNonResident(ni)) {
1888 if (start > 0)
1889 start &= ~(s64)vol->cluster_size_mask;
1890 new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
1891 ~(s64)vol->cluster_size_mask;
1892 }
1893 BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
1894 /* Check if new size is allowed in $AttrDef. */
1895 err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
1896 if (unlikely(err)) {
1897 /* Only emit errors when the write will fail completely. */
1898 read_lock_irqsave(&ni->size_lock, flags);
1899 allocated_size = ni->allocated_size;
1900 read_unlock_irqrestore(&ni->size_lock, flags);
1901 if (start < 0 || start >= allocated_size) {
1902 if (err == -ERANGE) {
1903 ntfs_error(vol->sb, "Cannot extend allocation "
1904 "of inode 0x%lx, attribute "
1905 "type 0x%x, because the new "
1906 "allocation would exceed the "
1907 "maximum allowed size for "
1908 "this attribute type.",
1909 vi->i_ino, (unsigned)
1910 le32_to_cpu(ni->type));
1911 } else {
1912 ntfs_error(vol->sb, "Cannot extend allocation "
1913 "of inode 0x%lx, attribute "
1914 "type 0x%x, because this "
1915 "attribute type is not "
1916 "defined on the NTFS volume. "
1917 "Possible corruption! You "
1918 "should run chkdsk!",
1919 vi->i_ino, (unsigned)
1920 le32_to_cpu(ni->type));
1921 }
1922 }
1923 /* Translate error code to be POSIX conformant for write(2). */
1924 if (err == -ERANGE)
1925 err = -EFBIG;
1926 else
1927 err = -EIO;
1928 return err;
1929 }
1930 if (!NInoAttr(ni))
1931 base_ni = ni;
1932 else
1933 base_ni = ni->ext.base_ntfs_ino;
1934 /*
1935 * We will be modifying both the runlist (if non-resident) and the mft
1936 * record so lock them both down.
1937 */
1938 down_write(&ni->runlist.lock);
1939 m = map_mft_record(base_ni);
1940 if (IS_ERR(m)) {
1941 err = PTR_ERR(m);
1942 m = NULL;
1943 ctx = NULL;
1944 goto err_out;
1945 }
1946 ctx = ntfs_attr_get_search_ctx(base_ni, m);
1947 if (unlikely(!ctx)) {
1948 err = -ENOMEM;
1949 goto err_out;
1950 }
1951 read_lock_irqsave(&ni->size_lock, flags);
1952 allocated_size = ni->allocated_size;
1953 read_unlock_irqrestore(&ni->size_lock, flags);
1954 /*
1955 * If non-resident, seek to the last extent. If resident, there is
1956 * only one extent, so seek to that.
1957 */
1958 vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
1959 0;
1960 /*
1961 * Abort if someone did the work whilst we waited for the locks. If we
1962 * just converted the attribute from resident to non-resident it is
1963 * likely that exactly this has happened already. We cannot quite
1964 * abort if we need to update the data size.
1965 */
1966 if (unlikely(new_alloc_size <= allocated_size)) {
1967 ntfs_debug("Allocated size already exceeds requested size.");
1968 new_alloc_size = allocated_size;
1969 if (new_data_size < 0)
1970 goto done;
1971 /*
1972 * We want the first attribute extent so that we can update the
1973 * data size.
1974 */
1975 vcn = 0;
1976 }
1977 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1978 CASE_SENSITIVE, vcn, NULL, 0, ctx);
1979 if (unlikely(err)) {
1980 if (err == -ENOENT)
1981 err = -EIO;
1982 goto err_out;
1983 }
1984 m = ctx->mrec;
1985 a = ctx->attr;
1986 /* Use goto to reduce indentation. */
1987 if (a->non_resident)
1988 goto do_non_resident_extend;
1989 BUG_ON(NInoNonResident(ni));
1990 /* The total length of the attribute value. */
1991 attr_len = le32_to_cpu(a->data.resident.value_length);
1992 /*
1993 * Extend the attribute record to be able to store the new attribute
1994 * size. ntfs_attr_record_resize() will not do anything if the size is
1995 * not changing.
1996 */
1997 if (new_alloc_size < vol->mft_record_size &&
1998 !ntfs_attr_record_resize(m, a,
1999 le16_to_cpu(a->data.resident.value_offset) +
2000 new_alloc_size)) {
2001 /* The resize succeeded! */
2002 write_lock_irqsave(&ni->size_lock, flags);
2003 ni->allocated_size = le32_to_cpu(a->length) -
2004 le16_to_cpu(a->data.resident.value_offset);
2005 write_unlock_irqrestore(&ni->size_lock, flags);
2006 if (new_data_size >= 0) {
2007 BUG_ON(new_data_size < attr_len);
2008 a->data.resident.value_length =
2009 cpu_to_le32((u32)new_data_size);
2010 }
2011 goto flush_done;
2012 }
2013 /*
2014 * We have to drop all the locks so we can call
2015 * ntfs_attr_make_non_resident(). This could be optimised by try-
2016 * locking the first page cache page and only if that fails dropping
2017 * the locks, locking the page, and redoing all the locking and
2018 * lookups. While this would be a huge optimisation, it is not worth
2019 * it as this is definitely a slow code path.
2020 */
2021 ntfs_attr_put_search_ctx(ctx);
2022 unmap_mft_record(base_ni);
2023 up_write(&ni->runlist.lock);
2024 /*
2025 * Not enough space in the mft record, try to make the attribute
2026 * non-resident and if successful restart the extension process.
2027 */
2028 err = ntfs_attr_make_non_resident(ni, attr_len);
2029 if (likely(!err))
2030 goto retry_extend;
2031 /*
2032 * Could not make non-resident. If this is because conversion is not
2033 * permitted for this attribute type or there is not enough space,
2034 * try to make other attributes non-resident. Otherwise fail.
2035 */
2036 if (unlikely(err != -EPERM && err != -ENOSPC)) {
2037 /* Only emit errors when the write will fail completely. */
2038 read_lock_irqsave(&ni->size_lock, flags);
2039 allocated_size = ni->allocated_size;
2040 read_unlock_irqrestore(&ni->size_lock, flags);
2041 if (start < 0 || start >= allocated_size)
2042 ntfs_error(vol->sb, "Cannot extend allocation of "
2043 "inode 0x%lx, attribute type 0x%x, "
2044 "because the conversion from resident "
2045 "to non-resident attribute failed "
2046 "with error code %i.", vi->i_ino,
2047 (unsigned)le32_to_cpu(ni->type), err);
2048 if (err != -ENOMEM)
2049 err = -EIO;
2050 goto conv_err_out;
2051 }
2052 /* TODO: Not implemented from here, abort. */
2053 read_lock_irqsave(&ni->size_lock, flags);
2054 allocated_size = ni->allocated_size;
2055 read_unlock_irqrestore(&ni->size_lock, flags);
2056 if (start < 0 || start >= allocated_size) {
2057 if (err == -ENOSPC)
2058 ntfs_error(vol->sb, "Not enough space in the mft "
2059 "record/on disk for the non-resident "
2060 "attribute value. This case is not "
2061 "implemented yet.");
2062 else /* if (err == -EPERM) */
2063 ntfs_error(vol->sb, "This attribute type may not be "
2064 "non-resident. This case is not "
2065 "implemented yet.");
2067 err = -EOPNOTSUPP;
2068 goto conv_err_out;
2069 do_non_resident_extend:
2070 BUG_ON(!NInoNonResident(ni));
2071 if (new_alloc_size == allocated_size) {
2072 BUG_ON(vcn);
2073 goto alloc_done;
2074 }
2075 /*
2076 * If the data starts after the end of the old allocation, this is a
2077 * $DATA attribute and sparse attributes are enabled on the volume and
2078 * for this inode, then create a sparse region between the old
2079 * allocated size and the start of the data. Otherwise simply proceed
2080 * with filling the whole space between the old allocated size and the
2081 * new allocated size with clusters.
2082 */
2083 if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
2084 !NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
2085 goto skip_sparse;
2086 // TODO: This is not implemented yet. We just fill in with real
2087 // clusters for now...
2088 ntfs_debug("Inserting holes is not implemented yet. Falling back to "
2089 "allocating real clusters instead.");
2090 skip_sparse:
2091 rl = ni->runlist.rl;
2092 if (likely(rl)) {
2093 /* Seek to the end of the runlist. */
2094 while (rl->length)
2095 rl++;
2096 }
2097 /* If this attribute extent is not mapped, map it now. */
2098 if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
2099 (rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
2100 (rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
2101 if (!rl && !allocated_size)
2102 goto first_alloc;
2103 rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
2104 if (IS_ERR(rl)) {
2105 err = PTR_ERR(rl);
2106 if (start < 0 || start >= allocated_size)
2107 ntfs_error(vol->sb, "Cannot extend allocation "
2108 "of inode 0x%lx, attribute "
2109 "type 0x%x, because the "
2110 "mapping of a runlist "
2111 "fragment failed with error "
2112 "code %i.", vi->i_ino,
2113 (unsigned)le32_to_cpu(ni->type),
2114 err);
2115 if (err != -ENOMEM)
2116 err = -EIO;
2117 goto err_out;
2118 }
2119 ni->runlist.rl = rl;
2120 /* Seek to the end of the runlist. */
2121 while (rl->length)
2122 rl++;
2123 }
2124 /*
2125 * We now know the runlist of the last extent is mapped and @rl is at
2126 * the end of the runlist. We want to begin allocating clusters
2127 * starting at the last allocated cluster to reduce fragmentation. If
2128 * there are no valid LCNs in the attribute we let the cluster
2129 * allocator choose the starting cluster.
2130 */
2131 /* If the last LCN is a hole or similar, seek back to the last real LCN. */
2132 while (rl->lcn < 0 && rl > ni->runlist.rl)
2133 rl--;
2134 first_alloc:
2135 // FIXME: Partial allocations are not implemented yet. They would allow part of
2136 // the write to be performed when start >= 0, as needed for POSIX write(2) conformance.
2137 rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
2138 (new_alloc_size - allocated_size) >>
2139 vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2140 rl->lcn + rl->length : -1, DATA_ZONE, true);
2141 if (IS_ERR(rl2)) {
2142 err = PTR_ERR(rl2);
2143 if (start < 0 || start >= allocated_size)
2144 ntfs_error(vol->sb, "Cannot extend allocation of "
2145 "inode 0x%lx, attribute type 0x%x, "
2146 "because the allocation of clusters "
2147 "failed with error code %i.", vi->i_ino,
2148 (unsigned)le32_to_cpu(ni->type), err);
2149 if (err != -ENOMEM && err != -ENOSPC)
2150 err = -EIO;
2151 goto err_out;
2152 }
2153 rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
2154 if (IS_ERR(rl)) {
2155 err = PTR_ERR(rl);
2156 if (start < 0 || start >= allocated_size)
2157 ntfs_error(vol->sb, "Cannot extend allocation of "
2158 "inode 0x%lx, attribute type 0x%x, "
2159 "because the runlist merge failed "
2160 "with error code %i.", vi->i_ino,
2161 (unsigned)le32_to_cpu(ni->type), err);
2162 if (err != -ENOMEM)
2163 err = -EIO;
2164 if (ntfs_cluster_free_from_rl(vol, rl2)) {
2165 ntfs_error(vol->sb, "Failed to release allocated "
2166 "cluster(s) in error code path. Run "
2167 "chkdsk to recover the lost "
2168 "cluster(s).");
2169 NVolSetErrors(vol);
2170 }
2171 ntfs_free(rl2);
2172 goto err_out;
2173 }
2174 ni->runlist.rl = rl;
2175 ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
2176 allocated_size) >> vol->cluster_size_bits);
2177 /* Find the runlist element with which the attribute extent starts. */
2178 ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
2179 rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
2180 BUG_ON(!rl2);
2181 BUG_ON(!rl2->length);
2182 BUG_ON(rl2->lcn < LCN_HOLE);
2183 mp_rebuilt = false;
2184 /* Get the size for the new mapping pairs array for this extent. */
2185 mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
2186 if (unlikely(mp_size <= 0)) {
2187 err = mp_size;
2188 if (start < 0 || start >= allocated_size)
2189 ntfs_error(vol->sb, "Cannot extend allocation of "
2190 "inode 0x%lx, attribute type 0x%x, "
2191 "because determining the size for the "
2192 "mapping pairs failed with error code "
2193 "%i.", vi->i_ino,
2194 (unsigned)le32_to_cpu(ni->type), err);
2195 err = -EIO;
2196 goto undo_alloc;
2197 }
2198 /* Extend the attribute record to fit the bigger mapping pairs array. */
2199 attr_len = le32_to_cpu(a->length);
2200 err = ntfs_attr_record_resize(m, a, mp_size +
2201 le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
2202 if (unlikely(err)) {
2203 BUG_ON(err != -ENOSPC);
2204 // TODO: Deal with this by moving this extent to a new mft
2205 // record or by starting a new extent in a new mft record,
2206 // possibly by extending this extent partially and filling it
2207 // and creating a new extent for the remainder, or by making
2208 // other attributes non-resident and/or by moving other
2209 // attributes out of this mft record.
2210 if (start < 0 || start >= allocated_size)
2211 ntfs_error(vol->sb, "Not enough space in the mft "
2212 "record for the extended attribute "
2213 "record. This case is not "
2214 "implemented yet.");
2215 err = -EOPNOTSUPP;
2216 goto undo_alloc;
2217 }
2218 mp_rebuilt = true;
2219 /* Generate the mapping pairs array directly into the attr record. */
2220 err = ntfs_mapping_pairs_build(vol, (u8*)a +
2221 le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
2222 mp_size, rl2, ll, -1, NULL);
2223 if (unlikely(err)) {
2224 if (start < 0 || start >= allocated_size)
2225 ntfs_error(vol->sb, "Cannot extend allocation of "
2226 "inode 0x%lx, attribute type 0x%x, "
2227 "because building the mapping pairs "
2228 "failed with error code %i.", vi->i_ino,
2229 (unsigned)le32_to_cpu(ni->type), err);
2230 err = -EIO;
2231 goto undo_alloc;
2232 }
2233 /* Update the highest_vcn. */
2234 a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
2235 vol->cluster_size_bits) - 1);
2236 /*
2237 * We now have extended the allocated size of the attribute. Reflect
2238 * this in the ntfs_inode structure and the attribute record.
2239 */
2240 if (a->data.non_resident.lowest_vcn) {
2241 /*
2242 * We are not in the first attribute extent, switch to it, but
2243 * first ensure the changes will make it to disk later.
2244 */
2245 flush_dcache_mft_record_page(ctx->ntfs_ino);
2246 mark_mft_record_dirty(ctx->ntfs_ino);
2247 ntfs_attr_reinit_search_ctx(ctx);
2248 err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2249 CASE_SENSITIVE, 0, NULL, 0, ctx);
2250 if (unlikely(err))
2251 goto restore_undo_alloc;
2252 /* @m is not used any more so no need to set it. */
2253 a = ctx->attr;
2254 }
2255 write_lock_irqsave(&ni->size_lock, flags);
2256 ni->allocated_size = new_alloc_size;
2257 a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
2258 if (NInoSparse(ni) || NInoCompressed(ni)) {
2259 ni->itype.compressed.size += new_alloc_size - allocated_size;
2260 a->data.non_resident.compressed_size =
2261 cpu_to_sle64(ni->itype.compressed.size);
2262 vi->i_blocks = ni->itype.compressed.size >> 9;
2263 } else
2264 vi->i_blocks = new_alloc_size >> 9;
2265 write_unlock_irqrestore(&ni->size_lock, flags);
2266 alloc_done:
2267 if (new_data_size >= 0) {
2268 BUG_ON(new_data_size <
2269 sle64_to_cpu(a->data.non_resident.data_size));
2270 a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
2271 }
2272 flush_done:
2273 /* Ensure the changes make it to disk. */
2274 flush_dcache_mft_record_page(ctx->ntfs_ino);
2275 mark_mft_record_dirty(ctx->ntfs_ino);
2276 done:
2277 ntfs_attr_put_search_ctx(ctx);
2278 unmap_mft_record(base_ni);
2279 up_write(&ni->runlist.lock);
2280 ntfs_debug("Done, new_allocated_size 0x%llx.",
2281 (unsigned long long)new_alloc_size);
2282 return new_alloc_size;
2283 restore_undo_alloc:
2284 if (start < 0 || start >= allocated_size)
2285 ntfs_error(vol->sb, "Cannot complete extension of allocation "
2286 "of inode 0x%lx, attribute type 0x%x, because "
2287 "lookup of first attribute extent failed with "
2288 "error code %i.", vi->i_ino,
2289 (unsigned)le32_to_cpu(ni->type), err);
2290 if (err == -ENOENT)
2291 err = -EIO;
2292 ntfs_attr_reinit_search_ctx(ctx);
2293 if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
2294 allocated_size >> vol->cluster_size_bits, NULL, 0,
2295 ctx)) {
2296 ntfs_error(vol->sb, "Failed to find last attribute extent of "
2297 "attribute in error code path. Run chkdsk to "
2298 "recover.");
2299 write_lock_irqsave(&ni->size_lock, flags);
2300 ni->allocated_size = new_alloc_size;
2301 if (NInoSparse(ni) || NInoCompressed(ni)) {
2302 ni->itype.compressed.size += new_alloc_size -
2303 allocated_size;
2304 vi->i_blocks = ni->itype.compressed.size >> 9;
2305 } else
2306 vi->i_blocks = new_alloc_size >> 9;
2307 write_unlock_irqrestore(&ni->size_lock, flags);
2308 ntfs_attr_put_search_ctx(ctx);
2309 unmap_mft_record(base_ni);
2310 up_write(&ni->runlist.lock);
2311 /*
2312 * The only thing that is now wrong is the allocated size of the
2313 * base attribute extent which chkdsk should be able to fix.
2314 */
2315 NVolSetErrors(vol);
2316 return err;
2317 }
2318 ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
2319 (allocated_size >> vol->cluster_size_bits) - 1);
2320 undo_alloc:
2321 ll = allocated_size >> vol->cluster_size_bits;
2322 if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
2323 ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
2324 "in error code path. Run chkdsk to recover "
2325 "the lost cluster(s).");
2326 NVolSetErrors(vol);
2327 }
2328 m = ctx->mrec;
2329 a = ctx->attr;
2330 /*
2331 * If the runlist truncation fails and/or the search context is no
2332 * longer valid, we cannot resize the attribute record or build the
2333 * mapping pairs array; thus we mark the inode bad so that no access to
2334 * the freed clusters can happen.
2335 */
2336 if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
2337 ntfs_error(vol->sb, "Failed to %s in error code path. Run "
2338 "chkdsk to recover.", IS_ERR(m) ?
2339 "restore attribute search context" :
2340 "truncate attribute runlist");
2341 NVolSetErrors(vol);
2342 } else if (mp_rebuilt) {
2343 if (ntfs_attr_record_resize(m, a, attr_len)) {
2344 ntfs_error(vol->sb, "Failed to restore attribute "
2345 "record in error code path. Run "
2346 "chkdsk to recover.");
2347 NVolSetErrors(vol);
2348 } else /* if (success) */ {
2349 if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
2350 a->data.non_resident.
2351 mapping_pairs_offset), attr_len -
2352 le16_to_cpu(a->data.non_resident.
2353 mapping_pairs_offset), rl2, ll, -1,
2354 NULL)) {
2355 ntfs_error(vol->sb, "Failed to restore "
2356 "mapping pairs array in error "
2357 "code path. Run chkdsk to "
2358 "recover.");
2359 NVolSetErrors(vol);
2360 }
2361 flush_dcache_mft_record_page(ctx->ntfs_ino);
2362 mark_mft_record_dirty(ctx->ntfs_ino);
2363 }
2364 }
2365 err_out:
2366 if (ctx)
2367 ntfs_attr_put_search_ctx(ctx);
2368 if (m)
2369 unmap_mft_record(base_ni);
2370 up_write(&ni->runlist.lock);
2371 conv_err_out:
2372 ntfs_debug("Failed. Returning error code %i.", err);
2373 return err;
2374 }
2376 /**
2377 * ntfs_attr_set - fill (a part of) an attribute with a byte
2378 * @ni: ntfs inode describing the attribute to fill
2379 * @ofs: offset inside the attribute at which to start to fill
2380 * @cnt: number of bytes to fill
2381 * @val: the unsigned 8-bit value with which to fill the attribute
2383 * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
2384 * byte offset @ofs inside the attribute with the constant byte @val.
2386 * This function is effectively like memset() applied to an ntfs attribute.
2387 * Note, this function actually only operates on the page cache pages belonging
2388 * to the ntfs attribute and it marks them dirty after doing the memset().
2389 * Thus it relies on the vm dirty page write code paths to cause the modified
2390 * pages to be written to the mft record/disk.
2392 * Return 0 on success and -errno on error. An error code of -ESPIPE means
2393 * that @ofs + @cnt reach beyond the end of the attribute and no write was
2394 * performed.
2395 */
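/*
 * Illustrative usage sketch (hypothetical caller, not taken from the
 * driver): zero out a newly allocated tail of an attribute, from old_size
 * up to new_size bytes:
 *
 *	err = ntfs_attr_set(ni, old_size, new_size - old_size, 0);
 *	if (unlikely(err))
 *		ntfs_error(vol->sb, "Failed to zero the new space.");
 */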
2396 int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2397 {
2398 ntfs_volume *vol = ni->vol;
2399 struct address_space *mapping;
2400 struct page *page;
2401 u8 *kaddr;
2402 pgoff_t idx, end;
2403 unsigned start_ofs, end_ofs, size;
2405 ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
2406 (long long)ofs, (long long)cnt, val);
2407 BUG_ON(ofs < 0);
2408 BUG_ON(cnt < 0);
2409 if (!cnt)
2410 goto done;
2411 BUG_ON(NInoCompressed(ni));
2412 BUG_ON(NInoEncrypted(ni));
2413 mapping = VFS_I(ni)->i_mapping;
2414 /* Work out the starting index and page offset. */
2415 idx = ofs >> PAGE_CACHE_SHIFT;
2416 start_ofs = ofs & ~PAGE_CACHE_MASK;
2417 /* Work out the ending index and page offset. */
2418 end = ofs + cnt;
2419 end_ofs = end & ~PAGE_CACHE_MASK;
2420 /* If the end is outside the inode size return -ESPIPE. */
2421 if (unlikely(end > i_size_read(VFS_I(ni)))) {
2422 ntfs_error(vol->sb, "Request exceeds end of attribute.");
2423 return -ESPIPE;
2424 }
2425 end >>= PAGE_CACHE_SHIFT;
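/*
 * Worked example (illustrative, assuming 4096-byte pages): ofs == 0x1800
 * and cnt == 0x3000 give idx == 1, start_ofs == 0x800, end == 4 and
 * end_ofs == 0x800, i.e. a partial first page, two whole pages and a
 * partial last page.
 */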
2426 /* If there is a first partial page, need to do it the slow way. */
2427 if (start_ofs) {
2428 page = read_mapping_page(mapping, idx, NULL);
2429 if (IS_ERR(page)) {
2430 ntfs_error(vol->sb, "Failed to read first partial "
2431 "page (error, index 0x%lx).", idx);
2432 return PTR_ERR(page);
2433 }
2434 /*
2435 * If the last page is the same as the first page, need to
2436 * limit the write to the end offset.
2437 */
2438 size = PAGE_CACHE_SIZE;
2439 if (idx == end)
2440 size = end_ofs;
2441 kaddr = kmap_atomic(page, KM_USER0);
2442 memset(kaddr + start_ofs, val, size - start_ofs);
2443 flush_dcache_page(page);
2444 kunmap_atomic(kaddr, KM_USER0);
2445 set_page_dirty(page);
2446 page_cache_release(page);
2447 balance_dirty_pages_ratelimited(mapping);
2448 cond_resched();
2449 if (idx == end)
2450 goto done;
2451 idx++;
2452 }
2453 /* Do the whole pages the fast way. */
2454 for (; idx < end; idx++) {
2455 /* Find or create the current page. (The page is locked.) */
2456 page = grab_cache_page(mapping, idx);
2457 if (unlikely(!page)) {
2458 ntfs_error(vol->sb, "Insufficient memory to grab "
2459 "page (index 0x%lx).", idx);
2460 return -ENOMEM;
2461 }
2462 kaddr = kmap_atomic(page, KM_USER0);
2463 memset(kaddr, val, PAGE_CACHE_SIZE);
2464 flush_dcache_page(page);
2465 kunmap_atomic(kaddr, KM_USER0);
2466 /*
2467 * If the page has buffers, mark them uptodate since buffer
2468 * state and not page state is definitive in 2.6 kernels.
2469 */
2470 if (page_has_buffers(page)) {
2471 struct buffer_head *bh, *head;
2473 bh = head = page_buffers(page);
2474 do {
2475 set_buffer_uptodate(bh);
2476 } while ((bh = bh->b_this_page) != head);
2477 }
2478 /* Now that buffers are uptodate, set the page uptodate, too. */
2479 SetPageUptodate(page);
2480 /*
2481 * Set the page and all its buffers dirty and mark the inode
2482 * dirty, too. The VM will write the page later on.
2483 */
2484 set_page_dirty(page);
2485 /* Finally unlock and release the page. */
2486 unlock_page(page);
2487 page_cache_release(page);
2488 balance_dirty_pages_ratelimited(mapping);
2489 cond_resched();
2490 }
2491 /* If there is a last partial page, need to do it the slow way. */
2492 if (end_ofs) {
2493 page = read_mapping_page(mapping, idx, NULL);
2494 if (IS_ERR(page)) {
2495 ntfs_error(vol->sb, "Failed to read last partial page "
2496 "(error, index 0x%lx).", idx);
2497 return PTR_ERR(page);
2498 }
2499 kaddr = kmap_atomic(page, KM_USER0);
2500 memset(kaddr, val, end_ofs);
2501 flush_dcache_page(page);
2502 kunmap_atomic(kaddr, KM_USER0);
2503 set_page_dirty(page);
2504 page_cache_release(page);
2505 balance_dirty_pages_ratelimited(mapping);
2506 cond_resched();
2507 }
2508 done:
2509 ntfs_debug("Done.");
2510 return 0;
2511 }
2513 #endif /* NTFS_RW */