/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.29 2008/02/08 08:30:59 dillon Exp $
 */
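/*
 * NOTE: the includes and the forward declaration below are assumed from
 * context; hammer.h supplies the HAMMER-private structures and prototypes
 * used throughout this file, and hammer_rel_inode() calls
 * hammer_unload_inode() before it is defined.
 */
#include "hammer.h"
#include <sys/buf.h>
#include <sys/buf2.h>

static int hammer_unload_inode(struct hammer_inode *ip, void *data);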
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * If the inode no longer has any references we recover its
	 * in-memory resources immediately.
	 */
	if (ip->ino_rec.ino_nlinks == 0)
		vrecycle(ap->a_vp);
	return(0);
}
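/*
 * NOTE: vrecycle() only asks the kernel to recycle the vnode; the actual
 * teardown of the inode<->vnode association happens in
 * hammer_vop_reclaim() below.
 */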
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode and will flush the inode to the
 * media.
 *
 * XXX Currently our sync code only runs through inodes with vnode
 * associations, so we depend on hammer_rel_inode() to sync any inode
 * record data to the block device prior to losing the association.
 * Otherwise transactions that the user expected to be distinct by
 * doing a manual sync may be merged.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		vp->v_data = NULL;
		ip->vp = NULL;
		hammer_rel_inode(ip, 0);
	}
	return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
	struct vnode *vp;
	int error = 0;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type = hammer_get_vnode_type(
					ip->ino_rec.base.base.obj_type);

			switch(ip->ino_rec.base.base.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == ip->hmp->asof) {
				vp->v_flag |= VROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_rec.ino_size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}
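/*
 * Typical caller pattern (sketch only; see hammer_get_inode() below):
 *
 *	ip = hammer_get_inode(hmp, &cache, obj_id, asof, 0, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, LK_EXCLUSIVE, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 *
 * The vnode association holds its own reference on the inode, so the
 * caller's temporary reference can be dropped once the vnode is attached.
 */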
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 */
struct hammer_inode *
hammer_get_inode(struct hammer_mount *hmp, struct hammer_node **cache,
		 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	RB_INIT(&ip->rec_tree);
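	/*
	 * NOTE: the hmp->rb_inos_root tree consulted above is keyed on
	 * (obj_id, obj_asof), so the same filesystem object accessed
	 * as-of different transaction ids is backed by distinct
	 * in-memory inodes.
	 */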
	/*
	 * Locate the on-disk inode.
	 */
retry:
	hammer_init_cursor_hmp(&cursor, cache, hmp);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}
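	/*
	 * NOTE: EDEADLK is the cursor code's way of flagging a lock
	 * collision that had to be backed out; the only recovery is to
	 * tear the cursor down and retry the lookup from scratch.
	 */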
	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_rec = cursor.record->inode;
		ip->ino_data = cursor.data1->inode;
		hammer_cache_node(cursor.node, &ip->cache[0]);
		if (cache)
			hammer_cache_node(cursor.node, cache);
	}

	/*
	 * On success load the inode's record and data and insert the
	 * inode into the B-Tree.  It is possible to race another lookup
	 * insertion of the same inode so deal with that condition too.
	 *
	 * The cursor's locked node interlocks against others creating and
	 * destroying ip while we were blocked.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_uncache_node(&ip->cache[0]);
			hammer_uncache_node(&ip->cache[1]);
			hammer_unref(&ip->lock);
			--hammer_count_inodes;
			kfree(ip, M_HAMMER);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		--hammer_count_inodes;
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return(ip);
}
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced but not locked.
 *
 * The inode is created in-memory and will be delay-synchronized to the
 * media.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;

	hmp = trans->hmp;
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = hammer_alloc_tid(trans);
	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
		    HAMMER_INODE_ITIMES | HAMMER_INODE_TIDLOCKED;
	ip->last_tid = trans->tid;

	RB_INIT(&ip->rec_tree);

	ip->ino_rec.ino_atime = trans->tid;
	ip->ino_rec.ino_mtime = trans->tid;
	ip->ino_rec.ino_size = 0;
	ip->ino_rec.ino_nlinks = 0;
	ip->ino_rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_rec.base.base.obj_id = ip->obj_id;
	ip->ino_rec.base.base.key = 0;
	ip->ino_rec.base.base.create_tid = trans->tid;
	ip->ino_rec.base.base.delete_tid = 0;
	ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->tid;
	ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;

	switch(ip->ino_rec.base.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	ip->ino_data.gid = dip->ino_data.gid;
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
				     &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);

	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx",
		      ip->obj_id);
	}
	*ipp = ip;
	return(0);
}
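/*
 * NOTE: the new inode exists only in memory at this point; the dirty
 * flags set above force hammer_sync_inode() to lay down the initial
 * on-disk record, and HAMMER_INODE_TIDLOCKED pins the sync TID so that
 * record's create_tid matches the related directory entry.
 */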
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	int error;
	hammer_tid_t last_tid;

	/*
	 * Locate the record on-disk and mark it as deleted.  Both the B-Tree
	 * node and the record must be marked deleted.  The record may or
	 * may not be physically deleted, depending on the retention policy.
	 *
	 * If the inode has already been deleted on-disk we have nothing
	 * to do.
	 *
	 * XXX Update the inode record and data in-place if the retention
	 * policy allows it.
	 */
	last_tid = ip->last_tid;
retry:
	error = 0;

	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);
		cursor.key_beg.obj_id = ip->obj_id;
		cursor.key_beg.key = 0;
		cursor.key_beg.create_tid = 0;
		cursor.key_beg.delete_tid = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.obj_type = 0;
		cursor.asof = ip->obj_asof;
		cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;

		error = hammer_btree_lookup(&cursor);

		if (error == 0) {
			error = hammer_ip_delete_record(&cursor, last_tid);
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			hammer_cache_node(cursor.node, &ip->cache[0]);
		}
		hammer_done_cursor(&cursor);
		if (error == EDEADLK)
			goto retry;
	}
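	/*
	 * NOTE: deleting the old record and laying down a fresh copy,
	 * rather than overwriting, is what preserves the inode's history;
	 * whether the old record is physically destroyed is up to the
	 * retention policy (see hammer_ip_delete_record()).
	 */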
	/*
	 * Write out a new record if the in-memory inode is not marked
	 * as having been deleted.  Update our inode statistics if this
	 * is the first application of the inode on-disk.
	 *
	 * If the inode has been deleted permanently, HAMMER_INODE_DELONDISK
	 * will remain set and prevent further updates.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		record = hammer_alloc_mem_record(ip,
					sizeof(struct hammer_inode_record));
		record->rec.inode = ip->ino_rec;
		record->rec.inode.base.base.create_tid = last_tid;
		record->rec.inode.base.data_len = sizeof(ip->ino_data);
		record->data = (void *)&ip->ino_data;
		error = hammer_ip_sync_record(record);
		record->flags |= HAMMER_RECF_DELETED;
		hammer_rel_mem_record(record);
		if (error == 0) {
			ip->flags &= ~(HAMMER_INODE_RDIRTY |
				       HAMMER_INODE_DDIRTY |
				       HAMMER_INODE_DELONDISK |
				       HAMMER_INODE_ITIMES);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume(ip->hmp->rootvol,
						     NULL, 0);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				ip->flags |= HAMMER_INODE_ONDISK;
			}

			/*
			 * Unlock the sync TID if it was locked, now that
			 * we have written it out to disk.
			 */
			ip->flags &= ~HAMMER_INODE_TIDLOCKED;
		}
	}
	return(error);
}
/*
 * Update only the itimes fields.  This is done non-historically.  The
 * record is updated in-place on the disk.
 */
static int
hammer_update_itimes(hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	struct hammer_inode_record *rec;
	int error;

retry:
	error = 0;
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_init_cursor_hmp(&cursor, &ip->cache[0], ip->hmp);
		cursor.key_beg.obj_id = ip->obj_id;
		cursor.key_beg.key = 0;
		cursor.key_beg.create_tid = 0;
		cursor.key_beg.delete_tid = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.obj_type = 0;
		cursor.asof = ip->obj_asof;
		cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;

		error = hammer_btree_lookup(&cursor);
		if (error == 0) {
			rec = &cursor.record->inode;
			hammer_modify_buffer(cursor.record_buffer, NULL, 0);
			rec->ino_atime = ip->ino_rec.ino_atime;
			rec->ino_mtime = ip->ino_rec.ino_mtime;
			ip->flags &= ~HAMMER_INODE_ITIMES;
			/* XXX recalculate crc */
			hammer_cache_node(cursor.node, &ip->cache[0]);
		}
		hammer_done_cursor(&cursor);
		if (error == EDEADLK)
			goto retry;
	}
	return(error);
}
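/*
 * NOTE: the in-place update above is the deliberate exception to the
 * historical no-overwrite model; pure atime/mtime churn would otherwise
 * generate an unbounded stream of historical inode records.
 */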
/*
 * Release a reference on an inode.  If asked to flush the last release
 * will flush the inode.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_unref(&ip->lock);
	if (flush)
		ip->flags |= HAMMER_INODE_FLUSH;
	if (ip->lock.refs == 0) {
		if (ip->flags & HAMMER_INODE_FLUSH)
			hammer_unload_inode(ip, (void *)MNT_WAIT);
		else
			hammer_unload_inode(ip, (void *)MNT_NOWAIT);
	}
}
/*
 * Unload and destroy the specified inode.
 *
 * (typically called via RB_SCAN)
 */
static int
hammer_unload_inode(struct hammer_inode *ip, void *data)
{
	int error;

	KASSERT(ip->lock.refs == 0,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	hammer_ref(&ip->lock);

	error = hammer_sync_inode(ip, (int)data, 1);
	if (error)
		kprintf("hammer_sync_inode failed error %d\n", error);
	if (ip->lock.refs == 1) {
		KKASSERT(RB_EMPTY(&ip->rec_tree));
		RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);

		hammer_uncache_node(&ip->cache[0]);
		hammer_uncache_node(&ip->cache[1]);
		--hammer_count_inodes;
		kfree(ip, M_HAMMER);
	} else {
		hammer_unref(&ip->lock);
	}
	return(0);
}
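/*
 * Example RB_SCAN usage (sketch only): the unmount path can destroy all
 * remaining in-memory inodes with something like
 *
 *	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
 *		hammer_unload_inode, (void *)MNT_WAIT);
 */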
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_RDIRTY:	Inode record has been updated
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ITIMES: mtime/atime has been updated
 *
 * last_tid is the TID to use to generate the correct TID when the inode
 * is synced to disk.  The first inode record laid out on disk must match
 * the transaction id of the related directory entry so only update last_tid
 * if that has already occurred.
 */
void
hammer_modify_inode(struct hammer_transaction *trans,
		    struct hammer_inode *ip, int flags)
{
	KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
		  (flags & (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
			    HAMMER_INODE_DELETED|HAMMER_INODE_ITIMES)) == 0);

	if (flags &
	    (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|HAMMER_INODE_DELETED)) {
		if (hammer_debug_tid) {
			kprintf("hammer_modify_inode: %016llx (%08x)\n",
				trans->tid, (int)(trans->tid / 1000000000LL));
		}

		/*
		 * Update the inode sync transaction id unless it's locked
		 * due to some prior required synchronization.  Locking the
		 * tid in the new flags overrides this (used by rename).
		 */
		if ((ip->flags & HAMMER_INODE_TIDLOCKED) == 0)
			ip->last_tid = trans->tid;
		else if (flags & HAMMER_INODE_TIDLOCKED)
			ip->last_tid = trans->tid;
	}
	ip->flags |= flags;
}
/*
 * Sync any dirty buffers and records associated with an inode.  The
 * inode's last_tid field is used as the transaction id for the sync,
 * overriding any intermediate TIDs that were used for records.  Note
 * that the dirty buffer cache buffers do not have any knowledge of
 * the transaction id they were modified under.
 */
static int
hammer_sync_inode_callback(hammer_record_t rec, void *data __unused)
{
	int error;

	hammer_ref(&rec->lock);
	error = hammer_ip_sync_record(rec);
	hammer_rel_mem_record(rec);

	if (error) {
		error = -error;
		if (error != -ENOSPC) {
			kprintf("hammer_sync_inode_callback: sync failed rec "
				"%p, error %d\n", rec, error);
		}
	}
	return(error);
}
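/*
 * NOTE: a negative return from an RB_SCAN callback aborts the scan and
 * is passed through to the caller, hence the sign flip above;
 * hammer_sync_inode() negates the value back into a normal errno.
 */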
int
hammer_sync_inode(hammer_inode_t ip, int waitfor, int handle_delete)
{
	struct hammer_transaction trans;
	int error;

	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		return(0);
	}

	hammer_lock_ex(&ip->lock);

	/*
	 * Use the transaction id of the last operation to sync.
	 */
	if (ip->last_tid)
		hammer_start_transaction_tid(&trans, ip->hmp, ip->last_tid);
	else
		hammer_start_transaction(&trans, ip->hmp);
	/*
	 * If the inode has been deleted (nlinks == 0), and the OS no longer
	 * has any references to it (handle_delete != 0), clean up in-memory
	 * data.
	 *
	 * NOTE: We do not set the RDIRTY flag when updating the delete_tid,
	 * setting HAMMER_INODE_DELETED takes care of it.
	 *
	 * NOTE: Because we may sync records within this new transaction,
	 * force the inode update later on to use our transaction id or
	 * the delete_tid of the inode may be less than the create_tid of
	 * the inode update.  XXX shouldn't happen but don't take the chance.
	 */
	if (ip->ino_rec.ino_nlinks == 0 && handle_delete &&
	    (ip->flags & HAMMER_INODE_GONE) == 0) {
		ip->flags |= HAMMER_INODE_GONE;
		if (ip->vp)
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
		error = hammer_ip_delete_range_all(&trans, ip);
		KKASSERT(RB_EMPTY(&ip->rec_tree));
		ip->ino_rec.base.base.delete_tid = trans.tid;
		hammer_modify_inode(&trans, ip, HAMMER_INODE_DELETED);
		hammer_modify_volume(ip->hmp->rootvol, NULL, 0);
		--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
	}
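	/*
	 * NOTE: the vol0_stat_inodes decrement above mirrors the
	 * increment performed by hammer_update_inode() when the inode
	 * first went to disk.
	 */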
	/*
	 * Sync the buffer cache.
	 */
	if (ip->vp != NULL) {
		error = vfsync(ip->vp, waitfor, 1, NULL, NULL);
		if (RB_ROOT(&ip->vp->v_rbdirty_tree) == NULL)
			ip->flags &= ~HAMMER_INODE_BUFS;
	} else {
		error = 0;
	}

	/*
	 * Now sync related records
	 */
	error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
			hammer_sync_inode_callback, NULL);
	KKASSERT(error <= 0);
	if (error < 0)
		error = -error;
	if (RB_EMPTY(&ip->rec_tree))
		ip->flags &= ~HAMMER_INODE_XDIRTY;
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 */
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 */
		ip->flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
			       HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t rec = RB_ROOT(&ip->rec_tree);
			hammer_ref(&rec->lock);
			rec->flags |= HAMMER_RECF_DELETED;
			hammer_rel_mem_record(rec);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set both dirty flags
		 * to force an initial record to be written.
		 */
		ip->flags |= HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY;
		break;
	}
	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if ((ip->flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
			  HAMMER_INODE_ITIMES | HAMMER_INODE_DELETED)) ==
	    HAMMER_INODE_ITIMES) {
		error = hammer_update_itimes(ip);
	} else
	if (ip->flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
			 HAMMER_INODE_ITIMES | HAMMER_INODE_DELETED)) {
		error = hammer_update_inode(ip);
	}
	hammer_commit_transaction(&trans);
	hammer_unlock(&ip->lock);
	return(error);
}
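/*
 * Caller sketch: the inode unload path (hammer_unload_inode() above)
 * passes handle_delete != 0 so nlinks == 0 inodes are destroyed here,
 * while an fsync-style sync would pass handle_delete == 0 and leave
 * deletion handling for the final teardown.
 */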