HAMMER 59E/Many: Stabilization pass
sys/vfs/hammer/hammer_inode.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.90 2008/06/30 02:45:30 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
static int hammer_setup_parent_inodes(hammer_inode_t ip);
static int hammer_setup_parent_inodes_helper(hammer_record_t record);
static void hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * Red-Black tree support for inode structures.
 *
 * Insertions
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}
/*
 * LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
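
/*
 * Illustration (editor's example, not original source): the comparator
 * above yields a lexicographic ordering on the triple
 * (obj_localization, obj_id, obj_asof), with asof as the lowest-priority
 * key.  For example, two snapshots of the same object:
 *
 *      { localization=0, obj_id=0x100, asof=10 }
 *      { localization=0, obj_id=0x100, asof=20 }
 *
 * sort adjacently in the red-black tree, which is what allows
 * hammer_inode_info_cmp_all_history() to match every snapshot of an
 * object by simply ignoring the asof field.
 */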
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
        }
        return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                        if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
                            (hmp->inode_reclaims & 255) == 0) {
                                hammer_flusher_async(hmp);
                        }
                }
                hammer_rel_inode(ip, 1);
        }
        return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;
                        vp->v_type =
                                hammer_get_vnode_type(ip->ino_data.obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots also do not count.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof &&
                            ip->obj_localization == 0) {
                                vp->v_flag |= VROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}
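
/*
 * Note on the loop above: two races are handled.  If getnewvnode()
 * succeeds but another thread attaches a vnode to ip before we relock,
 * the fresh vnode is discarded (VBAD + vx_put) and we retry.  If an
 * existing vnode is found, vget() can fail or ip->vp can change while
 * we block, in which case we also retry.
 */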
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
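
/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 * visit every cached in-memory inode representing a snapshot of one
 * object.  The asof field is not tested by the scan comparator, so any
 * value may be supplied:
 *
 *      struct hammer_inode_info iinfo;
 *
 *      iinfo.obj_id = obj_id;
 *      iinfo.obj_asof = 0;             // ignored by the all-history scan
 *      iinfo.obj_localization = localization;
 *      hammer_scan_inode_snapshots(hmp, &iinfo, my_callback, NULL);
 *
 * The callback is then invoked once per cached inode matching the
 * (localization, obj_id) pair.
 */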
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 u_int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        if (hmp->ronly || (hmp->hflags & HMNT_SLAVE))
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        /*
         * Locate the on-disk inode.
         */
retry:
        hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;
        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * The assumption is that it is near the directory data.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip && dip->cache[1].node)
                        hammer_cache_node(&ip->cache[1], dip->cache[1].node);

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                hammer_ref(&ip->lock);
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_uncache_node(&ip->cache[0]);
                        hammer_uncache_node(&ip->cache[1]);
                        KKASSERT(ip->lock.refs == 1);
                        --hammer_count_inodes;
                        --hmp->count_inodes;
                        kfree(ip, M_HAMMER);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hmp->rsv_databufs -= ip->rsv_databufs;
                ip->rsv_databufs = 0;                          /* sanity */

                --hammer_count_inodes;
                --hmp->count_inodes;
                kfree(ip, M_HAMMER);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        return (ip);
}
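
/*
 * Note: hammer_get_inode() follows a lock-free lookup pattern.  It first
 * checks the red-black tree, allocates and loads a fresh structure if
 * nothing is cached, and resolves lookup/lookup races at RB_INSERT time
 * by throwing the loser's copy away and looping back to the cache lookup.
 */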
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    int pseudofs, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        u_int32_t localization;
        int error;

        hmp = trans->hmp;

        /*
         * Assign the localization domain.  If dip is NULL we are creating
         * a pseudo-fs and must locate an unused localization domain.
         */
        if (pseudofs) {
                for (localization = HAMMER_DEF_LOCALIZATION;
                     localization < HAMMER_LOCALIZE_PSEUDOFS_MASK;
                     localization += HAMMER_LOCALIZE_PSEUDOFS_INC) {
                        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                              hmp->asof, localization,
                                              0, &error);
                        if (ip == NULL) {
                                if (error != ENOENT)
                                        return(error);
                                break;
                        }
                        if (ip)
                                hammer_rel_inode(ip, 0);
                }
        } else {
                localization = dip->obj_localization;
        }

        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;

        /*
         * Allocate a new object id.  If creating a new pseudo-fs the
         * obj_id is 1.
         */
        if (pseudofs)
                ip->obj_id = HAMMER_OBJID_ROOT;
        else
                ip->obj_id = hammer_alloc_objid(hmp, dip);
        ip->obj_localization = localization;

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        ip->ino_data.uflags = dip->ino_data.uflags &
                              (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         *
         * The parent_obj_localization field only applies to pseudo-fs roots.
         */
        ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                        dip->obj_localization;
        }

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        xuid = hammer_to_unix_xid(&dip->ino_data.uid);
        xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
                                     &vap->va_mode);
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);
        if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                hammer_unref(&ip->lock);
                panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
        }
        *ipp = ip;
        return(0);
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode");
                }

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error && error != EDEADLK) {
                                kprintf("error %d\n", error);
                                Debugger("hammer_update_inode2");
                        }
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode3");
                }

                /*
                 * The record isn't managed by the inode's record tree,
                 * destroy it whether we succeed or fail.
                 */
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flags |= HAMMER_RECF_DELETED_FE;
                record->flush_state = HAMMER_FST_IDLE;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error) {
                kprintf("error %d\n", error);
                Debugger("hammer_update_itimes1");
        }
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        hammer_mount_t hmp = ip->hmp;

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                if (hmp->rsv_inodes > desiredvnodes) {
                                        hammer_flush_inode(ip,
                                                           HAMMER_FLUSH_SIGNAL);
                                } else {
                                        hammer_flush_inode(ip, 0);
                                }
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;

        hammer_inode_wakereclaims(ip);
        kfree(ip, M_HAMMER);

        return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
                 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                           HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                           HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        ip->flags |= flags;
}
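
/*
 * A typical call sequence from frontend code (illustrative sketch only,
 * new_mode is a hypothetical value): after changing a field in the
 * in-memory inode data, flag the inode dirty so the flusher knows to
 * sync it:
 *
 *      ip->ino_data.mode = new_mode;
 *      hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 */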
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
 * flush the inode synchronously using the caller's context.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        int good;

        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependancies and can flush immediately.  Some
                 * of our children may not be flushable so we have to re-test
                 * with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependancies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependancies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip);

                /*
                 * We can continue if good >= 0.  Determine how many records
                 * under our inode can be flushed (and mark them).
                 */
                if (good >= 0) {
                        hammer_flush_inode_core(ip, flags);
                } else {
                        ip->flags |= HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                }
                break;
        default:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp);
                }
                break;
        }
}
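
/*
 * Summary of the flush state machine driven above (derived from the
 * surrounding code): an inode moves IDLE -> FLUSH directly when it has
 * no dependancies, or sits in SETUP while hammer_setup_parent_inodes()
 * tries to pull the parent directory entries it depends on into the
 * same flush group.  If the inode is already flushing, REFLUSH (and
 * optionally RESIGNAL) arrange for another flush cycle once the current
 * one completes.
 */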
/*
 * Scan ip->target_list, which is a list of records owned by inodes that
 * are PARENTS to our ip and which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 * so for now do not ref/deref the structures.  Note that if we use the
 * ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip)
{
        hammer_record_t depend;
#if 0
        hammer_record_t next;
        hammer_inode_t pip;
#endif
        int good;
        int r;

        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
        }
        return(good);

#if 0
retry:
        good = 0;
        next = TAILQ_FIRST(&ip->target_list);
        if (next) {
                hammer_ref(&next->lock);
                hammer_ref(&next->ip->lock);
        }
        while ((depend = next) != NULL) {
                if (depend->target_ip == NULL) {
                        pip = depend->ip;
                        hammer_rel_mem_record(depend);
                        hammer_rel_inode(pip, 0);
                        goto retry;
                }
                KKASSERT(depend->target_ip == ip);
                next = TAILQ_NEXT(depend, target_entry);
                if (next) {
                        hammer_ref(&next->lock);
                        hammer_ref(&next->ip->lock);
                }
                r = hammer_setup_parent_inodes_helper(depend);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
                pip = depend->ip;
                hammer_rel_mem_record(depend);
                hammer_rel_inode(pip, 0);
        }
        return(good);
#endif
}
/*
 * This helper function takes a record representing the dependancy between
 * the parent inode and child inode.
 *
 * record->ip           = parent inode
 * record->target_ip    = child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependancy and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record)
{
        hammer_mount_t hmp;
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;
        hmp = pip->hmp;

        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                if (record->flush_group != hmp->flusher.next) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        return(-1);
                }
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                /* GENERAL or DEL */
                return(0);
        }

        /*
         * It must be a setup record.  Try to resolve the setup dependancies
         * by recursing upwards so we can place ip on the flush list.
         */
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        good = hammer_setup_parent_inodes(pip);

        /*
         * We can't flush ip because it has no connectivity (XXX also check
         * nlinks for pre-existing connectivity!).  Flag it so any resolution
         * recurses back down.
         */
        if (good < 0) {
                pip->flags |= HAMMER_INODE_REFLUSH;
                return(good);
        }

        /*
         * We are go, place the parent inode in a flushing state so we can
         * place its record in a flushing state.  Note that the parent
         * may already be flushing.  The record must be in the same flush
         * group as the parent.
         */
        if (pip->flush_state != HAMMER_FST_FLUSH)
                hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
        KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

#if 0
        if (record->type == HAMMER_MEM_RECORD_DEL &&
            (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
                /*
                 * Regardless of flushing state we cannot sync this path if the
                 * record represents a delete-on-disk but the target inode
                 * is not ready to sync its own deletion.
                 *
                 * XXX need to count effective nlinks to determine whether
                 * the flush is ok, otherwise removing a hardlink will
                 * just leave the DEL record to rot.
                 */
                record->target_ip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        } else
#endif
        if (pip->flush_group == pip->hmp->flusher.next) {
                /*
                 * This is the record we wanted to synchronize.  If the
                 * record went into a flush state while we blocked it
                 * had better be in the correct flush group.
                 */
                if (record->flush_state != HAMMER_FST_FLUSH) {
                        record->flush_state = HAMMER_FST_FLUSH;
                        record->flush_group = pip->flush_group;
                        hammer_ref(&record->lock);
                } else {
                        KKASSERT(record->flush_group == pip->flush_group);
                }
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);

                /*
                 * A general or delete-on-disk record does not contribute
                 * to our visibility.  We can still flush it, however.
                 */
                return(0);
        } else {
                /*
                 * We couldn't resolve the dependancies, request that the
                 * inode be flushed when the dependancies can be resolved.
                 */
                pip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        }
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
        int go_count;

        /*
         * Set flush state and prevent the flusher from cycling into
         * the next flush group.  Do not place the ip on the list yet.
         * Inodes not in the idle state get an extra reference.
         */
        KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
        if (ip->flush_state == HAMMER_FST_IDLE)
                hammer_ref(&ip->lock);
        ip->flush_state = HAMMER_FST_FLUSH;
        ip->flush_group = ip->hmp->flusher.next;
        ++ip->hmp->flusher.group_lock;
        ++ip->hmp->count_iqueued;
        ++hammer_count_iqueued;

        /*
         * We need to be able to vfsync/truncate from the backend.
         */
        KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
        if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
                ip->flags |= HAMMER_INODE_VHELD;
                vref(ip->vp);
        }

        /*
         * Figure out how many in-memory records we can actually flush
         * (not including inode meta-data, buffers, etc).
         *
         * Do not add new records to the flush if this is a recursion or
         * if we must still complete a flush from the previous flush cycle.
         */
        if (flags & HAMMER_FLUSH_RECURSION) {
                go_count = 1;
        } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_syncgrp_child_callback, NULL);
                go_count = 1;
        } else {
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_setup_child_callback, NULL);
        }

        /*
         * This is a more involved test that includes go_count.  If we
         * can't flush, flag the inode and return.  If go_count is 0 we
         * were unable to flush any records in our rec_tree and
         * must ignore the XDIRTY flag.
         */
        if (go_count == 0) {
                if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
                        ip->flags |= HAMMER_INODE_REFLUSH;

                        --ip->hmp->count_iqueued;
                        --hammer_count_iqueued;

                        ip->flush_state = HAMMER_FST_SETUP;
                        if (ip->flags & HAMMER_INODE_VHELD) {
                                ip->flags &= ~HAMMER_INODE_VHELD;
                                vrele(ip->vp);
                        }
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                        if (--ip->hmp->flusher.group_lock == 0)
                                wakeup(&ip->hmp->flusher.group_lock);
                        return;
                }
        }

        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * We continue to retain save_trunc_off even when all truncations
         * have been resolved as an optimization to determine if we can
         * skip the B-Tree lookup for overwrite deletions.
         *
         * NOTE: The DELETING flag is a mod flag, but it is also sticky,
         * and stays in ip->flags.  Once set, it stays set until the
         * inode is destroyed.
         *
         * NOTE: If a truncation from a previous flush cycle had to be
         * continued into this one, the TRUNCATED flag will still be
         * set in sync_flags as will WOULDBLOCK.  When this occurs
         * we CANNOT safely integrate a new truncation from the front-end
         * because there may be data records in-memory assigned a flush
         * state from the previous cycle that are supposed to be flushed
         * before the next frontend truncation.
         */
        if ((ip->flags & (HAMMER_INODE_TRUNCATED | HAMMER_INODE_WOULDBLOCK)) ==
            HAMMER_INODE_TRUNCATED) {
                KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
                ip->sync_trunc_off = ip->trunc_off;
                ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
                ip->flags &= ~HAMMER_INODE_TRUNCATED;
                ip->sync_flags |= HAMMER_INODE_TRUNCATED;

                /*
                 * The save_trunc_off used to cache whether the B-Tree
                 * holds any records past that point is not used until
                 * after the truncation has succeeded, so we can safely
                 * set it now.
                 */
                if (ip->save_trunc_off > ip->sync_trunc_off)
                        ip->save_trunc_off = ip->sync_trunc_off;
        }
        ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
                           ~HAMMER_INODE_TRUNCATED);
        ip->sync_ino_leaf = ip->ino_leaf;
        ip->sync_ino_data = ip->ino_data;
        ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
        if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
                kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

        /*
         * The flusher list inherits our inode and reference.
         */
        TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
        if (--ip->hmp->flusher.group_lock == 0)
                wakeup(&ip->hmp->flusher.group_lock);

        if (flags & HAMMER_FLUSH_SIGNAL) {
                hammer_flusher_async(ip->hmp);
        }
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
        hammer_inode_t target_ip;
        hammer_inode_t ip;
        int r;

        /*
         * Deleted records are ignored.  Note that the flush detects deleted
         * front-end records at multiple points to deal with races.  This is
         * just the first line of defense.  The only time DELETED_FE cannot
         * be set is when HAMMER_RECF_INTERLOCK_BE is set.
         *
         * Don't get confused between record deletion and, say, directory
         * entry deletion.  The deletion of a directory entry that is on
         * the media has nothing to do with the record deletion flags.
         *
         * The flush_group for a record already in a flush state must
         * be updated.  This case can only occur if an inode deleting
         * too many records had to be moved to the next flush group.
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
                if (rec->flush_state == HAMMER_FST_FLUSH) {
                        KKASSERT(rec->ip->flags & HAMMER_INODE_WOULDBLOCK);
                        rec->flush_group = rec->ip->flush_group;
                        r = 1;
                } else {
                        r = 0;
                }
                return(r);
        }

        /*
         * If the record is in an idle state it has no dependancies and
         * can be flushed.
         */
        ip = rec->ip;
        r = 0;

        switch(rec->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * Record has no setup dependancy, we can flush it.
                 */
                KKASSERT(rec->target_ip == NULL);
                rec->flush_state = HAMMER_FST_FLUSH;
                rec->flush_group = ip->flush_group;
                hammer_ref(&rec->lock);
                r = 1;
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Record has a setup dependancy.  Try to include the
                 * target ip in the flush.
                 *
                 * We have to be careful here, if we do not do the right
                 * thing we can lose track of dirty inodes and the system
                 * will lockup trying to allocate buffers.
                 */
                target_ip = rec->target_ip;
                KKASSERT(target_ip != NULL);
                KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
                if (target_ip->flush_state == HAMMER_FST_FLUSH) {
                        /*
                         * If the target IP is already flushing in our group
                         * we are golden, otherwise make sure the target
                         * reflushes.
                         */
                        if (target_ip->flush_group == ip->flush_group) {
                                rec->flush_state = HAMMER_FST_FLUSH;
                                rec->flush_group = ip->flush_group;
                                hammer_ref(&rec->lock);
                                r = 1;
                        } else {
                                target_ip->flags |= HAMMER_INODE_REFLUSH;
                        }
                } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
                        /*
                         * If the target IP is not flushing we can force
                         * it to flush, even if it is unable to write out
                         * any of its own records we have at least one in
                         * hand that we CAN deal with.
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                } else {
                        /*
                         * General or delete-on-disk record.
                         *
                         * XXX this needs help.  If a delete-on-disk we could
                         * disconnect the target.  If the target has its own
                         * dependancies they really need to be flushed.
                         *
                         * XXX
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * If the WOULDBLOCK flag is set records may have been left
                 * over from a previous flush attempt and should be moved
                 * to the current flush group.  If it is not set then all
                 * such records had better have been flushed already or
                 * already associated with the current flush group.
                 */
                if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                        rec->flush_group = ip->flush_group;
                } else {
                        KKASSERT(rec->flush_group == ip->flush_group);
                }
                r = 1;
                break;
        }
        return(r);
}
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
        hammer_inode_t ip = rec->ip;

        switch(rec->flush_state) {
        case HAMMER_FST_FLUSH:
                if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                        rec->flush_group = ip->flush_group;
                } else {
                        KKASSERT(rec->flush_group == ip->flush_group);
                }
                break;
        default:
                break;
        }
        return(0);
}
/*
 * Wait for a previously queued flush to complete.  Not only do we need to
 * wait for the inode to sync out, we also may have to run the flusher again
 * to get it past the UNDO position pertaining to the flush so a crash does
 * not 'undo' our flush.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
        hammer_mount_t hmp = ip->hmp;
        int sync_group;
        int waitcount;

        sync_group = ip->flush_group;
        waitcount = (ip->flags & HAMMER_INODE_REFLUSH) ? 2 : 1;

        if (ip->flush_state == HAMMER_FST_SETUP) {
                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        }
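        /*
         * Flush group numbers increase monotonically, so comparing
         * (flush group - sync_group) against waitcount waits out the
         * original flush group, plus one more group when a reflush was
         * pending (waitcount == 2).
         */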
        /* XXX can we make this != FST_IDLE ? check SETUP depends */
        while (ip->flush_state == HAMMER_FST_FLUSH &&
               (ip->flush_group - sync_group) < waitcount) {
                ip->flags |= HAMMER_INODE_FLUSHW;
                tsleep(&ip->flags, 0, "hmrwin", 0);
        }
        while (hmp->flusher.done - sync_group < waitcount) {
                kprintf("Y");
                hammer_flusher_sync(hmp);
        }
}
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
        hammer_mount_t hmp;
        int dorel;

        KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

        hmp = ip->hmp;

        /*
         * Merge left-over flags back into the frontend and fix the state.
         * Incomplete truncations are retained by the backend.
         */
        ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
        ip->sync_flags &= HAMMER_INODE_TRUNCATED;

        /*
         * The backend may have adjusted nlinks, so if the adjusted nlinks
         * does not match the frontend set the frontend's DDIRTY flag again.
         */
        if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
                ip->flags |= HAMMER_INODE_DDIRTY;

        /*
         * Fix up the dirty buffer status.  IO completions will also
         * try to clean up rsv_databufs.
         */
        if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
                ip->flags |= HAMMER_INODE_BUFS;
        } else {
                hmp->rsv_databufs -= ip->rsv_databufs;
                ip->rsv_databufs = 0;
        }

        /*
         * Re-set the XDIRTY flag if some of the inode's in-memory records
         * could not be flushed.
         */
        KKASSERT((RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
                 (!RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

        /*
         * Do not lose track of inodes which no longer have vnode
         * associations, otherwise they may never get flushed again.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
                ip->flags |= HAMMER_INODE_REFLUSH;

        /*
         * Clean up the vnode ref
         */
        if (ip->flags & HAMMER_INODE_VHELD) {
                ip->flags &= ~HAMMER_INODE_VHELD;
                vrele(ip->vp);
        }

        /*
         * Adjust flush_state.  The target state (idle or setup) shouldn't
         * be terribly important since we will reflush if we really need
         * to do anything.
         *
         * If the WOULDBLOCK flag is set we must re-flush immediately
         * to continue a potentially large deletion.  The flag also causes
         * the hammer_setup_child_callback() to move records in the old
         * flush group to the new one.
         */
        if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                kprintf("B");
                ip->flush_state = HAMMER_FST_IDLE;
                hammer_flush_inode_core(ip, HAMMER_FLUSH_SIGNAL);
                ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
                dorel = 1;
        } else if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
                ip->flush_state = HAMMER_FST_IDLE;
                dorel = 1;
        } else {
                ip->flush_state = HAMMER_FST_SETUP;
                dorel = 0;
        }

        --hmp->count_iqueued;
        --hammer_count_iqueued;

        /*
         * If the frontend made more changes and requested another flush,
         * then try to get it running.
         */
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
        }

        /*
         * If the inode is now clean drop the space reservation.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            (ip->flags & HAMMER_INODE_RSV_INODES)) {
                ip->flags &= ~HAMMER_INODE_RSV_INODES;
                --hmp->rsv_inodes;
        }

        /*
         * Finally, if the frontend is waiting for a flush to complete,
         * wake it up.
         */
        if (ip->flush_state != HAMMER_FST_FLUSH) {
                if (ip->flags & HAMMER_INODE_FLUSHW) {
                        ip->flags &= ~HAMMER_INODE_FLUSHW;
                        wakeup(&ip->flags);
                }
        }
        if (dorel)
                hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
        hammer_cursor_t cursor = data;
        hammer_transaction_t trans = cursor->trans;
        int error;

        /*
         * Skip records that do not belong to the current flush.
         */
        ++hammer_stats_record_iterations;
        if (record->flush_state != HAMMER_FST_FLUSH)
                return(0);

#if 1
        if (record->flush_group != record->ip->flush_group) {
                kprintf("sync_record %p ip %p bad flush group %d %d\n",
                        record, record->ip, record->flush_group,
                        record->ip->flush_group);
                Debugger("blah2");
                return(0);
        }
#endif
        KKASSERT(record->flush_group == record->ip->flush_group);

        /*
         * Interlock the record using the BE flag.  Once BE is set the
         * frontend cannot change the state of FE.
         *
         * NOTE: If FE is set prior to us setting BE we still sync the
         * record out, but the flush completion code converts it to
         * a delete-on-disk record instead of destroying it.
         */
        KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
        record->flags |= HAMMER_RECF_INTERLOCK_BE;

        /*
         * The backend may have already disposed of the record.
         */
        if (record->flags & HAMMER_RECF_DELETED_BE) {
                error = 0;
                goto done;
        }

        /*
         * If the whole inode is being deleted all on-disk records will
         * be deleted very soon, we can't sync any new records to disk
         * because they will be deleted in the same transaction they were
         * created in (delete_tid == create_tid), which will assert.
         *
         * XXX There may be a case with RECORD_ADD with DELETED_FE set
         * that we currently panic on.
         */
        if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
                switch(record->type) {
                case HAMMER_MEM_RECORD_DATA:
                        /*
                         * We don't have to do anything, if the record was
                         * committed the space will have been accounted for
                         * in the blockmap.
                         */
                        /* fall through */
                case HAMMER_MEM_RECORD_GENERAL:
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        error = 0;
                        goto done;
                case HAMMER_MEM_RECORD_ADD:
                        panic("hammer_sync_record_callback: illegal add "
                              "during inode deletion record %p", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_INODE:
                        panic("hammer_sync_record_callback: attempt to "
                              "sync inode record %p?", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_DEL:
                        /*
                         * Follow through and issue the on-disk deletion
                         */
                        break;
                }
        }

        /*
         * If DELETED_FE is set special handling is needed for directory
         * entries.  Dependent pieces related to the directory entry may
         * have already been synced to disk.  If this occurs we have to
         * sync the directory entry and then change the in-memory record
         * from an ADD to a DELETE to cover the fact that it's been
         * deleted by the frontend.
         *
         * A directory delete covering record (MEM_RECORD_DEL) can never
         * be deleted by the frontend.
         *
         * Any other record type (aka DATA) can be deleted by the frontend.
         * XXX At the moment the flusher must skip it because there may
         * be another data record in the flush group for the same block,
         * meaning that some frontend data changes can leak into the backend's
         * synchronization point.
         */
        if (record->flags & HAMMER_RECF_DELETED_FE) {
                if (record->type == HAMMER_MEM_RECORD_ADD) {
                        record->flags |= HAMMER_RECF_CONVERT_DELETE;
                } else {
                        KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        error = 0;
                        goto done;
                }
        }

        /*
         * Assign the create_tid for new records.  Deletions already
         * have the record's entire key properly set up.
         */
        if (record->type != HAMMER_MEM_RECORD_DEL)
                record->leaf.base.create_tid = trans->tid;
        record->leaf.create_ts = trans->time32;
        for (;;) {
                error = hammer_ip_sync_record_cursor(cursor, record);
                if (error != EDEADLK)
                        break;
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
                                           record->ip);
                if (error)
                        break;
        }
        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

        if (error) {
                error = -error;
                if (error != -ENOSPC) {
                        kprintf("hammer_sync_record_callback: sync failed rec "
                                "%p, error %d\n", record, error);
                        Debugger("sync failed rec");
                }
        }
done:
        hammer_flush_record_done(record, error);
        return(error);
}
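
/*
 * Note: the callback above intentionally returns a negated errno on
 * failure.  As used in this file, RB_SCAN() accumulates positive callback
 * return values (see go_count in hammer_flush_inode_core()) while a
 * negative return aborts the scan, which is why hammer_sync_inode()
 * re-negates the scan result below to recover the errno.
 */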
/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        hammer_node_t tmp_node;
        hammer_record_t depend;
        hammer_record_t next;
        int error, tmp_error;
        u_int64_t nlinks;

        if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
                return(0);

        hammer_start_transaction_fls(&trans, ip->hmp);
        error = hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        if (error)
                goto done;

        /*
         * Any directory records referencing this inode which are not in
         * our current flush group must adjust our nlink count for the
         * purposes of synchronization to disk.
         *
         * Records which are in our flush group can be unlinked from our
         * inode now, potentially allowing the inode to be physically
         * deleted.
         *
         * This cannot block.
         */
        nlinks = ip->ino_data.nlinks;
        next = TAILQ_FIRST(&ip->target_list);
        while ((depend = next) != NULL) {
                next = TAILQ_NEXT(depend, target_entry);
                if (depend->flush_state == HAMMER_FST_FLUSH &&
                    depend->flush_group == ip->hmp->flusher.act) {
                        /*
                         * If this is an ADD that was deleted by the frontend
                         * the frontend nlinks count will have already been
                         * decremented, but the backend is going to sync its
                         * directory entry and must account for it.  The
                         * record will be converted to a delete-on-disk when
                         * it gets synced.
                         *
                         * If the ADD was not deleted by the frontend we
                         * can remove the dependancy from our target_list.
                         */
                        if (depend->flags & HAMMER_RECF_DELETED_FE) {
                                ++nlinks;
                        } else {
                                TAILQ_REMOVE(&ip->target_list, depend,
                                             target_entry);
                                depend->target_ip = NULL;
                        }
                } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
                        /*
                         * Not part of our flush group
                         */
                        KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
                        switch(depend->type) {
                        case HAMMER_MEM_RECORD_ADD:
                                --nlinks;
                                break;
                        case HAMMER_MEM_RECORD_DEL:
                                ++nlinks;
                                break;
                        default:
                                break;
                        }
                }
        }

        /*
         * Set dirty if we had to modify the link count.
         */
        if (ip->sync_ino_data.nlinks != nlinks) {
                KKASSERT((int64_t)nlinks >= 0);
                ip->sync_ino_data.nlinks = nlinks;
                ip->sync_flags |= HAMMER_INODE_DDIRTY;
        }

        /*
         * If there is a truncation queued destroy any data past the (aligned)
         * truncation point.  Userland will have dealt with the buffer
         * containing the truncation point for us.
         *
         * We don't flush pending frontend data buffers until after we've
         * dealt with the truncation.
         */
        if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                /*
                 * Interlock trunc_off.  The VOP front-end may continue to
                 * make adjustments to it while we are blocked.
                 */
                off_t trunc_off;
                off_t aligned_trunc_off;
                int blkmask;

                trunc_off = ip->sync_trunc_off;
                blkmask = hammer_blocksize(trunc_off) - 1;
                aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
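
                /*
                 * Alignment example (illustrative, assuming
                 * hammer_blocksize() returns 16384 for this offset):
                 * blkmask is then 0x3fff and a trunc_off of 5000 rounds
                 * up to an aligned_trunc_off of 16384.  Only whole blocks
                 * at or past the truncation point are deleted below; the
                 * partial block was already handled by the frontend.
                 */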

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache; EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
						aligned_trunc_off,
						0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}

		if (error)
			Debugger("hammer_ip_delete_range errored");

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries or delete-on-disk records.
	 *
	 * Not all records will be flushed, but clear XDIRTY anyway.  We
	 * will set it again in the frontend hammer_flush_inode_done()
	 * if records remain.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
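		/*
		 * A scan aborted by the callback comes back as a negative
		 * value (the convention assumed here); fold it back into
		 * a positive errno so it propagates like any other error.
		 */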
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
		if (tmp_node) {
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans.tid;
			ip->sync_ino_leaf.base.delete_tid = trans.tid;
			ip->ino_leaf.delete_ts = trans.time32;
			ip->sync_ino_leaf.delete_ts = trans.time32;

			/*
			 * Adjust the inode count in the volume header.
			 */
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(&trans,
							   trans.rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans.rootvol);
			}
		} else {
			Debugger("hammer_ip_delete_clean errored");
		}
	}

	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	if (error)
		Debugger("RB_SCAN errored");

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the on-disk
	 * inode to satisfy visibility requirements if there happen to be
	 * directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans.tid;
		ip->ino_leaf.create_ts = trans.time32;
		ip->sync_ino_leaf.base.create_tid = trans.tid;
		ip->sync_ino_leaf.create_ts = trans.time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}
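
	/*
	 * Recap of the four states handled above (editor's summary,
	 * restating the cases themselves):
	 *
	 *	DELETED|ONDISK	clear frontend mod flags; the delete
	 *			mechanic handles the rest.
	 *	DELETED only	same, plus throw away in-memory records
	 *			that will never reach the media.
	 *	ONDISK only	nothing extra to do.
	 *	neither		stamp create_tid/create_ts and force an
	 *			initial record out via DDIRTY.
	 */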

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ATIME/MTIME (ITIMES) flags are set we can update
	 * the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME |
			      HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");
done:
	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * Final cleanup.
		 */
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}

/*
 * Re-test an inode when a dependency has gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}

/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wake up one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
		reclaim->okydoky = 1;
		wakeup(reclaim);
	}
}

/*
 * Set up our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
				  &reclaim, entry);
	} else {
		reclaim.okydoky = 1;
	}

	if (reclaim.okydoky == 0) {
		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
			HAMMER_RECLAIM_WAIT;
		if (delay >= 0)
			tsleep(&reclaim, 0, "hmrrcm", delay + 1);
		if (reclaim.okydoky == 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}
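
/*
 * Worked example for the delay computation above (editor's sketch using
 * a hypothetical threshold; the real HAMMER_RECLAIM_WAIT is defined
 * elsewhere): if HAMMER_RECLAIM_WAIT were 4000 and inode_reclaims were
 * 6000, then delay = (6000 - 4000) * hz / 4000 = hz / 2, so the thread
 * sleeps at most roughly half a second before pulling itself back off
 * the reclaim_list.  The backlog beyond the threshold scales the cap
 * linearly.
 */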