/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <vm/vm_page2.h>

#include "hammer.h"

static int hammer_unload_inode(hammer_inode_t ip);
static void hammer_free_inode(hammer_inode_t ip);
static void hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
					pid_t pid);
static hammer_inode_t __hammer_find_inode(hammer_transaction_t trans,
					int64_t obj_id, hammer_tid_t asof,
					uint32_t localization);

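/*
 * Rate limiter for kernel diagnostic printfs issued from this file
 * (e.g. the flush-recursion depth warning in
 * hammer_setup_parent_inodes()), limiting output to roughly one
 * report per second.
 */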
struct krate hammer_gen_krate = { 1 };

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

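/*
 * RB-Tree support for the REDO rb-tree, ordering inodes by their
 * starting offset within the REDO FIFO (redo_fifo_start).
 */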
int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->redo_fifo_start < ip2->redo_fifo_start)
		return(-1);
	if (ip1->redo_fifo_start > ip2->redo_fifo_start)
		return(1);
	return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	uint32_t localization = *(uint32_t *)data;

	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}

RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, uint32_t, localization);

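/*
 * The RB_GENERATE*() macros above expand into the tree functions used
 * in the rest of this file, e.g. hammer_ino_rb_tree_RB_LOOKUP_INFO()
 * (from the XLOOKUP variant, used by __hammer_find_inode()) and
 * hammer_ino_rb_tree_RB_SCAN() (used by hammer_scan_inode_snapshots()).
 */
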
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	hammer_inode_t ip = VTOI(ap->a_vp);
	hammer_mount_t hmp;

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	if (ip->ino_data.nlinks == 0) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_inode_unloadable_check(ip, 0);
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		lwkt_reltoken(&hmp->fs_token);
		vrecycle(ap->a_vp);
	}
	return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	hammer_inode_t ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_lock_ex(&ip->lock);
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaims;
			++hmp->count_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_unlock(&ip->lock);
		vclrisdirty(vp);
		hammer_rel_inode(ip, 1);
		lwkt_reltoken(&hmp->fs_token);
	}
	return(0);
}

/*
 * Inform the kernel that the inode is dirty.  This will be checked
 * by vn_unlock().
 *
 * Theoretically in order to reclaim a vnode the hammer_vop_reclaim()
 * must be called which will interlock against our inode lock, so
 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty())
 * should be stable without having to acquire any new locks.
 */
void
hammer_inode_dirty(hammer_inode_t ip)
{
	struct vnode *vp;

	if ((ip->flags & HAMMER_INODE_MODMASK) &&
	    (vp = ip->vp) != NULL &&
	    (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) {
		vsetisdirty(vp);
	}
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(hammer_inode_t ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	uint8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			case HAMMER_OBJTYPE_REGFILE:
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT) {
				if (ip->obj_asof == hmp->asof) {
					if (ip->obj_localization ==
						HAMMER_DEF_LOCALIZATION)
						vsetflags(vp, VROOT);
					else
						vsetflags(vp, VPFSROOT);
				} else {
					vsetflags(vp, VPFSROOT);
				}
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG) {
				vinitvmio(vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size),
					  hammer_blockoff(ip->ino_data.size));
			}
			vx_downgrade(vp);
			break;
		}

		/*
		 * Interlock vnode clearing.  This does not prevent the
		 * vnode from going into a reclaimed state but it does
		 * prevent it from being destroyed or reused so the vget()
		 * will properly fail.
		 */
		hammer_lock_ex(&ip->lock);
		if ((vp = ip->vp) == NULL) {
			hammer_unlock(&ip->lock);
			continue;
		}
		vhold(vp);
		hammer_unlock(&ip->lock);

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp) {
				vdrop(vp);
				break;
			}
			vput(vp);
		}
		vdrop(vp);
	}
	*vpp = vp;
	return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}

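/*
 * Illustrative shape of a callback usable with
 * hammer_scan_inode_snapshots() (a sketch only; the real callbacks
 * live in the direct-io invalidation path).  Returning 0 continues
 * the underlying RB_SCAN:
 *
 *	static int
 *	example_callback(hammer_inode_t ip, void *data)
 *	{
 *		... operate on one snapshot instance of the object ...
 *		return(0);
 *	}
 */
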
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
hammer_inode_t
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, uint32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_cursor cursor;
	hammer_inode_t ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
loop:
	*errorp = 0;
	ip = __hammer_find_inode(trans, obj_id, asof, localization);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		HAMMER_MAX_KEY;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization | HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = asof;
	cursor.flags = HAMMER_CURSOR_GET_DATA | HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);

	/*
	 * NEWINODE is only set if the inode becomes dirty later,
	 * setting it here just leads to unnecessary stalls.
	 *
	 * trans->flags |= HAMMER_TRANSF_NEWINODE;
	 */
	return (ip);
}

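/*
 * Typical frontend usage sketch (illustrative only, error handling
 * elided): obtain a referenced inode, attach a vnode to it, then
 * drop the extra inode reference:
 *
 *	ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *			      0, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */
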
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
hammer_inode_t
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		       int64_t obj_id, hammer_tid_t asof, uint32_t localization,
		       int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
loop:
	*errorp = 0;
	ip = __hammer_find_inode(trans, obj_id, asof, localization);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		HAMMER_MAX_KEY;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 * Dummy inodes do not count.
 */
hammer_inode_t
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, uint32_t localization)
{
	hammer_inode_t ip;

	ip = __hammer_find_inode(trans, obj_id, asof, localization);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}

/*
 * Return an inode if it is in our in-memory inode cache, else NULL.
 * This function does not add a reference to the inode.
 */
static hammer_inode_t
__hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		    hammer_tid_t asof, uint32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	hammer_inode_t ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);

	return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a non-root PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, hammer_inode_t *ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	uint32_t dummy;

	hmp = trans->hmp;

	/*
	 * Disallow the creation of new inodes in directories which
	 * have been deleted.  In HAMMER, this will cause a record
	 * syncing assertion later on in the flush code.
	 */
	if (dip && dip->ino_data.nlinks == 0) {
		*ipp = NULL;
		return (EINVAL);
	}

	/*
	 * Allocate inode
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != HAMMER_DEF_LOCALIZATION);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_direntry_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = HAMMER_MAX_KEY;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysadmin can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization |
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}
	if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIRHASH_ALG1;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid if dip exists.
	 * The inode is probably a PFS root if dip is NULL.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hpanic("duplicate obj_id %jx", (intmax_t)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}

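/*
 * NOTE: hammer_create_inode() creates the inode in-memory only.
 * HAMMER_INODE_ONDISK is not set here; the first backend flush via
 * hammer_update_inode() lays down the initial inode record and sets
 * the flag.
 */
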
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	hammer_mount_t hmp;

	hmp = ip->hmp;
	KKASSERT(hammer_oneref(&ip->lock));
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     uint32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are associated with the root inode (not the PFS root
	 * inode, but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION |
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (hammer_is_pfs_deleted(&cursor.data->pfsd)) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	/*
	 * PFS records are associated with the root inode (not the PFS root
	 * inode, but the real root).
	 */
	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization |
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization |
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 *
 * Make sure a caller isn't creating a PFS from a non-root PFS.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		if (lo_to_pfs(dip->obj_localization) != HAMMER_ROOT_PFSID) {
			hmkprintf(trans->hmp,
				"Warning: creating a PFS from a non-root PFS "
				"is not allowed\n");
			return(EINVAL);
		}
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->vp && (ip->vp->v_flag & VPFSROOT)) {
		/*
		 * The hammer pfs-upgrade directive itself might have the
		 * root of the pfs open.  Just allow it.
		 */
		res = 0;
	} else {
		/*
		 * Don't allow any subdirectories or files to be open.
		 */
		if (hammer_isactive(&ip->lock) == 2 && ip->vp)
			vclean_unlocked(ip->vp);	/* might not succeed */
		if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
			res = 0;
		else
			res = -1;	/* stop, someone is using the inode */
	}
	hammer_rel_inode(ip, 0);
	return(res);
}

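/*
 * Unload all inodes and vnodes associated with the specified PFS,
 * syncing the flusher between attempts.  Returns ENOTEMPTY if some
 * inodes could not be disassociated.
 */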
int
hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
					   hammer_inode_pfs_cmp,
					   hammer_unload_pseudofs_callback,
					   &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}

/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_rel(&pfsm->lock);
	if (hammer_norefs(&pfsm->lock)) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}

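/*
 * Note on the strategy below: an inode with an on-disk presence is
 * updated by first deleting the visible inode record (setting
 * DELONDISK), then laying down a fresh HAMMER_RECTYPE_INODE record
 * with create_tid = trans->tid.
 */
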
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization |
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			hdkprintf("IPDEL %p %08x %d\n", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				hdkprintf("error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				hdkprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				hdkprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				hdkprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				hdkprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_SDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					hdkprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization |
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					&cursor->data->inode.mtime,
					sizeof(cursor->data->inode.atime) +
					sizeof(cursor->data->inode.mtime));
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer_noundo(trans, cursor->data_buffer);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(hammer_inode_t ip, int flush)
{
	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (hammer_oneref(&ip->lock)) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (hammer_oneref(&ip->lock)) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(hammer_isactive(&ip->lock) >= 1);
			if (hammer_isactive(&ip->lock) > 1) {
				hammer_rel(&ip->lock);
				break;
			}
		}
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(hammer_oneref(&ip->lock),
		("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock)));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	if (ip->flags & HAMMER_INODE_RDIRTY) {
		RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
		ip->flags &= ~HAMMER_INODE_RDIRTY;
	}
	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(hammer_inode_t ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inode's in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(hammer_oneref(&rec->lock));
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_rel(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		hdkprintf("Unexpected vnode association ip %p vp %p\n",
			  ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *			and not including size changes due to write-append
 *			(but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *			write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_SDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	/*
	 * Set the NEWINODE flag in the transaction if the inode
	 * transitions to a dirty state.  This is used to track
	 * the load on the inode cache.
	 */
	if (trans &&
	    (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (flags & HAMMER_INODE_MODMASK)) {
		trans->flags |= HAMMER_TRANSF_NEWINODE;
	}
	if (flags & HAMMER_INODE_MODMASK)
		hammer_inode_dirty(ip);
	ip->flags |= flags;
}

/*
 * Attempt to quickly update the atime for a hammer inode.  Return 0 on
 * success, -1 on failure.
 *
 * We attempt to update the atime with only the ip lock and not the
 * whole filesystem lock in order to improve concurrency.  We can only
 * do this safely if the ATIME flag is already pending on the inode.
 *
 * This function is called via a vnops path (ip pointer is stable) without
 * fs_token held.
 */
int
hammer_update_atime_quick(hammer_inode_t ip)
{
	struct timespec ts;
	int res = -1;

	if ((ip->flags & HAMMER_INODE_RO) ||
	    (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
		/*
		 * Silently indicate success on read-only mount/snap
		 */
		res = 0;
	} else if (ip->flags & HAMMER_INODE_ATIME) {
		/*
		 * Double check with inode lock held against backend.  This
		 * is only safe if all we need to do is update
		 * ino_data.atime.
		 */
		vfs_timestamp(&ts);
		hammer_lock_ex(&ip->lock);
		if (ip->flags & HAMMER_INODE_ATIME) {
			ip->ino_data.atime =
			    (unsigned long)ts.tv_sec * 1000000ULL +
			    ts.tv_nsec / 1000;
			res = 0;
		}
		hammer_unlock(&ip->lock);
	}
	return res;
}

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * fill_flush_group is the first flush group we may be able to
	 * continue filling, it may be open or closed but it will always
	 * be past the currently flushing (running) flg.
	 *
	 * next_flush_group is the next open flush group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->fill_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
		    flg->total_count <= hammer_autoflush) {
			break;
		}
		hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		flg->seq = hmp->flusher.next++;
		if (hmp->next_flush_group == NULL)
			hmp->next_flush_group = flg;
		if (hmp->fill_flush_group == NULL)
			hmp->fill_flush_group = flg;
		RB_INIT(&flg->flush_tree);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}

/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		if (hammer_debug_general & 0x10000)
			hkrateprintf(&hammer_gen_krate,
				"Warning: depth limit reached on "
				"setup recursion, inode %p %016jx\n",
				ip, (intmax_t)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}

1917 * This helper function takes a record representing the dependancy between
1918 * the parent inode and child inode.
1920 * record = record in question (*rec in below)
1921 * record->ip = parent inode (*pip in below)
1922 * record->target_ip = child inode (*ip in below)
1924 * *pip--------------\
1925 * ^ \rec_tree
1926 * \ \
1927 * \ip /\\\\\ rbtree of recs from parent inode's view
1928 * \ //\\\\\\
1929 * \ / ........
1930 * \ /
1931 * \------*rec------target_ip------>*ip
1932 * ...target_entry<----...----->target_list<---...
1933 * list of recs from inode's view
1935 * We are asked to recurse upwards and convert the record from SETUP
1936 * to FLUSH if possible.
1938 * Return 1 if the record gives us connectivity
1940 * Return 0 if the record is not relevant
1942 * Return -1 if we can't resolve the dependancy and there is no connectivity.
1944 static int
1945 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1946 hammer_flush_group_t flg)
1948 hammer_inode_t pip;
1949 int good;
1951 KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1952 pip = record->ip;
1955 * If the record is already flushing, is it in our flush group?
1957 * If it is in our flush group but it is a general record or a
1958 * delete-on-disk, it does not improve our connectivity (return 0),
1959 * and if the target inode is not trying to destroy itself we can't
1960 * allow the operation yet anyway (the second return -1).
1962 if (record->flush_state == HAMMER_FST_FLUSH) {
1964 * If not in our flush group ask the parent to reflush
1965 * us as soon as possible.
1967 if (record->flush_group != flg) {
1968 pip->flags |= HAMMER_INODE_REFLUSH;
1969 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1970 return(-1);
1974 * If in our flush group everything is already set up,
1975 * just return whether the record will improve our
1976 * visibility or not.
1978 if (record->type == HAMMER_MEM_RECORD_ADD)
1979 return(1);
1980 return(0);
1984 * It must be a setup record. Try to resolve the setup dependancies
1985 * by recursing upwards so we can place ip on the flush list.
1987 * Limit ourselves to 20 levels of recursion to avoid blowing out
1988 * the kernel stack. If we hit the recursion limit we can't flush
1989 * until the parent flushes. The parent will flush independantly
1990 * on its own and ultimately a deep recursion will be resolved.
1992 KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1994 good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1997 * If good < 0 the parent has no connectivity and we cannot safely
1998 * flush the directory entry, which also means we can't flush our
1999 * ip. Flag us for downward recursion once the parent's
2000 * connectivity is resolved. Flag the parent for [re]flush or it
2001 * may not check for downward recursions.
2003 if (good < 0) {
2004 pip->flags |= HAMMER_INODE_REFLUSH;
2005 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2006 return(good);
2010 * We are go, place the parent inode in a flushing state so we can
2011 * place its record in a flushing state. Note that the parent
2012 * may already be flushing. The record must be in the same flush
2013 * group as the parent.
2015 if (pip->flush_state != HAMMER_FST_FLUSH)
2016 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
2017 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
2020 * It is possible for a rename to create a loop in the recursion
2021 * and revisit a record. This will result in the record being
2022 * placed in a flush state unexpectedly. This check deals with
2023 * the case.
2025 if (record->flush_state == HAMMER_FST_FLUSH) {
2026 if (record->type == HAMMER_MEM_RECORD_ADD)
2027 return(1);
2028 return(0);
2031 KKASSERT(record->flush_state == HAMMER_FST_SETUP);
2033 #if 0
2034 if (record->type == HAMMER_MEM_RECORD_DEL &&
2035 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
2037 * Regardless of flushing state we cannot sync this path if the
2038 * record represents a delete-on-disk but the target inode
2039 * is not ready to sync its own deletion.
2041 * XXX need to count effective nlinks to determine whether
2042 * the flush is ok, otherwise removing a hardlink will
2043 * just leave the DEL record to rot.
2045 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
2046 return(-1);
2047 } else
2048 #endif
2049 if (pip->flush_group == flg) {
2051 * Because we have not calculated nlinks yet we can just
2052 * set records to the flush state if the parent is in
2053 * the same flush group as we are.
2055 record->flush_state = HAMMER_FST_FLUSH;
2056 record->flush_group = flg;
2057 ++record->flush_group->refs;
2058 hammer_ref(&record->lock);
2061 * A general directory-add contributes to our visibility.
2063 * Otherwise it is probably a directory-delete or
2064 * delete-on-disk record and does not contribute to our
2065 * visibility (but we can still flush it).
2067 if (record->type == HAMMER_MEM_RECORD_ADD)
2068 return(1);
2069 return(0);
2070 } else {
2072 * If the parent is not in our flush group we cannot
2073 * flush this record yet, there is no visibility.
2074 * We tell the parent to reflush and mark ourselves
2075 * so the parent knows it should flush us too.
2077 pip->flags |= HAMMER_INODE_REFLUSH;
2078 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2079 return(-1);
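/*
 * Illustrative sketch (not part of HAMMER): the two views of a dependency
 * record drawn in the diagram above.  The parent inode owns the record via
 * its rec_tree while the child inode threads the same record onto its
 * target_list.  The structures below are simplified stand-ins, not the
 * real hammer_record/hammer_inode layouts.
 */
#if 0
struct demo_record;
TAILQ_HEAD(demo_record_list, demo_record);

struct demo_inode {
    struct demo_record_list target_list;   /* records targeting this inode */
};

struct demo_record {
    struct demo_inode *ip;          /* parent inode (owns the rec_tree) */
    struct demo_inode *target_ip;   /* child inode the record points at */
    TAILQ_ENTRY(demo_record) target_entry; /* on target_ip->target_list */
};
#endif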
2084 * This is the core routine placing an inode into the FST_FLUSH state.
2086 static void
2087 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
2089 hammer_mount_t hmp = ip->hmp;
2090 int go_count;
2093 * Set flush state and prevent the flusher from cycling into
2094 * the next flush group. Do not place the ip on the list yet.
2095 * Inodes not in the idle state get an extra reference.
2097 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
2098 if (ip->flush_state == HAMMER_FST_IDLE)
2099 hammer_ref(&ip->lock);
2100 ip->flush_state = HAMMER_FST_FLUSH;
2101 ip->flush_group = flg;
2102 ++hmp->flusher.group_lock;
2103 ++hmp->count_iqueued;
2104 ++hammer_count_iqueued;
2105 ++flg->total_count;
2106 hammer_redo_fifo_start_flush(ip);
2108 #if 0
2110 * We need to be able to vfsync/truncate from the backend.
2112 * XXX Any truncation from the backend will acquire the vnode
2113 * independently.
2115 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
2116 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
2117 ip->flags |= HAMMER_INODE_VHELD;
2118 vref(ip->vp);
2120 #endif
2123 * Figure out how many in-memory records we can actually flush
2124 * (not including inode meta-data, buffers, etc).
2126 KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
2127 if (flags & HAMMER_FLUSH_RECURSION) {
2129 * If this is an upwards recursion we do not want to
2130 * recurse down again!
2132 go_count = 1;
2133 #if 0
2134 } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2136 * No new records are added if we must complete a flush
2137 * from a previous cycle, but we do have to move the records
2138 * from the previous cycle to the current one.
2140 #if 0
2141 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2142 hammer_syncgrp_child_callback, NULL);
2143 #endif
2144 go_count = 1;
2145 #endif
2146 } else {
2148 * Normal flush, scan records and bring them into the flush.
2149 * Directory adds and deletes are usually skipped (they are
2150 * grouped with the related inode rather than with the
2151 * directory).
2153 * go_count can be negative, which means the scan aborted
2154 * due to the flush group being over-full and we should
2155 * flush what we have.
2157 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2158 hammer_setup_child_callback, NULL);
2162 * This is a more involved test that includes go_count. If we
2163 * can't flush, flag the inode and return. If go_count is 0 we
2164 * were unable to flush any records in our rec_tree and
2165 * must ignore the XDIRTY flag.
2167 if (go_count == 0) {
2168 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2169 --hmp->count_iqueued;
2170 --hammer_count_iqueued;
2172 --flg->total_count;
2173 ip->flush_state = HAMMER_FST_SETUP;
2174 ip->flush_group = NULL;
2175 if (flags & HAMMER_FLUSH_SIGNAL) {
2176 ip->flags |= HAMMER_INODE_REFLUSH |
2177 HAMMER_INODE_RESIGNAL;
2178 } else {
2179 ip->flags |= HAMMER_INODE_REFLUSH;
2181 #if 0
2182 if (ip->flags & HAMMER_INODE_VHELD) {
2183 ip->flags &= ~HAMMER_INODE_VHELD;
2184 vrele(ip->vp);
2186 #endif
2189 * REFLUSH is needed to trigger dependency wakeups
2190 * when an inode is in SETUP.
2192 ip->flags |= HAMMER_INODE_REFLUSH;
2193 if (--hmp->flusher.group_lock == 0)
2194 wakeup(&hmp->flusher.group_lock);
2195 return;
2200 * Snapshot the state of the inode for the backend flusher.
2202 * We continue to retain save_trunc_off even when all truncations
2203 * have been resolved as an optimization to determine if we can
2204 * skip the B-Tree lookup for overwrite deletions.
2206 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2207 * and stays in ip->flags. Once set, it stays set until the
2208 * inode is destroyed.
2210 if (ip->flags & HAMMER_INODE_TRUNCATED) {
2211 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2212 ip->sync_trunc_off = ip->trunc_off;
2213 ip->trunc_off = HAMMER_MAX_KEY;
2214 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2215 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2218 * The save_trunc_off used to cache whether the B-Tree
2219 * holds any records past that point is not used until
2220 * after the truncation has succeeded, so we can safely
2221 * set it now.
2223 if (ip->save_trunc_off > ip->sync_trunc_off)
2224 ip->save_trunc_off = ip->sync_trunc_off;
2226 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2227 ~HAMMER_INODE_TRUNCATED);
2228 ip->sync_ino_leaf = ip->ino_leaf;
2229 ip->sync_ino_data = ip->ino_data;
2230 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2233 * The flusher list inherits our inode and reference.
2235 KKASSERT(flg->running == 0);
2236 RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2237 if (--hmp->flusher.group_lock == 0)
2238 wakeup(&hmp->flusher.group_lock);
2241 * Auto-flush the group if it grows too large. Make sure the
2242 * inode reclaim wait pipeline continues to work.
2244 if (flg->total_count >= hammer_autoflush ||
2245 flg->total_count >= hammer_limit_reclaims / 4) {
2246 if (hmp->fill_flush_group == flg)
2247 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
2248 hammer_flusher_async(hmp, flg);
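/*
 * Illustrative sketch (not part of HAMMER): the truncation hand-off
 * performed earlier in hammer_flush_inode_core(), in isolation.  The
 * frontend offset is snapshotted for the backend, the frontend copy is
 * reset so new truncations can accumulate, and the cached B-Tree boundary
 * hint is clamped.  The parameter names are hypothetical stand-ins for the
 * ip->trunc_off family of fields.
 */
#if 0
static void
demo_snapshot_trunc(int64_t *trunc_off, int64_t *sync_trunc_off,
                    int64_t *save_trunc_off)
{
    *sync_trunc_off = *trunc_off;   /* backend flushes the snapshot */
    *trunc_off = HAMMER_MAX_KEY;    /* frontend accumulates anew */
    if (*save_trunc_off > *sync_trunc_off)
        *save_trunc_off = *sync_trunc_off; /* clamp the B-Tree hint */
}
#endif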
2253 * Callback for scan of ip->rec_tree. Try to include each record in our
2254 * flush. ip->flush_group has been set but the inode has not yet been
2255 * moved into a flushing state.
2257 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2258 * both inodes.
2260 * We return 1 for any record placed or found in FST_FLUSH, which prevents
2261 * the caller from shortcutting the flush.
2263 static int
2264 hammer_setup_child_callback(hammer_record_t rec, void *data)
2266 hammer_flush_group_t flg;
2267 hammer_inode_t target_ip;
2268 hammer_inode_t ip;
2269 int r;
2272 * Records deleted or committed by the backend are ignored.
2273 * Note that the flush detects deleted frontend records at
2274 * multiple points to deal with races. This is just the first
2275 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot
2276 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2277 * messes up link-count calculations.
2279 * NOTE: Don't get confused between record deletion and, say,
2280 * directory entry deletion. The deletion of a directory entry
2281 * which is on-media has nothing to do with the record deletion
2282 * flags.
2284 if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2285 HAMMER_RECF_COMMITTED)) {
2286 if (rec->flush_state == HAMMER_FST_FLUSH) {
2287 KKASSERT(rec->flush_group == rec->ip->flush_group);
2288 r = 1;
2289 } else {
2290 r = 0;
2292 return(r);
2296 * If the record is in an idle state it has no dependencies and
2297 * can be flushed.
2299 ip = rec->ip;
2300 flg = ip->flush_group;
2301 r = 0;
2303 switch(rec->flush_state) {
2304 case HAMMER_FST_IDLE:
2306 * The record has no setup dependency, we can flush it.
2308 KKASSERT(rec->target_ip == NULL);
2309 rec->flush_state = HAMMER_FST_FLUSH;
2310 rec->flush_group = flg;
2311 ++flg->refs;
2312 hammer_ref(&rec->lock);
2313 r = 1;
2314 break;
2315 case HAMMER_FST_SETUP:
2317 * The record has a setup dependency. These are typically
2318 * directory entry adds and deletes. Such entries will be
2319 * flushed when their inodes are flushed so we do not
2320 * usually have to add them to the flush here. However,
2321 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2322 * it is asking us to flush this record (and it).
2324 target_ip = rec->target_ip;
2325 KKASSERT(target_ip != NULL);
2326 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2329 * If the target IP is already flushing in our group
2330 * we could associate the record, but target_ip has
2331 * already synced ino_data to sync_ino_data and we
2332 * would also have to adjust nlinks. Plus there are
2333 * ordering issues for adds and deletes.
2335 * Reflush downward if this is an ADD, and upward if
2336 * this is a DEL.
2338 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2339 if (rec->type == HAMMER_MEM_RECORD_ADD)
2340 ip->flags |= HAMMER_INODE_REFLUSH;
2341 else
2342 target_ip->flags |= HAMMER_INODE_REFLUSH;
2343 break;
2347 * Target IP is not yet flushing. This can get complex
2348 * because we have to be careful about the recursion.
2350 * Directories create an issue for us in that if a flush
2351 * of a directory is requested the expectation is to flush
2352 * any pending directory entries, but this will cause the
2353 * related inodes to recursively flush as well. We can't
2354 * really defer the operation so just get as many as we
2355 * can and leave the rest for a later reflush.
2357 #if 0
2358 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2359 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2361 * We aren't reclaiming and the target ip was not
2362 * previously prevented from flushing due to this
2363 * record dependency. Do not flush this record.
2365 /*r = 0;*/
2366 } else
2367 #endif
2368 if (flg->total_count + flg->refs >
2369 ip->hmp->undo_rec_limit) {
2371 * Our flush group is over-full and we risk blowing
2372 * out the UNDO FIFO. Stop the scan, flush what we
2373 * have, then reflush the directory.
2375 * The directory may be forced through multiple
2376 * flush groups before it can be completely
2377 * flushed.
2379 ip->flags |= HAMMER_INODE_RESIGNAL |
2380 HAMMER_INODE_REFLUSH;
2381 r = -1;
2382 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2384 * If the target IP is not flushing we can force
2385 * it to flush, even if it is unable to write out
2386 * any of its own records we have at least one in
2387 * hand that we CAN deal with.
2389 rec->flush_state = HAMMER_FST_FLUSH;
2390 rec->flush_group = flg;
2391 ++flg->refs;
2392 hammer_ref(&rec->lock);
2393 hammer_flush_inode_core(target_ip, flg,
2394 HAMMER_FLUSH_RECURSION);
2395 r = 1;
2396 } else {
2398 * General or delete-on-disk record.
2400 * XXX this needs help. If a delete-on-disk we could
2401 * disconnect the target. If the target has its own
2402 * dependancies they really need to be flushed.
2404 * XXX
2406 rec->flush_state = HAMMER_FST_FLUSH;
2407 rec->flush_group = flg;
2408 ++flg->refs;
2409 hammer_ref(&rec->lock);
2410 hammer_flush_inode_core(target_ip, flg,
2411 HAMMER_FLUSH_RECURSION);
2412 r = 1;
2414 break;
2415 case HAMMER_FST_FLUSH:
2417 * The record could be part of a previous flush group if the
2418 * inode is a directory (the record being a directory entry).
2419 * Once the flush group was closed a hammer_test_inode()
2420 * function can cause a new flush group to be setup, placing
2421 * the directory inode itself in a new flush group.
2423 * When associated with a previous flush group we count it
2424 * as if it were in our current flush group, since it will
2425 * effectively be flushed by the time we flush our current
2426 * flush group.
2428 KKASSERT(
2429 rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY ||
2430 rec->flush_group == flg);
2431 r = 1;
2432 break;
2434 return(r);
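/*
 * Illustrative sketch (not part of HAMMER): the scan convention the
 * callback above relies on, as we read it.  Non-negative callback returns
 * accumulate into the scan total while a negative return aborts the scan
 * and is passed through, which is how go_count can come back negative in
 * hammer_flush_inode_core().  Hypothetical array-based stand-in for
 * RB_SCAN:
 */
#if 0
static int
demo_scan(int (*callback)(hammer_record_t rec, void *data),
          hammer_record_t *recs, int nrecs, void *data)
{
    int total = 0;
    int r, i;

    for (i = 0; i < nrecs; ++i) {
        r = callback(recs[i], data);
        if (r < 0)
            return(r);  /* abort, propagate the negative value */
        total += r;
    }
    return(total);
}
#endif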
2437 #if 0
2439 * This version just moves records already in a flush state to the new
2440 * flush group and that is it.
2442 static int
2443 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2445 hammer_inode_t ip = rec->ip;
2447 switch(rec->flush_state) {
2448 case HAMMER_FST_FLUSH:
2449 KKASSERT(rec->flush_group == ip->flush_group);
2450 break;
2451 default:
2452 break;
2454 return(0);
2456 #endif
2459 * Wait for a previously queued flush to complete.
2461 * If a critical error occurred we don't try to wait.
2463 void
2464 hammer_wait_inode(hammer_inode_t ip)
2467 * The inode can be in a SETUP state in which case RESIGNAL
2468 * should be set. If RESIGNAL is not set then the previous
2469 * flush completed and a later operation placed the inode
2470 * in a passive setup state again, so we're done.
2472 * The inode can be in a FLUSH state in which case we
2473 * can just wait for completion.
2475 while (ip->flush_state == HAMMER_FST_FLUSH ||
2476 (ip->flush_state == HAMMER_FST_SETUP &&
2477 (ip->flags & HAMMER_INODE_RESIGNAL))) {
2479 * Don't try to flush on a critical error
2481 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
2482 break;
2485 * If the inode was already being flushed its flg
2486 * may not have been queued to the backend. We
2487 * have to make sure it gets queued or we can wind
2488 * up blocked or deadlocked (particularly if we are
2489 * the vnlru thread).
2491 if (ip->flush_state == HAMMER_FST_FLUSH) {
2492 KKASSERT(ip->flush_group);
2493 if (ip->flush_group->closed == 0) {
2494 if (hammer_debug_inode) {
2495 hkprintf("debug: forcing "
2496 "async flush ip %016jx\n",
2497 (intmax_t)ip->obj_id);
2499 hammer_flusher_async(ip->hmp, ip->flush_group);
2500 continue; /* retest */
2505 * In a flush state with the flg queued to the backend
2506 * or in a setup state with RESIGNAL set, we can safely
2507 * wait.
2509 ip->flags |= HAMMER_INODE_FLUSHW;
2510 tsleep(&ip->flags, 0, "hmrwin", 0);
2513 #if 0
2515 * The inode may have been in a passive setup state,
2516 * call flush to make sure we get signaled.
2518 if (ip->flush_state == HAMMER_FST_SETUP)
2519 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2520 #endif
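/*
 * Illustrative sketch (not part of HAMMER): the sleep/wakeup handshake
 * used above, reduced to its two halves.  The waiter advertises itself
 * with HAMMER_INODE_FLUSHW and sleeps on the flags word; the completion
 * path (see hammer_sync_inode_done() below) clears the flag and wakes all
 * sleepers.  The helper names are hypothetical.
 */
#if 0
static void
demo_wait_side(hammer_inode_t ip)
{
    ip->flags |= HAMMER_INODE_FLUSHW;   /* request a wakeup */
    tsleep(&ip->flags, 0, "hmrwin", 0); /* sleep on the flags word */
}

static void
demo_done_side(hammer_inode_t ip)
{
    if (ip->flags & HAMMER_INODE_FLUSHW) {
        ip->flags &= ~HAMMER_INODE_FLUSHW;
        wakeup(&ip->flags);             /* wake every waiter */
    }
}
#endif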
2525 * Called by the backend code when a flush has been completed.
2526 * The inode has already been removed from the flush list.
2528 * A pipelined flush can occur, in which case we must re-enter the
2529 * inode on the list and re-copy its fields.
2531 void
2532 hammer_sync_inode_done(hammer_inode_t ip, int error)
2534 hammer_mount_t hmp;
2535 int dorel;
2537 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2539 hmp = ip->hmp;
2542 * Auto-reflush if the backend could not completely flush
2543 * the inode. This fixes a case where a deferred buffer flush
2544 * could cause fsync to return early.
2546 if (ip->sync_flags & HAMMER_INODE_MODMASK)
2547 ip->flags |= HAMMER_INODE_REFLUSH;
2550 * Merge left-over flags back into the frontend and fix the state.
2551 * Incomplete truncations are retained by the backend.
2553 ip->error = error;
2554 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2555 ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2558 * The backend may have adjusted nlinks, so if the adjusted nlinks
2559 * does not match the frontend's, set the frontend's DDIRTY flag again.
2561 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2562 ip->flags |= HAMMER_INODE_DDIRTY;
2565 * Fix up the dirty buffer status.
2567 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2568 ip->flags |= HAMMER_INODE_BUFS;
2570 hammer_redo_fifo_end_flush(ip);
2573 * Re-set the XDIRTY flag if some of the inode's in-memory records
2574 * could not be flushed.
2576 KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2577 (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2578 (!RB_EMPTY(&ip->rec_tree) &&
2579 (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2582 * Do not lose track of inodes which no longer have vnode
2583 * associations, otherwise they may never get flushed again.
2585 * The reflush flag can be set superfluously, causing extra pain
2586 * for no reason. If the inode is no longer modified it no longer
2587 * needs to be flushed.
2589 if (ip->flags & HAMMER_INODE_MODMASK) {
2590 if (ip->vp == NULL)
2591 ip->flags |= HAMMER_INODE_REFLUSH;
2592 } else {
2593 ip->flags &= ~HAMMER_INODE_REFLUSH;
2597 * The fs token is held but the inode lock is not held. Because this
2598 * is a backend flush it is possible that the vnode has no references,
2599 * which can cause a reclaim race inside vsetisdirty() if/when it blocks.
2601 * Therefore, we must lock the inode around this particular dirtying
2602 * operation. We don't have to do so around other dirtying operations
2603 * where the vnode is implicitly or explicitly held.
2605 if (ip->flags & HAMMER_INODE_MODMASK) {
2606 hammer_lock_ex(&ip->lock);
2607 hammer_inode_dirty(ip);
2608 hammer_unlock(&ip->lock);
2612 * Adjust the flush state.
2614 if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2616 * We were unable to flush out all our records, leave the
2617 * inode in a flush state and in the current flush group.
2618 * The flush group will be re-run.
2620 * This occurs if the UNDO block gets too full or there is
2621 * too much dirty meta-data; it allows the flusher to
2622 * finalize the UNDO block and then re-flush.
2624 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2625 dorel = 0;
2626 } else {
2628 * Remove from the flush_group
2630 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2631 ip->flush_group = NULL;
2633 #if 0
2635 * Clean up the vnode ref and tracking counts.
2637 if (ip->flags & HAMMER_INODE_VHELD) {
2638 ip->flags &= ~HAMMER_INODE_VHELD;
2639 vrele(ip->vp);
2641 #endif
2642 --hmp->count_iqueued;
2643 --hammer_count_iqueued;
2646 * And adjust the state.
2648 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2649 ip->flush_state = HAMMER_FST_IDLE;
2650 dorel = 1;
2651 } else {
2652 ip->flush_state = HAMMER_FST_SETUP;
2653 dorel = 0;
2657 * If the frontend is waiting for a flush to complete,
2658 * wake it up.
2660 if (ip->flags & HAMMER_INODE_FLUSHW) {
2661 ip->flags &= ~HAMMER_INODE_FLUSHW;
2662 wakeup(&ip->flags);
2666 * If the frontend made more changes and requested another
2667 * flush, then try to get it running.
2669 * Reflushes are aborted when the inode is errored out.
2671 if (ip->flags & HAMMER_INODE_REFLUSH) {
2672 ip->flags &= ~HAMMER_INODE_REFLUSH;
2673 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2674 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2675 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2676 } else {
2677 hammer_flush_inode(ip, 0);
2683 * If we have no parent dependancies we can clear CONN_DOWN
2685 if (TAILQ_EMPTY(&ip->target_list))
2686 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2689 * If the inode is now clean drop the space reservation.
2691 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2692 (ip->flags & HAMMER_INODE_RSV_INODES)) {
2693 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2694 --hmp->rsv_inodes;
2697 ip->flags &= ~HAMMER_INODE_SLAVEFLUSH;
2699 if (dorel)
2700 hammer_rel_inode(ip, 0);
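/*
 * Illustrative sketch (not part of HAMMER): the post-flush state decision
 * made above.  The inode only returns to IDLE (dropping its extra flush
 * reference) when it has neither dependencies nor in-memory records left;
 * otherwise it parks in SETUP.  Hypothetical stand-in:
 */
#if 0
static int
demo_post_flush_state(int target_list_empty, int rec_tree_empty, int *dorelp)
{
    if (target_list_empty && rec_tree_empty) {
        *dorelp = 1;            /* drop the flush reference */
        return(HAMMER_FST_IDLE);
    }
    *dorelp = 0;                /* keep the reference */
    return(HAMMER_FST_SETUP);
}
#endif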
2704 * Called from hammer_sync_inode() to synchronize in-memory records
2705 * to the media.
2707 static int
2708 hammer_sync_record_callback(hammer_record_t record, void *data)
2710 hammer_cursor_t cursor = data;
2711 hammer_transaction_t trans = cursor->trans;
2712 hammer_mount_t hmp = trans->hmp;
2713 int error;
2716 * Skip records that do not belong to the current flush.
2718 ++hammer_stats_record_iterations;
2719 if (record->flush_state != HAMMER_FST_FLUSH)
2720 return(0);
2722 if (record->flush_group != record->ip->flush_group) {
2723 hdkprintf("rec %p ip %p bad flush group %p %p\n",
2724 record,
2725 record->ip,
2726 record->flush_group,
2727 record->ip->flush_group);
2728 if (hammer_debug_critical)
2729 Debugger("blah2");
2730 return(0);
2732 KKASSERT(record->flush_group == record->ip->flush_group);
2735 * Interlock the record using the BE flag. Once BE is set the
2736 * frontend cannot change the state of FE.
2738 * NOTE: If FE is set prior to us setting BE we still sync the
2739 * record out, but the flush completion code converts it to
2740 * a delete-on-disk record instead of destroying it.
2742 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2743 record->flags |= HAMMER_RECF_INTERLOCK_BE;
2746 * The backend has already disposed of the record.
2748 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2749 error = 0;
2750 goto done;
2754 * If the whole inode is being deleted and all on-disk records will
2755 * be deleted very soon, we can't sync any new records to disk
2756 * because they will be deleted in the same transaction they were
2757 * created in (delete_tid == create_tid), which will assert.
2759 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2760 * that we currently panic on.
2762 if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2763 switch(record->type) {
2764 case HAMMER_MEM_RECORD_DATA:
2766 * We don't have to do anything, if the record was
2767 * committed the space will have been accounted for
2768 * in the blockmap.
2770 /* fall through */
2771 case HAMMER_MEM_RECORD_GENERAL:
2773 * Set deleted-by-backend flag. Do not set the
2774 * backend committed flag, because we are throwing
2775 * the record away.
2777 record->flags |= HAMMER_RECF_DELETED_BE;
2778 ++record->ip->rec_generation;
2779 error = 0;
2780 goto done;
2781 case HAMMER_MEM_RECORD_ADD:
2782 hpanic("illegal add during inode deletion record %p",
2783 record);
2784 break; /* NOT REACHED */
2785 case HAMMER_MEM_RECORD_INODE:
2786 hpanic("attempt to sync inode record %p?", record);
2787 break; /* NOT REACHED */
2788 case HAMMER_MEM_RECORD_DEL:
2790 * Follow through and issue the on-disk deletion
2792 break;
2797 * If DELETED_FE is set special handling is needed for directory
2798 * entries. Dependent pieces related to the directory entry may
2799 * have already been synced to disk. If this occurs we have to
2800 * sync the directory entry and then change the in-memory record
2801 * from an ADD to a DELETE to cover the fact that it's been
2802 * deleted by the frontend.
2804 * A directory delete covering record (MEM_RECORD_DEL) can never
2805 * be deleted by the frontend.
2807 * Any other record type (aka DATA) can be deleted by the frontend.
2808 * XXX At the moment the flusher must skip it because there may
2809 * be another data record in the flush group for the same block,
2810 * meaning that some frontend data changes can leak into the backend's
2811 * synchronization point.
2813 if (record->flags & HAMMER_RECF_DELETED_FE) {
2814 if (record->type == HAMMER_MEM_RECORD_ADD) {
2816 * Convert a front-end deleted directory-add to
2817 * a directory-delete entry later.
2819 record->flags |= HAMMER_RECF_CONVERT_DELETE;
2820 } else {
2822 * Dispose of the record (race case). Mark as
2823 * deleted by backend (and not committed).
2825 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2826 record->flags |= HAMMER_RECF_DELETED_BE;
2827 ++record->ip->rec_generation;
2828 error = 0;
2829 goto done;
2834 * Assign the create_tid for new records. Deletions already
2835 * have the record's entire key properly set up.
2837 if (record->type != HAMMER_MEM_RECORD_DEL) {
2838 record->leaf.base.create_tid = trans->tid;
2839 record->leaf.create_ts = trans->time32;
2843 * This actually moves the record to the on-media B-Tree. We
2844 * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2845 * indicating that the related REDO_WRITE(s) have been committed.
2847 * During recovery any REDO_TERM's within the nominal recovery span
2848 * are ignored since the related meta-data is being undone, causing
2849 * any matching REDO_WRITEs to execute. The REDO_TERMs outside
2850 * the nominal recovery span will match against REDO_WRITEs and
2851 * prevent them from being executed (because the meta-data has
2852 * already been synchronized).
2854 if (record->flags & HAMMER_RECF_REDO) {
2855 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2856 hammer_generate_redo(trans, record->ip,
2857 record->leaf.base.key -
2858 record->leaf.data_len,
2859 HAMMER_REDO_TERM_WRITE,
2860 NULL,
2861 record->leaf.data_len);
2864 for (;;) {
2865 error = hammer_ip_sync_record_cursor(cursor, record);
2866 if (error != EDEADLK)
2867 break;
2868 hammer_done_cursor(cursor);
2869 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2870 record->ip);
2871 if (error)
2872 break;
2874 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2876 if (error)
2877 error = -error; /* negative return aborts the RB_SCAN */
2878 done:
2879 hammer_flush_record_done(record, error);
2882 * Do partial finalization if we have built up too many dirty
2883 * buffers. Otherwise a buffer cache deadlock can occur when
2884 * doing things like creating tens of thousands of tiny files.
2886 * We must release our cursor lock to avoid a 3-way deadlock
2887 * due to the exclusive sync lock the finalizer must get.
2889 * WARNING: See warnings in hammer_unlock_cursor() function.
2891 if (hammer_flusher_meta_limit(hmp) ||
2892 vm_paging_severe()) {
2893 hammer_unlock_cursor(cursor);
2894 hammer_flusher_finalize(trans, 0);
2895 hammer_lock_cursor(cursor);
2897 return(error);
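/*
 * Illustrative sketch (not part of HAMMER): the deadlock-retry idiom used
 * in the sync loop above, in isolation.  On EDEADLK all cursor node locks
 * are dropped and the cursor is rebuilt from the cached hint before the
 * operation is retried from scratch.  do_btree_operation() is a
 * hypothetical stand-in for hammer_ip_sync_record_cursor().
 */
#if 0
static int
demo_retry_btree_op(hammer_transaction_t trans, hammer_cursor_t cursor,
                    hammer_record_t record)
{
    int error;

    for (;;) {
        error = do_btree_operation(cursor);     /* hypothetical */
        if (error != EDEADLK)
            break;                      /* success or hard error */
        hammer_done_cursor(cursor);     /* drop all node locks */
        error = hammer_init_cursor(trans, cursor,
                                   &record->ip->cache[0], record->ip);
        if (error)
            break;                      /* could not rebuild the cursor */
    }
    return(error);
}
#endif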
2901 * Backend function called by the flusher to sync an inode to media.
2904 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2906 struct hammer_cursor cursor;
2907 hammer_node_t tmp_node;
2908 hammer_record_t depend;
2909 hammer_record_t next;
2910 int error, tmp_error;
2911 uint64_t nlinks;
2913 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2914 return(0);
2916 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2917 if (error)
2918 goto done;
2921 * Any directory records referencing this inode which are not in
2922 * our current flush group must adjust our nlink count for the
2923 * purposes of synchronizing to disk.
2925 * Records which are in our flush group can be unlinked from our
2926 * inode now, potentially allowing the inode to be physically
2927 * deleted.
2929 * This cannot block.
2931 nlinks = ip->ino_data.nlinks;
2932 next = TAILQ_FIRST(&ip->target_list);
2933 while ((depend = next) != NULL) {
2934 next = TAILQ_NEXT(depend, target_entry);
2935 if (depend->flush_state == HAMMER_FST_FLUSH &&
2936 depend->flush_group == ip->flush_group) {
2938 * If this is an ADD that was deleted by the frontend
2939 * the frontend nlinks count will have already been
2940 * decremented, but the backend is going to sync its
2941 * directory entry and must account for it. The
2942 * record will be converted to a delete-on-disk when
2943 * it gets synced.
2945 * If the ADD was not deleted by the frontend we
2946 * can remove the dependancy from our target_list.
2948 if (depend->flags & HAMMER_RECF_DELETED_FE) {
2949 ++nlinks;
2950 } else {
2951 TAILQ_REMOVE(&ip->target_list, depend,
2952 target_entry);
2953 depend->target_ip = NULL;
2955 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2957 * Not part of our flush group and not deleted by
2958 * the front-end, adjust the link count synced to
2959 * the media (undo what the frontend did when it
2960 * queued the record).
2962 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2963 switch(depend->type) {
2964 case HAMMER_MEM_RECORD_ADD:
2965 --nlinks;
2966 break;
2967 case HAMMER_MEM_RECORD_DEL:
2968 ++nlinks;
2969 break;
2970 default:
2971 break;
2977 * Set dirty if we had to modify the link count.
2979 if (ip->sync_ino_data.nlinks != nlinks) {
2980 KKASSERT((int64_t)nlinks >= 0);
2981 ip->sync_ino_data.nlinks = nlinks;
2982 ip->sync_flags |= HAMMER_INODE_DDIRTY;
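/*
 * Worked example (illustrative): an inode with a frontend nlinks of 2
 * whose directory-entry ADD is queued but not in this flush group syncs
 * with nlinks 1 (the frontend's increment is undone for the media copy).
 * If the ADD is in this group but was already deleted by the frontend,
 * nlinks is re-incremented because the backend will still sync the entry
 * before converting it to a delete-on-disk.
 */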
2986 * If there is a truncation queued, destroy any data past the (aligned)
2987 * truncation point. Userland will have dealt with the buffer
2988 * containing the truncation point for us.
2990 * We don't flush pending frontend data buffers until after we've
2991 * dealt with the truncation.
2993 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2995 * Interlock trunc_off. The VOP front-end may continue to
2996 * make adjustments to it while we are blocked.
2998 off_t trunc_off;
2999 off_t aligned_trunc_off;
3000 int blkmask;
3002 trunc_off = ip->sync_trunc_off;
3003 blkmask = hammer_blocksize(trunc_off) - 1;
3004 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
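/*
 * Worked example (illustrative, assuming a 16KB block): blkmask is
 * 0x3FFF, so a trunc_off of 0x4001 rounds up to an aligned_trunc_off
 * of 0x8000, while an already-aligned 0x4000 stays at 0x4000. Only
 * whole blocks past the aligned offset are deleted on-media.
 */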
3007 * Delete any whole blocks on-media. The front-end has
3008 * already cleaned out any partial block and made it
3009 * pending. The front-end may have updated trunc_off
3010 * while we were blocked so we only use sync_trunc_off.
3012 * This operation can blow out the buffer cache, EWOULDBLOCK
3013 * means we were unable to complete the deletion. The
3014 * deletion will update sync_trunc_off in that case.
3016 error = hammer_ip_delete_range(&cursor, ip,
3017 aligned_trunc_off,
3018 HAMMER_MAX_KEY, 2);
3019 if (error == EWOULDBLOCK) {
3020 ip->flags |= HAMMER_INODE_WOULDBLOCK;
3021 error = 0;
3022 goto defer_buffer_flush;
3025 if (error)
3026 goto done;
3029 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
3031 * XXX we do this even if we did not previously generate
3032 * a REDO_TRUNC record. This operation may enclose the
3033 * range of multiple prior truncation entries in the REDO
3034 * log.
3036 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
3037 (ip->flags & HAMMER_INODE_RDIRTY)) {
3038 hammer_generate_redo(trans, ip, aligned_trunc_off,
3039 HAMMER_REDO_TERM_TRUNC,
3040 NULL, 0);
3044 * Clear the truncation flag on the backend after we have
3045 * completed the deletions. Backend data is now good again
3046 * (including new records we are about to sync, below).
3048 * Leave sync_trunc_off intact. As we write additional
3049 * records the backend will update sync_trunc_off. This
3050 * tells the backend whether it can skip the overwrite
3051 * test. This should work properly even when the backend
3052 * writes full blocks where the truncation point straddles
3053 * the block because the comparison is against the base
3054 * offset of the record.
3056 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3057 /* ip->sync_trunc_off = HAMMER_MAX_KEY; */
3058 } else {
3059 error = 0;
3063 * Now sync related records. These will typically be directory
3064 * entries, records tracking direct-writes, or delete-on-disk records.
3066 if (error == 0) {
3067 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
3068 hammer_sync_record_callback, &cursor);
3069 if (tmp_error < 0)
3070 tmp_error = -tmp_error; /* RB_SCAN aborted with a negative errno */
3071 if (tmp_error)
3072 error = tmp_error;
3074 hammer_cache_node(&ip->cache[1], cursor.node);
3077 * Re-seek for inode update, assuming our cache hasn't been ripped
3078 * out from under us.
3080 if (error == 0) {
3081 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
3082 if (tmp_node) {
3083 hammer_cursor_downgrade(&cursor);
3084 hammer_lock_sh(&tmp_node->lock);
3085 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
3086 hammer_cursor_seek(&cursor, tmp_node, 0);
3087 hammer_unlock(&tmp_node->lock);
3088 hammer_rel_node(tmp_node);
3090 error = 0;
3094 * If we are deleting the inode the frontend had better not have
3095 * any active references on elements making up the inode.
3097 * The call to hammer_ip_delete_clean() cleans up auxiliary records
3098 * but not DB or DATA records. Those must have already been deleted
3099 * by the normal truncation mechanism.
3101 if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
3102 RB_EMPTY(&ip->rec_tree) &&
3103 (ip->sync_flags & HAMMER_INODE_DELETING) &&
3104 (ip->flags & HAMMER_INODE_DELETED) == 0) {
3105 int count1 = 0;
3107 error = hammer_ip_delete_clean(&cursor, ip, &count1);
3108 if (error == 0) {
3109 ip->flags |= HAMMER_INODE_DELETED;
3110 ip->sync_flags &= ~HAMMER_INODE_DELETING;
3111 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3112 KKASSERT(RB_EMPTY(&ip->rec_tree));
3115 * Set delete_tid in both the frontend and backend
3116 * copy of the inode record. The DELETED flag handles
3117 * this, do not set DDIRTY.
3119 ip->ino_leaf.base.delete_tid = trans->tid;
3120 ip->sync_ino_leaf.base.delete_tid = trans->tid;
3121 ip->ino_leaf.delete_ts = trans->time32;
3122 ip->sync_ino_leaf.delete_ts = trans->time32;
3126 * Adjust the inode count in the volume header
3128 hammer_sync_lock_sh(trans);
3129 if (ip->flags & HAMMER_INODE_ONDISK) {
3130 hammer_modify_volume_field(trans,
3131 trans->rootvol,
3132 vol0_stat_inodes);
3133 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
3134 hammer_modify_volume_done(trans->rootvol);
3136 hammer_sync_unlock(trans);
3140 if (error)
3141 goto done;
3142 ip->sync_flags &= ~HAMMER_INODE_BUFS;
3144 defer_buffer_flush:
3146 * Now update the inode's on-disk inode-data and/or on-disk record.
3147 * DELETED and ONDISK are managed only in ip->flags.
3149 * In the case of a deferred buffer flush we still update the on-disk
3150 * inode to satisfy visibility requirements if there happen to be
3151 * directory dependencies.
3153 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
3154 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
3156 * If deleted and on-disk, don't set any additional flags.
3157 * The delete flag takes care of things.
3159 * Clear flags which may have been set by the frontend.
3161 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3162 HAMMER_INODE_SDIRTY |
3163 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3164 HAMMER_INODE_DELETING);
3165 break;
3166 case HAMMER_INODE_DELETED:
3168 * Take care of the case where a deleted inode was never
3169 * flushed to the disk in the first place.
3171 * Clear flags which may have been set by the frontend.
3173 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3174 HAMMER_INODE_SDIRTY |
3175 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3176 HAMMER_INODE_DELETING);
3177 while (RB_ROOT(&ip->rec_tree)) {
3178 hammer_record_t record = RB_ROOT(&ip->rec_tree);
3179 hammer_ref(&record->lock);
3180 KKASSERT(hammer_oneref(&record->lock));
3181 record->flags |= HAMMER_RECF_DELETED_BE;
3182 ++record->ip->rec_generation;
3183 hammer_rel_mem_record(record);
3185 break;
3186 case HAMMER_INODE_ONDISK:
3188 * If already on-disk, do not set any additional flags.
3190 break;
3191 default:
3193 * If not on-disk and not deleted, set DDIRTY to force
3194 * an initial record to be written.
3196 * Also set the create_tid in both the frontend and backend
3197 * copy of the inode record.
3199 ip->ino_leaf.base.create_tid = trans->tid;
3200 ip->ino_leaf.create_ts = trans->time32;
3201 ip->sync_ino_leaf.base.create_tid = trans->tid;
3202 ip->sync_ino_leaf.create_ts = trans->time32;
3203 ip->sync_flags |= HAMMER_INODE_DDIRTY;
3204 break;
3208 * If DDIRTY or SDIRTY is set, write out a new record.
3209 * If the inode is already on-disk the old record is marked as
3210 * deleted.
3212 * If DELETED is set hammer_update_inode() will delete the existing
3213 * record without writing out a new one.
3215 if (ip->flags & HAMMER_INODE_DELETED) {
3216 error = hammer_update_inode(&cursor, ip);
3217 } else
3218 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3219 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3220 error = hammer_update_itimes(&cursor, ip);
3221 } else
3222 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3223 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3224 error = hammer_update_inode(&cursor, ip);
3226 done:
3227 if (ip->flags & HAMMER_INODE_MODMASK)
3228 hammer_inode_dirty(ip);
3229 if (error) {
3230 hammer_critical_error(ip->hmp, ip, error,
3231 "while syncing inode");
3233 hammer_done_cursor(&cursor);
3234 return(error);
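/*
 * Illustrative sketch (not part of HAMMER): the DELETED/ONDISK dispatch
 * above as a standalone truth table.  The enum and helper are
 * hypothetical; the real code also clears frontend-set sync_flags in the
 * deleted cases.
 */
#if 0
enum demo_action { DEMO_NO_EXTRA_FLAGS, DEMO_PURGE_MEM_RECORDS,
                   DEMO_FORCE_INITIAL_RECORD };

static enum demo_action
demo_inode_disposition(int deleted, int ondisk)
{
    if (deleted && ondisk)
        return(DEMO_NO_EXTRA_FLAGS);    /* delete flag covers it */
    if (deleted)
        return(DEMO_PURGE_MEM_RECORDS); /* never reached the media */
    if (ondisk)
        return(DEMO_NO_EXTRA_FLAGS);    /* plain update */
    return(DEMO_FORCE_INITIAL_RECORD);  /* set DDIRTY, create_tid */
}
#endif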
3238 * This routine is called when the OS is no longer actively referencing
3239 * the inode (but might still be keeping it cached), or when releasing
3240 * the last reference to an inode.
3242 * At this point if the inode's nlinks count is zero we want to destroy
3243 * it, which may mean destroying it on-media too.
3245 void
3246 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3248 struct vnode *vp;
3251 * Set the DELETING flag when the link count drops to 0 and the
3252 * OS no longer has any opens on the inode.
3254 * The backend will clear DELETING (a mod flag) and set DELETED
3255 * (a state flag) when it is actually able to perform the
3256 * operation.
3258 * Don't reflag the deletion if the flusher is currently syncing
3259 * one that was already flagged. A previously set DELETING flag
3260 * may bounce around flags and sync_flags until the operation is
3261 * completely done.
3263 * Do not attempt to modify a snapshot inode (one set to read-only).
3265 if (ip->ino_data.nlinks == 0 &&
3266 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3267 ip->flags |= HAMMER_INODE_DELETING;
3268 ip->flags |= HAMMER_INODE_TRUNCATED;
3269 ip->trunc_off = 0;
3270 vp = NULL;
3271 if (getvp) {
3272 if (hammer_get_vnode(ip, &vp) != 0)
3273 return;
3277 * Final cleanup
3279 if (ip->vp)
3280 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0);
3281 if (ip->flags & HAMMER_INODE_MODMASK)
3282 hammer_inode_dirty(ip);
3283 if (getvp)
3284 vput(vp);
3289 * After potentially resolving a dependency the inode is tested
3290 * to determine whether it needs to be reflushed.
3292 void
3293 hammer_test_inode(hammer_inode_t ip)
3295 if (ip->flags & HAMMER_INODE_REFLUSH) {
3296 ip->flags &= ~HAMMER_INODE_REFLUSH;
3297 hammer_ref(&ip->lock);
3298 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3299 ip->flags &= ~HAMMER_INODE_RESIGNAL;
3300 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3301 } else {
3302 hammer_flush_inode(ip, 0);
3304 hammer_rel_inode(ip, 0);
3309 * Clear the RECLAIM flag on an inode. This occurs when the inode is
3310 * reassociated with a vp or just before it gets freed.
3312 * Pipeline wakeups to threads blocked due to an excessive number of
3313 * detached inodes. This typically occurs when atime updates accumulate
3314 * while scanning a directory tree.
3316 static void
3317 hammer_inode_wakereclaims(hammer_inode_t ip)
3319 struct hammer_reclaim *reclaim;
3320 hammer_mount_t hmp = ip->hmp;
3322 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3323 return;
3325 --hammer_count_reclaims;
3326 --hmp->count_reclaims;
3327 ip->flags &= ~HAMMER_INODE_RECLAIM;
3329 if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3330 KKASSERT(reclaim->count > 0);
3331 if (--reclaim->count == 0) {
3332 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3333 wakeup(reclaim);
3339 * Setup our reclaim pipeline. We only let so many detached (and dirty)
3340 * inodes build up before we start blocking. This routine is called
3341 * if a new inode is created or an inode is loaded from media.
3343 * When we block we don't care *which* inode has finished reclaiming,
3344 * as long as one does.
3346 * The reclaim pipeline is primarily governed by the auto-flush which is
3347 * 1/4 hammer_limit_reclaims. We don't want to block if the count is
3348 * less than 1/2 hammer_limit_reclaims. From 1/2 to the full count
3349 * the delay is dynamically governed.
3351 void
3352 hammer_inode_waitreclaims(hammer_transaction_t trans)
3354 hammer_mount_t hmp = trans->hmp;
3355 struct hammer_reclaim reclaim;
3356 int lower_limit;
3359 * Track inode load, delay if the number of reclaiming inodes is
3360 * between 2/4 and 4/4 hammer_limit_reclaims, depending on per-pid load.
3362 if (curthread->td_proc) {
3363 struct hammer_inostats *stats;
3365 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3366 ++stats->count;
3368 if (stats->count > hammer_limit_reclaims / 2)
3369 stats->count = hammer_limit_reclaims / 2;
3370 lower_limit = hammer_limit_reclaims - stats->count;
3371 if (hammer_debug_general & 0x10000) {
3372 hdkprintf("pid %5d limit %d\n",
3373 (int)curthread->td_proc->p_pid, lower_limit);
3375 } else {
3376 lower_limit = hammer_limit_reclaims * 3 / 4;
3378 if (hmp->count_reclaims >= lower_limit) {
3379 reclaim.count = 1;
3380 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3381 tsleep(&reclaim, 0, "hmrrcm", hz);
3382 if (reclaim.count > 0)
3383 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
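/*
 * Illustrative sketch (not part of HAMMER): the per-pid threshold
 * computed above, in isolation.  A pid with a heavy recent inode load
 * gets a lower threshold and therefore blocks sooner; the clamp keeps
 * the threshold in [limit/2, limit].  Hypothetical stand-in:
 */
#if 0
static int
demo_reclaim_lower_limit(int limit_reclaims, int pid_count)
{
    if (pid_count > limit_reclaims / 2)
        pid_count = limit_reclaims / 2;
    return(limit_reclaims - pid_count); /* in [limit/2, limit] */
}
#endif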
3388 * Keep track of reclaim statistics on a per-pid basis using a loose
3389 * 4-way set associative hash table. Collisions inherit the count of
3390 * the previous entry.
3392 * NOTE: We want to be careful here to limit the chain size. If the chain
3393 * size is too large a pid will spread its stats out over too many
3394 * entries under certain types of heavy filesystem activity and
3395 * wind up not delaying long enough.
3397 static
3398 struct hammer_inostats *
3399 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3401 struct hammer_inostats *stats;
3402 int delta;
3403 int chain;
3404 static volatile int iterator; /* we don't care about MP races */
3407 * Chain up to 4 times to find our entry.
3409 for (chain = 0; chain < 4; ++chain) {
3410 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3411 if (stats->pid == pid)
3412 break;
3416 * Replace one of the four chaining entries with our new entry.
3418 if (chain == 4) {
3419 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3420 HAMMER_INOSTATS_HMASK];
3421 stats->pid = pid;
3425 * Decay the entry
3427 if (stats->count && stats->ltick != ticks) {
3428 delta = ticks - stats->ltick;
3429 stats->ltick = ticks;
3430 if (delta <= 0 || delta > hz * 60)
3431 stats->count = 0;
3432 else
3433 stats->count = stats->count * hz / (hz + delta);
3435 if (hammer_debug_general & 0x10000)
3436 hdkprintf("pid %5d stats %d\n", (int)pid, stats->count);
3437 return (stats);
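/*
 * Illustrative sketch (not part of HAMMER): the decay applied above.
 * One step of count * hz / (hz + delta) halves the count after hz idle
 * ticks; e.g. with hz = 100 and delta = 100, a count of 40 decays to
 * 40 * 100 / 200 = 20.  Entries idle for over a minute reset outright.
 * Hypothetical standalone form:
 */
#if 0
static int
demo_decay_count(int count, int delta)
{
    if (delta <= 0 || delta > hz * 60)
        return(0);      /* stale entry or clock skew, reset */
    return(count * hz / (hz + delta));
}
#endif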
3440 #if 0
3443 * XXX not used, doesn't work very well due to the large batching nature
3444 * of flushes.
3446 * A larger-than-normal backlog of inodes is sitting in the flusher,
3447 * enforce a general slowdown to let it catch up. This routine is only
3448 * called on completion of a non-flusher-related transaction which
3449 * performed B-Tree node I/O.
3451 * It is possible for the flusher to stall in a continuous load.
3452 * blogbench -i1000 -o seems to do a good job generating this sort of load.
3453 * If the flusher is unable to catch up the inode count can bloat until
3454 * we run out of kvm.
3456 * This is a bit of a hack.
3458 void
3459 hammer_inode_waithard(hammer_mount_t hmp)
3462 * Hysteresis.
3464 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3465 if (hmp->count_reclaims < hammer_limit_reclaims / 2 &&
3466 hmp->count_iqueued < hmp->count_inodes / 20) {
3467 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3468 return;
3470 } else {
3471 if (hmp->count_reclaims < hammer_limit_reclaims ||
3472 hmp->count_iqueued < hmp->count_inodes / 10) {
3473 return;
3475 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3479 * Block for one flush cycle.
3481 hammer_flusher_wait_next(hmp);
3484 #endif