Use per-mount kmalloc pools for bulk data structures, particularly inodes
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>
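
/*
 * NOTE: Inode structures in this file are allocated out of the per-mount
 * pool hmp->m_inodes, and flush group / pfsm structures out of
 * hmp->m_misc, rather than out of one global malloc type.  A sketch of
 * the assumed mount-time setup (the create/destroy calls themselves live
 * outside this file, presumably in hammer_vfsops.c):
 *
 *	kmalloc_create(&hmp->m_misc, "HAMMER-others");
 *	kmalloc_create(&hmp->m_inodes, "HAMMER-inodes");
 *
 * Per-mount pools keep bulk allocations from contending on a single
 * global malloc zone and let an unmount tear the pools down wholesale.
 */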

static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_free_inode(hammer_inode_t ip);
static void hammer_flush_inode_core(hammer_inode_t ip,
                                    hammer_flush_group_t flg, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int hammer_setup_parent_inodes(hammer_inode_t ip,
                                    hammer_flush_group_t flg);
static int hammer_setup_parent_inodes_helper(hammer_record_t record,
                                    hammer_flush_group_t flg);
static void hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;

        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}

RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
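
/*
 * The functions generated above are used further down in this file,
 * e.g. hammer_ino_rb_tree_RB_LOOKUP_INFO() in hammer_get_inode() and
 * RB_LOOKUP(hammer_pfs_rb_tree, ...) in hammer_load_pseudofs().
 */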

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_rel_inode(ip, 1);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vp->v_flag |= VROOT;
                                else
                                        vp->v_flag |= VPFSROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         */
retry:
        hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * The assumption is that it is near the directory data.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip && dip->cache[1].node)
                        hammer_cache_node(&ip->cache[1], dip->cache[1].node);

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                ip->obj_id = hammer_alloc_objid(hmp, dip);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater we use dirhash algorithm #1
         * which is semi-sorted.  Algorithm #0 was just a pure crc.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |= HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(ip->lock.refs == 1);
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}
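
/*
 * NOTE: the kfree() above must name the same per-mount pool
 * (hmp->m_inodes) that hammer_get_inode() and hammer_create_inode()
 * allocated the structure from; DragonFly's kmalloc accounting is kept
 * per malloc type, so freeing to the wrong pool would corrupt it.
 */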

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->lock.refs == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (ip->lock.refs == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;
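
        /*
         * Multiple passes are required: the callback above may convert an
         * in-use inode into a clean one (vclean_unlocked() drops the
         * vnode), so a clean result is only trusted once try > 1, with a
         * flusher sync between passes to push out any released state.
         */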
        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}

/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_unref(&pfsm->lock);
        if (pfsm->lock.refs == 0) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean-up
                 * the state before releasing so we do not have to set-up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * The record isn't managed by the inode's record tree,
                 * destroy it whether we succeed or fail.
                 */
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED;
                record->flush_state = HAMMER_FST_IDLE;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        /*hammer_mount_t hmp = ip->hmp;*/

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                hammer_flush_inode(ip, 0);
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_free_inode(ip);
        return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
        hammer_record_t rec;

        /*
         * Get rid of the inode's in-memory records, regardless of their
         * state, and clear the mod-mask.
         */
        while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
                TAILQ_REMOVE(&ip->target_list, rec, target_entry);
                rec->target_ip = NULL;
                if (rec->flush_state == HAMMER_FST_SETUP)
                        rec->flush_state = HAMMER_FST_IDLE;
        }
        while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
                if (rec->flush_state == HAMMER_FST_FLUSH)
                        --rec->flush_group->refs;
                else
                        hammer_ref(&rec->lock);
                KKASSERT(rec->lock.refs == 1);
                rec->flush_state = HAMMER_FST_IDLE;
                rec->flush_group = NULL;
                rec->flags |= HAMMER_RECF_DELETED_FE;
                rec->flags |= HAMMER_RECF_DELETED_BE;
                hammer_rel_mem_record(rec);
        }
        ip->flags &= ~HAMMER_INODE_MODMASK;
        ip->sync_flags &= ~HAMMER_INODE_MODMASK;
        KKASSERT(ip->vp == NULL);

        /*
         * Remove the inode from any flush group, force it idle.  FLUSH
         * and SETUP states have an inode ref.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_FLUSH:
                TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
                --ip->flush_group->refs;
                ip->flush_group = NULL;
                /* fall through */
        case HAMMER_FST_SETUP:
                hammer_unref(&ip->lock);
                ip->flush_state = HAMMER_FST_IDLE;
                /* fall through */
        case HAMMER_FST_IDLE:
                break;
        }

        /*
         * There shouldn't be any associated vnode.  The unload needs at
         * least one ref, if we do have a vp steal its ip ref.
         */
        if (ip->vp) {
                kprintf("hammer_destroy_inode_callback: Unexpected "
                        "vnode association ip %p vp %p\n", ip, ip->vp);
                ip->vp->v_data = NULL;
                ip->vp = NULL;
        } else {
                hammer_ref(&ip->lock);
        }
        hammer_unload_inode(ip);
        return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY:      Inode data has been updated
 * HAMMER_INODE_XDIRTY:      Dirty in-memory records
 * HAMMER_INODE_BUFS:        Dirty buffer cache buffers
 * HAMMER_INODE_DELETED:     Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        /*
         * ronly of 0 or 2 does not trigger assertion.
         * 2 is a special error state
         */
        KKASSERT(ip->hmp->ronly != 1 ||
                  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                            HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                            HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        ip->flags |= flags;
}
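
/*
 * Typical usage, as in hammer_mkroot_pseudofs() above:
 *
 *	++ip->ino_data.nlinks;
 *	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 */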

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        hammer_mount_t hmp;
        hammer_flush_group_t flg;
        int good;

        /*
         * next_flush_group is the first flush group we can place the inode
         * in.  It may be NULL.  If it becomes full we append a new flush
         * group and make that the next_flush_group.
         */
        hmp = ip->hmp;
        while ((flg = hmp->next_flush_group) != NULL) {
                KKASSERT(flg->running == 0);
                if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
                        break;
                hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
                hammer_flusher_async(ip->hmp, flg);
        }
        if (flg == NULL) {
                flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
                hmp->next_flush_group = flg;
                TAILQ_INIT(&flg->flush_list);
                TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
        }

        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         *
         * If we have a parent dependency we must still fall through
         * so we can run it.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP &&
                    TAILQ_EMPTY(&ip->target_list)) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                if (ip->flush_state == HAMMER_FST_IDLE)
                        return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependencies and can flush immediately.  Some
                 * of our children may not be flushable so we have to re-test
                 * with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flg, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependencies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependencies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip, flg);

                if (good >= 0) {
                        /*
                         * We can continue if good >= 0.  Determine how
                         * many records under our inode can be flushed (and
                         * mark them).
                         */
                        hammer_flush_inode_core(ip, flg, flags);
                } else {
                        /*
                         * Parent has no connectivity, tell it to flush
                         * us as soon as it does.
                         *
                         * The REFLUSH flag is also needed to trigger
                         * dependency wakeups.
                         */
                        ip->flags |= HAMMER_INODE_CONN_DOWN |
                                     HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp, flg);
                        }
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 *
                 * The REFLUSH flag is also needed to trigger
                 * dependency wakeups.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp, flg);
                }
                break;
        }
}

/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 * so for now do not ref/deref the structures.  Note that if we use the
 * ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg)
{
        hammer_record_t depend;
        int good;
        int r;

        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend, flg);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
        }
        return(good);
}

/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip           = parent inode
 * record->target_ip    = child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record,
                                  hammer_flush_group_t flg)
{
        hammer_mount_t hmp;
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;
        hmp = pip->hmp;

        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                /*
                 * If not in our flush group ask the parent to reflush
                 * us as soon as possible.
                 */
                if (record->flush_group != flg) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                        return(-1);
                }

                /*
                 * If in our flush group everything is already set up,
                 * just return whether the record will improve our
                 * visibility or not.
                 */
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                return(0);
        }

        /*
         * It must be a setup record.  Try to resolve the setup dependencies
         * by recursing upwards so we can place ip on the flush list.
         */
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        good = hammer_setup_parent_inodes(pip, flg);

        /*
         * If good < 0 the parent has no connectivity and we cannot safely
         * flush the directory entry, which also means we can't flush our
         * ip.  Flag the parent and us for downward recursion once the
         * parent's connectivity is resolved.
         */
        if (good < 0) {
                /* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */
                record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                return(good);
        }

        /*
         * We are go, place the parent inode in a flushing state so we can
         * place its record in a flushing state.  Note that the parent
         * may already be flushing.  The record must be in the same flush
         * group as the parent.
         */
        if (pip->flush_state != HAMMER_FST_FLUSH)
                hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
        KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

#if 0
        if (record->type == HAMMER_MEM_RECORD_DEL &&
            (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
                /*
                 * Regardless of flushing state we cannot sync this path if the
                 * record represents a delete-on-disk but the target inode
                 * is not ready to sync its own deletion.
                 *
                 * XXX need to count effective nlinks to determine whether
                 * the flush is ok, otherwise removing a hardlink will
                 * just leave the DEL record to rot.
                 */
                record->target_ip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        } else
#endif
        if (pip->flush_group == flg) {
                /*
                 * Because we have not calculated nlinks yet we can just
                 * set records to the flush state if the parent is in
                 * the same flush group as we are.
                 */
                record->flush_state = HAMMER_FST_FLUSH;
                record->flush_group = flg;
                ++record->flush_group->refs;
                hammer_ref(&record->lock);

                /*
                 * A general directory-add contributes to our visibility.
                 *
                 * Otherwise it is probably a directory-delete or
                 * delete-on-disk record and does not contribute to our
                 * visibility (but we can still flush it).
                 */
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                return(0);
        } else {
                /*
                 * If the parent is not in our flush group we cannot
                 * flush this record yet, there is no visibility.
                 * We tell the parent to reflush and mark ourselves
                 * so the parent knows it should flush us too.
                 */
                pip->flags |= HAMMER_INODE_REFLUSH;
                record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
                return(-1);
        }
}

/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
        int go_count;

        /*
         * Set flush state and prevent the flusher from cycling into
         * the next flush group.  Do not place the ip on the list yet.
         * Inodes not in the idle state get an extra reference.
         */
        KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
        if (ip->flush_state == HAMMER_FST_IDLE)
                hammer_ref(&ip->lock);
        ip->flush_state = HAMMER_FST_FLUSH;
        ip->flush_group = flg;
        ++ip->hmp->flusher.group_lock;
        ++ip->hmp->count_iqueued;
        ++hammer_count_iqueued;
        ++flg->total_count;

        /*
         * If the flush group reaches the autoflush limit we want to signal
         * the flusher.  This is particularly important for remove()s.
         */
        if (flg->total_count == hammer_autoflush)
                flags |= HAMMER_FLUSH_SIGNAL;

        /*
         * We need to be able to vfsync/truncate from the backend.
         */
        KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
        if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
                ip->flags |= HAMMER_INODE_VHELD;
                vref(ip->vp);
        }

        /*
         * Figure out how many in-memory records we can actually flush
         * (not including inode meta-data, buffers, etc).
         */
        KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
        if (flags & HAMMER_FLUSH_RECURSION) {
                /*
                 * If this is an upwards recursion we do not want to
                 * recurse down again!
                 */
                go_count = 1;
#if 0
        } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
                /*
                 * No new records are added if we must complete a flush
                 * from a previous cycle, but we do have to move the records
                 * from the previous cycle to the current one.
                 */
#if 0
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_syncgrp_child_callback, NULL);
#endif
                go_count = 1;
#endif
        } else {
                /*
                 * Normal flush, scan records and bring them into the flush.
                 * Directory adds and deletes are usually skipped (they are
                 * grouped with the related inode rather than with the
                 * directory).
                 *
                 * go_count can be negative, which means the scan aborted
                 * due to the flush group being over-full and we should
                 * flush what we have.
                 */
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_setup_child_callback, NULL);
        }

        /*
         * This is a more involved test that includes go_count.  If we
         * can't flush, flag the inode and return.  If go_count is 0 we
         * were unable to flush any records in our rec_tree and
         * must ignore the XDIRTY flag.
         */
        if (go_count == 0) {
                if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
                        --ip->hmp->count_iqueued;
                        --hammer_count_iqueued;

                        --flg->total_count;
                        ip->flush_state = HAMMER_FST_SETUP;
                        ip->flush_group = NULL;
                        if (ip->flags & HAMMER_INODE_VHELD) {
                                ip->flags &= ~HAMMER_INODE_VHELD;
                                vrele(ip->vp);
                        }

                        /*
                         * REFLUSH is needed to trigger dependency wakeups
                         * when an inode is in SETUP.
                         */
                        ip->flags |= HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp, flg);
                        }
                        if (--ip->hmp->flusher.group_lock == 0)
                                wakeup(&ip->hmp->flusher.group_lock);
                        return;
                }
        }

        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * We continue to retain save_trunc_off even when all truncations
         * have been resolved as an optimization to determine if we can
         * skip the B-Tree lookup for overwrite deletions.
         *
         * NOTE: The DELETING flag is a mod flag, but it is also sticky,
         * and stays in ip->flags.  Once set, it stays set until the
         * inode is destroyed.
         */
        if (ip->flags & HAMMER_INODE_TRUNCATED) {
                KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
                ip->sync_trunc_off = ip->trunc_off;
                ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
                ip->flags &= ~HAMMER_INODE_TRUNCATED;
                ip->sync_flags |= HAMMER_INODE_TRUNCATED;

                /*
                 * The save_trunc_off used to cache whether the B-Tree
                 * holds any records past that point is not used until
                 * after the truncation has succeeded, so we can safely
                 * set it now.
                 */
                if (ip->save_trunc_off > ip->sync_trunc_off)
                        ip->save_trunc_off = ip->sync_trunc_off;
        }
        ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
                           ~HAMMER_INODE_TRUNCATED);
        ip->sync_ino_leaf = ip->ino_leaf;
        ip->sync_ino_data = ip->ino_data;
        ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
        if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
                kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

        /*
         * The flusher list inherits our inode and reference.
         */
        KKASSERT(flg->running == 0);
        TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
        if (--ip->hmp->flusher.group_lock == 0)
                wakeup(&ip->hmp->flusher.group_lock);

        if (flags & HAMMER_FLUSH_SIGNAL) {
                hammer_flusher_async(ip->hmp, flg);
        }
}
1808 * Callback for scan of ip->rec_tree. Try to include each record in our
1809 * flush. ip->flush_group has been set but the inode has not yet been
1810 * moved into a flushing state.
1812 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1813 * both inodes.
1815 * We return 1 for any record placed or found in FST_FLUSH, which prevents
1816 * the caller from shortcutting the flush.
1818 static int
1819 hammer_setup_child_callback(hammer_record_t rec, void *data)
1821 hammer_flush_group_t flg;
1822 hammer_inode_t target_ip;
1823 hammer_inode_t ip;
1824 int r;
1827 * Deleted records are ignored. Note that the flush detects deleted
1828 * front-end records at multiple points to deal with races. This is
1829 * just the first line of defense. The only time DELETED_FE cannot
1830 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
1832 * Don't get confused between record deletion and, say, directory
1833 * entry deletion. The deletion of a directory entry that is on
1834 * the media has nothing to do with the record deletion flags.
1836 if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
1837 if (rec->flush_state == HAMMER_FST_FLUSH) {
1838 KKASSERT(rec->flush_group == rec->ip->flush_group);
1839 r = 1;
1840 } else {
1841 r = 0;
1843 return(r);
1847 * If the record is in an idle state it has no dependancies and
1848 * can be flushed.
1850 ip = rec->ip;
1851 flg = ip->flush_group;
1852 r = 0;
1854 switch(rec->flush_state) {
1855 case HAMMER_FST_IDLE:
1857 * The record has no setup dependancy, we can flush it.
1859 KKASSERT(rec->target_ip == NULL);
1860 rec->flush_state = HAMMER_FST_FLUSH;
1861 rec->flush_group = flg;
1862 ++flg->refs;
1863 hammer_ref(&rec->lock);
1864 r = 1;
1865 break;
1866 case HAMMER_FST_SETUP:
1868 * The record has a setup dependancy. These are typically
1869 * directory entry adds and deletes. Such entries will be
1870 * flushed when their inodes are flushed so we do not
1871 * usually have to add them to the flush here. However,
1872 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
1873 * it is asking us to flush this record (and it).
1875 target_ip = rec->target_ip;
1876 KKASSERT(target_ip != NULL);
1877 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1880 * If the target IP is already flushing in our group
1881 * we could associate the record, but target_ip has
1882 * already synced ino_data to sync_ino_data and we
1883 * would also have to adjust nlinks. Plus there are
1884 * ordering issues for adds and deletes.
1886 * Reflush downward if this is an ADD, and upward if
1887 * this is a DEL.
1889 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1890 if (rec->flush_state == HAMMER_MEM_RECORD_ADD)
1891 ip->flags |= HAMMER_INODE_REFLUSH;
1892 else
1893 target_ip->flags |= HAMMER_INODE_REFLUSH;
1894 break;
1898 * Target IP is not yet flushing. This can get complex
1899 * because we have to be careful about the recursion.
1901 * Directories create an issue for us in that if a flush
1902 * of a directory is requested the expectation is to flush
1903 * any pending directory entries, but this will cause the
1904 * related inodes to recursively flush as well. We can't
1905 * really defer the operation so just get as many as we
1906 * can and
1908 #if 0
1909 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
1910 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
1912 * We aren't reclaiming and the target ip was not
1913 * previously prevented from flushing due to this
1914 * record dependancy. Do not flush this record.
1916 /*r = 0;*/
1917 } else
1918 #endif
1919 if (flg->total_count + flg->refs >
1920 ip->hmp->undo_rec_limit) {
1922 * Our flush group is over-full and we risk blowing
1923 * out the UNDO FIFO. Stop the scan, flush what we
1924 * have, then reflush the directory.
1926 * The directory may be forced through multiple
1927 * flush groups before it can be completely
1928 * flushed.
1930 ip->flags |= HAMMER_INODE_RESIGNAL |
1931 HAMMER_INODE_REFLUSH;
1932 r = -1;
1933 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1935 * If the target IP is not flushing we can force
1936 * it to flush, even if it is unable to write out
1937 * any of its own records we have at least one in
1938 * hand that we CAN deal with.
1940 rec->flush_state = HAMMER_FST_FLUSH;
1941 rec->flush_group = flg;
1942 ++flg->refs;
1943 hammer_ref(&rec->lock);
1944 hammer_flush_inode_core(target_ip, flg,
1945 HAMMER_FLUSH_RECURSION);
1946 r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}

#if 0
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(0);
}
#endif

/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
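
/*
 * Typical usage, sketched: a synchronous caller (e.g. the fsync path)
 * signals a flush and then waits for it:
 *
 *	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *	hammer_wait_inode(ip);
 *
 * The FLUSHW flag and the tsleep() on &ip->flags above pair with the
 * wakeup(&ip->flags) issued by hammer_flush_inode_done() below.
 */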

/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->error = error;
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 *
	 * The reflush flag can be set superfluously, causing extra pain
	 * for no reason.  If the inode is no longer modified it no longer
	 * needs to be flushed.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK) {
		if (ip->vp == NULL)
			ip->flags |= HAMMER_INODE_REFLUSH;
	} else {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
	}

	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group
		 */
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		ip->flush_group = NULL;

		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;

		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}
	}

	/*
	 * If the frontend is waiting for a flush to complete,
	 * wake it up.
	 */
	if (ip->flags & HAMMER_INODE_FLUSHW) {
		ip->flags &= ~HAMMER_INODE_FLUSHW;
		wakeup(&ip->flags);
	}

	/*
	 * If the frontend made more changes and requested another
	 * flush, then try to get it running.
	 *
	 * Reflushes are aborted when the inode is errored out.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}

	/*
	 * If we have no parent dependencies we can clear CONN_DOWN.
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}

/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %p %p\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		Debugger("blah2");
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 *	 record out, but the flush completion code converts it to
	 *	 a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend may have already disposed of the record.
	 */
	if (record->flags & HAMMER_RECF_DELETED_BE) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, so we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * in the blockmap.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion.
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL)
		record->leaf.base.create_tid = trans->tid;
	record->leaf.create_ts = trans->time32;
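
	/*
	 * The retry loop below handles cursor deadlocks: an EDEADLK
	 * return from hammer_ip_sync_record_cursor() indicates a B-Tree
	 * lock collision, so rather than retrying in place we tear the
	 * cursor down completely and re-initialize it before trying the
	 * sync again.
	 */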
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error)
		error = -error;
done:
	hammer_flush_record_done(record, error);

	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor, 0);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor, 0);
	}

	return(error);
}

/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		goto done;

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group.
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}
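
	/*
	 * To illustrate the adjustment above with a hypothetical case:
	 * a hardlink ADD sitting in a later flush group means its
	 * directory entry is not yet on media, so the nlinks value we
	 * sync now must not count it (--nlinks); a pending DEL in a
	 * later group means the on-media entry still exists for now
	 * (++nlinks).
	 */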

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the
	 * (aligned) truncation point.  Userland will have dealt with the
	 * buffer containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
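
		/*
		 * A worked example of the round-up, assuming a 16KB
		 * block size (so blkmask == 0x3fff): a trunc_off of
		 * 0x5000 yields (0x5000 + 0x3fff) & ~0x3fff == 0x8000
		 * for aligned_trunc_off.  Everything at or beyond
		 * 0x8000 is deleted below; the partially truncated
		 * block in front of it was already handled by the
		 * front-end.
		 */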

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}

		if (error)
			goto done;

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk
	 * records.
	 */
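	/*
	 * Note the sign convention: hammer_sync_record_callback()
	 * returns a negated errno so RB_SCAN() aborts the scan on the
	 * first failure, and we flip it back to a positive errno before
	 * folding it into our own error state.
	 */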
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag
			 * handles this, do not set DDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;

			/*
			 * Adjust the inode count in the volume header.
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}

	if (error)
		goto done;
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the
	 * on-disk inode to satisfy visibility requirements if there happen
	 * to be directory dependencies.
	 */
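	/*
	 * The four DELETED/ONDISK combinations handled below, in brief:
	 *
	 *	DELETED+ONDISK	hammer_update_inode() will delete the
	 *			existing on-media record.
	 *	DELETED only	the inode never reached the media; just
	 *			tear down any remaining in-memory records.
	 *	ONDISK only	nothing extra needs to be flagged.
	 *	neither		first flush of a new inode; DDIRTY is
	 *			forced so an initial record is written.
	 */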
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If DDIRTY is set, write out a new record.  If the inode is
	 * already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flags are set we can update the record
	 * in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME |
			      HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
done:
	if (error) {
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	}
	hammer_done_cursor(&cursor);
	return(error);
}

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 *
	 * Don't reflag the deletion if the flusher is currently syncing
	 * one that was already flagged.  A previously set DELETING flag
	 * may bounce around flags and sync_flags until the operation is
	 * completely done.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    ((ip->flags | ip->sync_flags) &
	     (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
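
		/*
		 * Setting TRUNCATED with trunc_off = 0 expresses the
		 * on-media deletion as a truncation to zero length, so
		 * the backend's hammer_sync_inode() will destroy all of
		 * the inode's data via hammer_ip_delete_range() on the
		 * next flush.
		 */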
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * Final cleanup.
		 */
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}

/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}

/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
		reclaim->okydoky = 1;
		wakeup(reclaim);
	}
}

/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
				  &reclaim, entry);
	} else {
		reclaim.okydoky = 1;
	}
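
	/*
	 * The sleep below scales linearly with the reclaim backlog.
	 * For illustration, with inode_reclaims at twice
	 * HAMMER_RECLAIM_WAIT the delay works out to
	 * HAMMER_RECLAIM_WAIT * hz / (HAMMER_RECLAIM_WAIT * 5), i.e.
	 * hz / 5 ticks (about 200ms at hz = 100), and we always sleep
	 * at least one tick.  A wakeup from hammer_inode_wakereclaims()
	 * ends the sleep early.
	 */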
	if (reclaim.okydoky == 0) {
		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
			(HAMMER_RECLAIM_WAIT * 5);
		if (delay >= 0)
			tsleep(&reclaim, 0, "hmrrcm", delay + 1);
		if (reclaim.okydoky == 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}