dragonfly.git: sys/vfs/hammer/hammer_object.c (HAMMER: MFC all changes through 20080924)
1 /*
2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.90.2.6 2008/09/25 01:42:52 dillon Exp $
37 #include "hammer.h"
39 static int hammer_mem_lookup(hammer_cursor_t cursor);
40 static int hammer_mem_first(hammer_cursor_t cursor);
41 static int hammer_frontend_trunc_callback(hammer_record_t record,
42 void *data __unused);
43 static int hammer_bulk_scan_callback(hammer_record_t record, void *data);
44 static int hammer_record_needs_overwrite_delete(hammer_record_t record);
45 static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
46 hammer_btree_leaf_elm_t leaf);
48 struct rec_trunc_info {
49 u_int16_t rec_type;
50 int64_t trunc_off;
53 struct hammer_bulk_info {
54 hammer_record_t record;
55 struct hammer_btree_leaf_elm leaf;
59 * Red-black tree support. Comparison code for insertion.
61 static int
62 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
64 if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
65 return(-1);
66 if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
67 return(1);
69 if (rec1->leaf.base.key < rec2->leaf.base.key)
70 return(-1);
71 if (rec1->leaf.base.key > rec2->leaf.base.key)
72 return(1);
75 * Never match against an item deleted by the front-end.
77 * rec1 is greater than rec2 if rec1 is marked deleted.
78 * rec1 is less than rec2 if rec2 is marked deleted.
80 * Multiple deleted records may be present; do not return 0
81 * if both are marked deleted.
83 if (rec1->flags & HAMMER_RECF_DELETED_FE)
84 return(1);
85 if (rec2->flags & HAMMER_RECF_DELETED_FE)
86 return(-1);
88 return(0);
92 * Basic record comparison code similar to hammer_btree_cmp().
94 static int
95 hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
97 if (elm->rec_type < rec->leaf.base.rec_type)
98 return(-3);
99 if (elm->rec_type > rec->leaf.base.rec_type)
100 return(3);
102 if (elm->key < rec->leaf.base.key)
103 return(-2);
104 if (elm->key > rec->leaf.base.key)
105 return(2);
108 * Never match against an item deleted by the front-end.
109 * elm is less than rec if rec is marked deleted.
111 if (rec->flags & HAMMER_RECF_DELETED_FE)
112 return(-1);
113 return(0);
117 * Ranged scan to locate overlapping record(s). This is used by
118 * hammer_ip_get_bulk() to locate an overlapping record. We have
119 * to use a ranged scan because the keys for data records with the
120 * same file base offset can be different due to differing data_len's.
122 * NOTE: The base file offset of a data record is (key - data_len), not (key).
124 static int
125 hammer_rec_overlap_cmp(hammer_record_t rec, void *data)
127 struct hammer_bulk_info *info = data;
128 hammer_btree_leaf_elm_t leaf = &info->leaf;
130 if (rec->leaf.base.rec_type < leaf->base.rec_type)
131 return(-3);
132 if (rec->leaf.base.rec_type > leaf->base.rec_type)
133 return(3);
136 * Overlap compare
138 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
139 /* rec_beg >= leaf_end */
140 if (rec->leaf.base.key - rec->leaf.data_len >= leaf->base.key)
141 return(2);
142 /* rec_end <= leaf_beg */
143 if (rec->leaf.base.key <= leaf->base.key - leaf->data_len)
144 return(-2);
145 } else {
146 if (rec->leaf.base.key < leaf->base.key)
147 return(-2);
148 if (rec->leaf.base.key > leaf->base.key)
149 return(2);
153 * We have to return 0 at this point, even if DELETED_FE is set,
154 * because returning anything else will cause the scan to ignore
155 * one of the branches when we really want it to check both.
157 return(0);
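/*
 * Illustrative sketch: the DATA-record overlap test above can be read as a
 * half-open interval intersection.  A data record's key is its ending file
 * offset, so the record covers [key - data_len, key).  The helper below is
 * hypothetical (not used by HAMMER); it simply restates the two reject
 * cases from hammer_rec_overlap_cmp() as a plain boolean.
 */
static int
example_data_spans_overlap(int64_t key1, int32_t len1,
			   int64_t key2, int32_t len2)
{
	if (key1 - len1 >= key2)	/* span 1 begins at/after span 2 ends */
		return(0);
	if (key1 <= key2 - len2)	/* span 1 ends at/before span 2 begins */
		return(0);
	return(1);
}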
161 * RB_SCAN comparison code for hammer_mem_first(). The argument order
162 * is reversed so the comparison result has to be negated. key_beg and
163 * key_end are both range-inclusive.
165 * Localized deletions are not cached in-memory.
167 static
169 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
171 hammer_cursor_t cursor = data;
172 int r;
174 r = hammer_rec_cmp(&cursor->key_beg, rec);
175 if (r > 1)
176 return(-1);
177 r = hammer_rec_cmp(&cursor->key_end, rec);
178 if (r < -1)
179 return(1);
180 return(0);
184 * This compare function is used when simply looking up key_beg.
186 static
188 hammer_rec_find_cmp(hammer_record_t rec, void *data)
190 hammer_cursor_t cursor = data;
191 int r;
193 r = hammer_rec_cmp(&cursor->key_beg, rec);
194 if (r > 1)
195 return(-1);
196 if (r < -1)
197 return(1);
198 return(0);
202 * Locate blocks within the truncation range. Partial blocks do not count.
204 static
206 hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
208 struct rec_trunc_info *info = data;
210 if (rec->leaf.base.rec_type < info->rec_type)
211 return(-1);
212 if (rec->leaf.base.rec_type > info->rec_type)
213 return(1);
215 switch(rec->leaf.base.rec_type) {
216 case HAMMER_RECTYPE_DB:
218 * DB record key is not beyond the truncation point, retain.
220 if (rec->leaf.base.key < info->trunc_off)
221 return(-1);
222 break;
223 case HAMMER_RECTYPE_DATA:
225 * DATA record offset start is not beyond the truncation point,
226 * retain.
228 if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
229 return(-1);
230 break;
231 default:
232 panic("hammer_rec_trunc_cmp: unexpected record type");
236 * The record start is >= the truncation point, return match,
237 * the record should be destroyed.
239 return(0);
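/*
 * Illustrative sketch: for the REGFILE (DATA record) case above, a
 * truncation to trunc_off destroys an in-memory record only when the
 * record's base offset (key - data_len) lies at or beyond the truncation
 * point; records that merely straddle it are retained.  Hypothetical
 * helper, not part of HAMMER.
 */
static int
example_data_record_truncated(int64_t key, int32_t data_len, int64_t trunc_off)
{
	/* non-zero: the whole record sits past the truncation point */
	return(key - data_len >= trunc_off);
}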
242 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
245 * Allocate a record for the caller to finish filling in. The record is
246 * returned referenced.
248 hammer_record_t
249 hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
251 hammer_record_t record;
253 ++hammer_count_records;
254 record = kmalloc(sizeof(*record), M_HAMMER,
255 M_WAITOK | M_ZERO | M_USE_RESERVE);
256 record->flush_state = HAMMER_FST_IDLE;
257 record->ip = ip;
258 record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
259 record->leaf.data_len = data_len;
260 hammer_ref(&record->lock);
262 if (data_len) {
263 record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
264 record->flags |= HAMMER_RECF_ALLOCDATA;
265 ++hammer_count_record_datas;
268 return (record);
271 void
272 hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
274 while (record->flush_state == HAMMER_FST_FLUSH) {
275 record->flags |= HAMMER_RECF_WANTED;
276 tsleep(record, 0, ident, 0);
281 * Called from the backend, hammer_inode.c, after a record has been
282 * flushed to disk. The record has been exclusively locked by the
283 * caller and interlocked with BE.
285 * We clean up the state, unlock, and release the record (the record
286 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
288 void
289 hammer_flush_record_done(hammer_record_t record, int error)
291 hammer_inode_t target_ip;
293 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
294 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
296 if (error) {
298 * An error occurred; the backend was unable to sync the
299 * record to its media. Leave the record intact.
301 hammer_critical_error(record->ip->hmp, record->ip, error,
302 "while flushing record");
305 --record->flush_group->refs;
306 record->flush_group = NULL;
308 if (record->flags & HAMMER_RECF_DELETED_BE) {
309 if ((target_ip = record->target_ip) != NULL) {
310 TAILQ_REMOVE(&target_ip->target_list, record,
311 target_entry);
312 record->target_ip = NULL;
313 hammer_test_inode(target_ip);
315 record->flush_state = HAMMER_FST_IDLE;
316 } else {
317 if (record->target_ip) {
318 record->flush_state = HAMMER_FST_SETUP;
319 hammer_test_inode(record->ip);
320 hammer_test_inode(record->target_ip);
321 } else {
322 record->flush_state = HAMMER_FST_IDLE;
325 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
326 if (record->flags & HAMMER_RECF_WANTED) {
327 record->flags &= ~HAMMER_RECF_WANTED;
328 wakeup(record);
330 hammer_rel_mem_record(record);
334 * Release a memory record. Records marked for deletion are immediately
335 * removed from the RB-Tree but otherwise left intact until the last ref
336 * goes away.
338 void
339 hammer_rel_mem_record(struct hammer_record *record)
341 hammer_mount_t hmp;
342 hammer_reserve_t resv;
343 hammer_inode_t ip;
344 hammer_inode_t target_ip;
346 hammer_unref(&record->lock);
348 if (record->lock.refs == 0) {
350 * Upon release of the last reference wakeup any waiters.
351 * The record structure may get destroyed so callers will
352 * loop up and do a relookup.
354 * WARNING! Record must be removed from RB-TREE before we
355 * might possibly block. hammer_test_inode() can block!
357 ip = record->ip;
358 hmp = ip->hmp;
361 * Upon release of the last reference a record marked deleted
362 * is destroyed.
364 if (record->flags & HAMMER_RECF_DELETED_FE) {
365 KKASSERT(ip->lock.refs > 0);
366 KKASSERT(record->flush_state != HAMMER_FST_FLUSH);
369 * target_ip may have zero refs, we have to ref it
370 * to prevent it from being ripped out from under
371 * us.
373 if ((target_ip = record->target_ip) != NULL) {
374 TAILQ_REMOVE(&target_ip->target_list,
375 record, target_entry);
376 record->target_ip = NULL;
377 hammer_ref(&target_ip->lock);
380 if (record->flags & HAMMER_RECF_ONRBTREE) {
381 RB_REMOVE(hammer_rec_rb_tree,
382 &record->ip->rec_tree,
383 record);
384 KKASSERT(ip->rsv_recs > 0);
385 --hmp->rsv_recs;
386 --ip->rsv_recs;
387 hmp->rsv_databytes -= record->leaf.data_len;
388 record->flags &= ~HAMMER_RECF_ONRBTREE;
390 if (RB_EMPTY(&record->ip->rec_tree)) {
391 record->ip->flags &= ~HAMMER_INODE_XDIRTY;
392 record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
393 hammer_test_inode(record->ip);
398 * We must wait for any direct-IO to complete before
399 * we can destroy the record because the bio may
400 * have a reference to it.
402 if (record->flags &
403 (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL)) {
404 hammer_io_direct_wait(record);
409 * Do this test after removing record from the B-Tree.
411 if (target_ip) {
412 hammer_test_inode(target_ip);
413 hammer_rel_inode(target_ip, 0);
416 if (record->flags & HAMMER_RECF_ALLOCDATA) {
417 --hammer_count_record_datas;
418 kfree(record->data, M_HAMMER);
419 record->flags &= ~HAMMER_RECF_ALLOCDATA;
423 * Release the reservation. If the record was not
424 * committed return the reservation before
425 * releasing it.
427 if ((resv = record->resv) != NULL) {
428 if ((record->flags & HAMMER_RECF_COMMITTED) == 0) {
429 hammer_blockmap_reserve_undo(
430 resv,
431 record->leaf.data_offset,
432 record->leaf.data_len);
434 hammer_blockmap_reserve_complete(hmp, resv);
435 record->resv = NULL;
437 record->data = NULL;
438 --hammer_count_records;
439 kfree(record, M_HAMMER);
445 * Record visibility depends on whether the record is being accessed by
446 * the backend or the frontend.
448 * Return non-zero if the record is visible, zero if it isn't or if it is
449 * deleted.
451 static __inline
453 hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
455 if (cursor->flags & HAMMER_CURSOR_BACKEND) {
456 if (record->flags & HAMMER_RECF_DELETED_BE)
457 return(0);
458 } else {
459 if (record->flags & HAMMER_RECF_DELETED_FE)
460 return(0);
462 return(1);
466 * This callback is used as part of the RB_SCAN function for in-memory
467 * records. We terminate it (return -1) as soon as we get a match.
469 * This routine is used by frontend code.
471 * The primary compare code does not account for ASOF lookups. This
472 * code handles that case as well as a few others.
474 static
476 hammer_rec_scan_callback(hammer_record_t rec, void *data)
478 hammer_cursor_t cursor = data;
481 * We terminate on success, so this should be NULL on entry.
483 KKASSERT(cursor->iprec == NULL);
486 * Skip if the record was marked deleted.
488 if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
489 return(0);
492 * Skip if not visible due to our as-of TID
494 if (cursor->flags & HAMMER_CURSOR_ASOF) {
495 if (cursor->asof < rec->leaf.base.create_tid)
496 return(0);
497 if (rec->leaf.base.delete_tid &&
498 cursor->asof >= rec->leaf.base.delete_tid) {
499 return(0);
504 * ref the record. The record is protected from backend B-Tree
505 * interactions by virtue of the cursor's IP lock.
507 hammer_ref(&rec->lock);
510 * The record may have been deleted while we were blocked.
512 if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
513 hammer_rel_mem_record(rec);
514 return(0);
518 * Set the matching record and stop the scan.
520 cursor->iprec = rec;
521 return(-1);
526 * Lookup an in-memory record given the key specified in the cursor. Works
527 * just like hammer_btree_lookup() but operates on an inode's in-memory
528 * record list.
530 * The lookup must fail if the record is marked for deferred deletion.
532 static
534 hammer_mem_lookup(hammer_cursor_t cursor)
536 int error;
538 KKASSERT(cursor->ip);
539 if (cursor->iprec) {
540 hammer_rel_mem_record(cursor->iprec);
541 cursor->iprec = NULL;
543 hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
544 hammer_rec_scan_callback, cursor);
546 if (cursor->iprec == NULL)
547 error = ENOENT;
548 else
549 error = 0;
550 return(error);
554 * hammer_mem_first() - locate the first in-memory record matching the
555 * cursor within the bounds of the key range.
557 static
559 hammer_mem_first(hammer_cursor_t cursor)
561 hammer_inode_t ip;
563 ip = cursor->ip;
564 KKASSERT(ip != NULL);
566 if (cursor->iprec) {
567 hammer_rel_mem_record(cursor->iprec);
568 cursor->iprec = NULL;
571 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
572 hammer_rec_scan_callback, cursor);
575 * Adjust scan.node and keep it linked into the RB-tree so we can
576 * hold the cursor through third party modifications of the RB-tree.
578 if (cursor->iprec)
579 return(0);
580 return(ENOENT);
583 /************************************************************************
584 * HAMMER IN-MEMORY RECORD FUNCTIONS *
585 ************************************************************************
587 * These functions manipulate in-memory records. Such records typically
588 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
592 * Add a directory entry (dip,ncp) which references inode (ip).
594 * Note that the low 32 bits of the namekey are set temporarily to create
595 * a unique in-memory record, and may be modified a second time when the
596 * record is synchronized to disk. In particular, the low 32 bits cannot be
597 * all 0's when synching to disk, which is not handled here.
599 * NOTE: bytes does not include any terminating \0 on name, and name might
600 * not be terminated.
603 hammer_ip_add_directory(struct hammer_transaction *trans,
604 struct hammer_inode *dip, const char *name, int bytes,
605 struct hammer_inode *ip)
607 struct hammer_cursor cursor;
608 hammer_record_t record;
609 int error;
610 int count;
611 u_int32_t iterator;
613 record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
614 if (++trans->hmp->namekey_iterator == 0)
615 ++trans->hmp->namekey_iterator;
617 record->type = HAMMER_MEM_RECORD_ADD;
618 record->leaf.base.localization = dip->obj_localization +
619 HAMMER_LOCALIZE_MISC;
620 record->leaf.base.obj_id = dip->obj_id;
621 record->leaf.base.key = hammer_directory_namekey(name, bytes);
622 record->leaf.base.key += trans->hmp->namekey_iterator;
623 record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
624 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
625 record->data->entry.obj_id = ip->obj_id;
626 record->data->entry.localization = ip->obj_localization;
627 bcopy(name, record->data->entry.name, bytes);
629 ++ip->ino_data.nlinks;
630 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
633 * Find an unused namekey. Both the in-memory record tree and
634 * the B-Tree are checked. Exact matches also match create_tid
635 * so use an ASOF search to (mostly) ignore it.
637 * delete-visibility is set so pending deletions do not give us
638 * a false-negative on our ability to use an iterator.
640 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
641 cursor.key_beg = record->leaf.base;
642 cursor.flags |= HAMMER_CURSOR_ASOF;
643 cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
644 cursor.asof = ip->obj_asof;
646 count = 0;
647 while (hammer_ip_lookup(&cursor) == 0) {
648 iterator = (u_int32_t)record->leaf.base.key + 1;
649 if (iterator == 0)
650 iterator = 1;
651 record->leaf.base.key &= ~0xFFFFFFFFLL;
652 record->leaf.base.key |= iterator;
653 cursor.key_beg.key = record->leaf.base.key;
654 if (++count == 1000000000) {
655 hammer_rel_mem_record(record);
656 error = ENOSPC;
657 goto failed;
662 * The target inode and the directory entry are bound together.
664 record->target_ip = ip;
665 record->flush_state = HAMMER_FST_SETUP;
666 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
669 * The inode now has a dependency and must be taken out of the idle
670 * state. An inode not in an idle state is given an extra reference.
672 * When transitioning to a SETUP state, flag for an automatic reflush
673 * when the dependencies are disposed of if someone is waiting on
674 * the inode.
676 if (ip->flush_state == HAMMER_FST_IDLE) {
677 hammer_ref(&ip->lock);
678 ip->flush_state = HAMMER_FST_SETUP;
679 if (ip->flags & HAMMER_INODE_FLUSHW)
680 ip->flags |= HAMMER_INODE_REFLUSH;
682 error = hammer_mem_add(record);
683 if (error == 0) {
684 dip->ino_data.mtime = trans->time;
685 hammer_modify_inode(dip, HAMMER_INODE_MTIME);
687 failed:
688 hammer_done_cursor(&cursor);
689 return(error);
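/*
 * Illustrative sketch: the collision loop above keeps the upper 32 bits of
 * the directory key (the name hash) fixed and walks only the low 32 bits,
 * skipping the reserved value 0.  This hypothetical helper (not part of
 * HAMMER) isolates that single iteration step.
 */
static int64_t
example_next_dirent_key(int64_t key)
{
	u_int32_t iterator;

	iterator = (u_int32_t)key + 1;	/* bump low 32 bits only */
	if (iterator == 0)
		iterator = 1;		/* 0 is reserved */
	key &= ~0xFFFFFFFFLL;		/* preserve the hash */
	key |= iterator;
	return(key);
}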
693 * Delete the directory entry and update the inode link count. The
694 * cursor must be seeked to the directory entry record being deleted.
696 * The related inode should be share-locked by the caller. The caller is
697 * on the frontend.
699 * This function can return EDEADLK requiring the caller to terminate
700 * the cursor, any locks, wait on the returned record, and retry.
703 hammer_ip_del_directory(struct hammer_transaction *trans,
704 hammer_cursor_t cursor, struct hammer_inode *dip,
705 struct hammer_inode *ip)
707 hammer_record_t record;
708 int error;
710 if (hammer_cursor_inmem(cursor)) {
712 * In-memory (unsynchronized) records can simply be freed.
713 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
714 * by the backend, we must still avoid races against the
715 * backend potentially syncing the record to the media.
717 * We cannot call hammer_ip_delete_record(), that routine may
718 * only be called from the backend.
720 record = cursor->iprec;
721 if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
722 KKASSERT(cursor->deadlk_rec == NULL);
723 hammer_ref(&record->lock);
724 cursor->deadlk_rec = record;
725 error = EDEADLK;
726 } else {
727 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
728 record->flags |= HAMMER_RECF_DELETED_FE;
729 error = 0;
731 } else {
733 * If the record is on-disk we have to queue the deletion by
734 * the record's key. This also causes lookups to skip the
735 * record.
737 KKASSERT(dip->flags &
738 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
739 record = hammer_alloc_mem_record(dip, 0);
740 record->type = HAMMER_MEM_RECORD_DEL;
741 record->leaf.base = cursor->leaf->base;
743 record->target_ip = ip;
744 record->flush_state = HAMMER_FST_SETUP;
745 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
748 * The inode now has a dependency and must be taken out of
749 * the idle state. An inode not in an idle state is given
750 * an extra reference.
752 * When transitioning to a SETUP state, flag for an automatic
753 * reflush when the dependencies are disposed of if someone
754 * is waiting on the inode.
756 if (ip->flush_state == HAMMER_FST_IDLE) {
757 hammer_ref(&ip->lock);
758 ip->flush_state = HAMMER_FST_SETUP;
759 if (ip->flags & HAMMER_INODE_FLUSHW)
760 ip->flags |= HAMMER_INODE_REFLUSH;
763 error = hammer_mem_add(record);
767 * One less link. The file may still be open in the OS even after
768 * all links have gone away.
770 * We have to terminate the cursor before syncing the inode to
771 * avoid deadlocking against ourselves. XXX this may no longer
772 * be true.
774 * If nlinks drops to zero and the vnode is inactive (or there is
775 * no vnode), call hammer_inode_unloadable_check() to zonk the
776 * inode. If we don't do this here the inode will not be destroyed
777 * on-media until we unmount.
779 if (error == 0) {
780 --ip->ino_data.nlinks;
781 dip->ino_data.mtime = trans->time;
782 hammer_modify_inode(dip, HAMMER_INODE_MTIME);
783 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
784 if (ip->ino_data.nlinks == 0 &&
785 (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
786 hammer_done_cursor(cursor);
787 hammer_inode_unloadable_check(ip, 1);
788 hammer_flush_inode(ip, 0);
792 return(error);
796 * Add a record to an inode.
798 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
799 * initialize the following additional fields:
801 * The related inode should be share-locked by the caller. The caller is
802 * on the frontend.
804 * record->rec.entry.base.base.key
805 * record->rec.entry.base.base.rec_type
806 * record->rec.entry.base.base.data_len
807 * record->data (a copy will be kmalloc'd if it cannot be embedded)
810 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
812 hammer_inode_t ip = record->ip;
813 int error;
815 KKASSERT(record->leaf.base.localization != 0);
816 record->leaf.base.obj_id = ip->obj_id;
817 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
818 error = hammer_mem_add(record);
819 return(error);
823 * Locate a bulk record in-memory. Bulk records allow disk space to be
824 * reserved so the front-end can flush large data writes without having
825 * to queue the BIO to the flusher. Only the related record gets queued
826 * to the flusher.
829 static hammer_record_t
830 hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
832 struct hammer_bulk_info info;
834 bzero(&info, sizeof(info));
835 info.leaf.base.obj_id = ip->obj_id;
836 info.leaf.base.key = file_offset + bytes;
837 info.leaf.base.create_tid = 0;
838 info.leaf.base.delete_tid = 0;
839 info.leaf.base.rec_type = HAMMER_RECTYPE_DATA;
840 info.leaf.base.obj_type = 0; /* unused */
841 info.leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; /* unused */
842 info.leaf.base.localization = ip->obj_localization + /* unused */
843 HAMMER_LOCALIZE_MISC;
844 info.leaf.data_len = bytes;
846 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_overlap_cmp,
847 hammer_bulk_scan_callback, &info);
849 return(info.record); /* may be NULL */
853 * Take records vetted by overlap_cmp. The first non-deleted record
854 * (if any) stops the scan.
856 static int
857 hammer_bulk_scan_callback(hammer_record_t record, void *data)
859 struct hammer_bulk_info *info = data;
861 if (record->flags & HAMMER_RECF_DELETED_FE)
862 return(0);
863 hammer_ref(&record->lock);
864 info->record = record;
865 return(-1); /* stop scan */
869 * Reserve blockmap space placemarked with an in-memory record.
871 * This routine is called by the frontend in order to be able to directly
872 * flush a buffer cache buffer. The frontend has locked the related buffer
873 * cache buffers and we should be able to manipulate any overlapping
874 * in-memory records.
876 * The caller is responsible for adding the returned record.
878 hammer_record_t
879 hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
880 int *errorp)
882 hammer_record_t record;
883 hammer_record_t conflict;
884 int zone;
887 * Deal with conflicting in-memory records. We cannot have multiple
888 * in-memory records for the same base offset without seriously
889 * confusing the backend, including but not limited to the backend
890 * issuing delete-create-delete or create-delete-create sequences
891 * and asserting on the delete_tid being the same as the create_tid.
893 * If we encounter a record with the backend interlock set we cannot
894 * immediately delete it without confusing the backend.
896 while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
897 if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
898 conflict->flags |= HAMMER_RECF_WANTED;
899 tsleep(conflict, 0, "hmrrc3", 0);
900 } else {
901 conflict->flags |= HAMMER_RECF_DELETED_FE;
903 hammer_rel_mem_record(conflict);
907 * Create a record to cover the direct write. This is called with
908 * the related BIO locked so there should be no possible conflict.
910 * The backend is responsible for finalizing the space reserved in
911 * this record.
913 * XXX bytes not aligned, depend on the reservation code to
914 * align the reservation.
916 record = hammer_alloc_mem_record(ip, 0);
917 zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
918 HAMMER_ZONE_SMALL_DATA_INDEX;
919 record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
920 &record->leaf.data_offset,
921 errorp);
922 if (record->resv == NULL) {
923 kprintf("hammer_ip_add_bulk: reservation failed\n");
924 hammer_rel_mem_record(record);
925 return(NULL);
927 record->type = HAMMER_MEM_RECORD_DATA;
928 record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
929 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
930 record->leaf.base.obj_id = ip->obj_id;
931 record->leaf.base.key = file_offset + bytes;
932 record->leaf.base.localization = ip->obj_localization +
933 HAMMER_LOCALIZE_MISC;
934 record->leaf.data_len = bytes;
935 hammer_crc_set_leaf(data, &record->leaf);
936 KKASSERT(*errorp == 0);
937 return(record);
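/*
 * Illustrative sketch: a bulk data record follows the same key convention
 * used throughout this file (key = base file offset + length), and the
 * reservation zone is chosen purely by size.  Hypothetical helper, not part
 * of HAMMER, condensing those two rules from hammer_ip_add_bulk().
 */
static void
example_bulk_record_setup(off_t file_offset, int bytes,
			  int64_t *keyp, int *zonep)
{
	*keyp = file_offset + bytes;
	*zonep = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
					     HAMMER_ZONE_SMALL_DATA_INDEX;
}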
941 * Frontend truncation code. Scan in-memory records only. On-disk records
942 * and records in a flushing state are handled by the backend. The vnops
943 * setattr code will handle the block containing the truncation point.
945 * Partial blocks are not deleted.
948 hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
950 struct rec_trunc_info info;
952 switch(ip->ino_data.obj_type) {
953 case HAMMER_OBJTYPE_REGFILE:
954 info.rec_type = HAMMER_RECTYPE_DATA;
955 break;
956 case HAMMER_OBJTYPE_DBFILE:
957 info.rec_type = HAMMER_RECTYPE_DB;
958 break;
959 default:
960 return(EINVAL);
962 info.trunc_off = file_size;
963 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
964 hammer_frontend_trunc_callback, &info);
965 return(0);
968 static int
969 hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
971 if (record->flags & HAMMER_RECF_DELETED_FE)
972 return(0);
973 if (record->flush_state == HAMMER_FST_FLUSH)
974 return(0);
975 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
976 hammer_ref(&record->lock);
977 record->flags |= HAMMER_RECF_DELETED_FE;
978 hammer_rel_mem_record(record);
979 return(0);
983 * Return 1 if the caller must check for and delete existing records
984 * before writing out a new data record.
986 * Return 0 if the caller can just insert the record into the B-Tree without
987 * checking.
989 static int
990 hammer_record_needs_overwrite_delete(hammer_record_t record)
992 hammer_inode_t ip = record->ip;
993 int64_t file_offset;
994 int r;
996 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
997 file_offset = record->leaf.base.key;
998 else
999 file_offset = record->leaf.base.key - record->leaf.data_len;
1000 r = (file_offset < ip->save_trunc_off);
1001 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1002 if (ip->save_trunc_off <= record->leaf.base.key)
1003 ip->save_trunc_off = record->leaf.base.key + 1;
1004 } else {
1005 if (ip->save_trunc_off < record->leaf.base.key)
1006 ip->save_trunc_off = record->leaf.base.key;
1008 return(r);
1012 * Backend code. Sync a record to the media.
1015 hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
1017 hammer_transaction_t trans = cursor->trans;
1018 int64_t file_offset;
1019 int bytes;
1020 void *bdata;
1021 int error;
1022 int doprop;
1024 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1025 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
1026 KKASSERT(record->leaf.base.localization != 0);
1029 * Any direct-write related to the record must complete before we
1030 * can sync the record to the on-disk media.
1032 if (record->flags & (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL))
1033 hammer_io_direct_wait(record);
1036 * If this is a bulk-data record placemarker there may be an existing
1037 * record on-disk, indicating a data overwrite. If there is the
1038 * on-disk record must be deleted before we can insert our new record.
1040 * We've synthesized this record and do not know what the create_tid
1041 * on-disk is, nor how much data it represents.
1043 * Keep in mind that (key) for data records is (base_offset + len),
1044 * not (base_offset). Also, we only want to get rid of on-disk
1045 * records since we are trying to sync our in-memory record, call
1046 * hammer_ip_delete_range() with truncating set to 1 to make sure
1047 * it skips in-memory records.
1049 * It is ok for the lookup to return ENOENT.
1051 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
1052 * to call hammer_ip_delete_range() or not. This also means we must
1053 * update sync_trunc_off() as we write.
1055 if (record->type == HAMMER_MEM_RECORD_DATA &&
1056 hammer_record_needs_overwrite_delete(record)) {
1057 file_offset = record->leaf.base.key - record->leaf.data_len;
1058 bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
1059 ~HAMMER_BUFMASK;
1060 KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
1061 error = hammer_ip_delete_range(
1062 cursor, record->ip,
1063 file_offset, file_offset + bytes - 1,
1065 if (error && error != ENOENT)
1066 goto done;
1070 * If this is a general record there may be an on-disk version
1071 * that must be deleted before we can insert the new record.
1073 if (record->type == HAMMER_MEM_RECORD_GENERAL) {
1074 error = hammer_delete_general(cursor, record->ip,
1075 &record->leaf);
1076 if (error && error != ENOENT)
1077 goto done;
1081 * Setup the cursor.
1083 hammer_normalize_cursor(cursor);
1084 cursor->key_beg = record->leaf.base;
1085 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1086 cursor->flags |= HAMMER_CURSOR_BACKEND;
1087 cursor->flags &= ~HAMMER_CURSOR_INSERT;
1090 * Records can wind up on-media before the inode itself is on-media.
1091 * Flag the case.
1093 record->ip->flags |= HAMMER_INODE_DONDISK;
1096 * If we are deleting a directory entry an exact match must be
1097 * found on-disk.
1099 if (record->type == HAMMER_MEM_RECORD_DEL) {
1100 error = hammer_btree_lookup(cursor);
1101 if (error == 0) {
1102 KKASSERT(cursor->iprec == NULL);
1103 error = hammer_ip_delete_record(cursor, record->ip,
1104 trans->tid);
1105 if (error == 0) {
1106 record->flags |= HAMMER_RECF_DELETED_FE;
1107 record->flags |= HAMMER_RECF_DELETED_BE;
1108 record->flags |= HAMMER_RECF_COMMITTED;
1111 goto done;
1115 * We are inserting.
1117 * Issue a lookup to position the cursor and locate the cluster. The
1118 * target key should not exist. If we are creating a directory entry
1119 * we may have to iterate the low 32 bits of the key to find an unused
1120 * key.
1122 hammer_sync_lock_sh(trans);
1123 cursor->flags |= HAMMER_CURSOR_INSERT;
1124 error = hammer_btree_lookup(cursor);
1125 if (hammer_debug_inode)
1126 kprintf("DOINSERT LOOKUP %d\n", error);
1127 if (error == 0) {
1128 kprintf("hammer_ip_sync_record: duplicate rec "
1129 "at (%016llx)\n", record->leaf.base.key);
1130 Debugger("duplicate record1");
1131 error = EIO;
1133 #if 0
1134 if (record->type == HAMMER_MEM_RECORD_DATA)
1135 kprintf("sync_record %016llx ---------------- %016llx %d\n",
1136 record->leaf.base.key - record->leaf.data_len,
1137 record->leaf.data_offset, error);
1138 #endif
1140 if (error != ENOENT)
1141 goto done_unlock;
1144 * Allocate the record and data. The result buffers will be
1145 * marked as being modified and further calls to
1146 * hammer_modify_buffer() will result in unneeded UNDO records.
1148 * Support zero-fill records (data == NULL and data_len != 0)
1150 if (record->type == HAMMER_MEM_RECORD_DATA) {
1152 * The data portion of a bulk-data record has already been
1153 * committed to disk, we need only adjust the layer2
1154 * statistics in the same transaction as our B-Tree insert.
1156 KKASSERT(record->leaf.data_offset != 0);
1157 error = hammer_blockmap_finalize(trans,
1158 record->leaf.data_offset,
1159 record->leaf.data_len);
1160 } else if (record->data && record->leaf.data_len) {
1162 * Wholly cached record, with data. Allocate the data.
1164 bdata = hammer_alloc_data(trans, record->leaf.data_len,
1165 record->leaf.base.rec_type,
1166 &record->leaf.data_offset,
1167 &cursor->data_buffer, &error);
1168 if (bdata == NULL)
1169 goto done_unlock;
1170 hammer_crc_set_leaf(record->data, &record->leaf);
1171 hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
1172 bcopy(record->data, bdata, record->leaf.data_len);
1173 hammer_modify_buffer_done(cursor->data_buffer);
1174 } else {
1176 * Wholly cached record, without data.
1178 record->leaf.data_offset = 0;
1179 record->leaf.data_crc = 0;
1182 error = hammer_btree_insert(cursor, &record->leaf, &doprop);
1183 if (hammer_debug_inode && error)
1184 kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);
1187 * Our record is on-disk, normally mark the in-memory version as
1188 * deleted. If the record represented a directory deletion but
1189 * we had to sync a valid directory entry to disk we must convert
1190 * the record to a covering delete so the frontend does not have
1191 * visibility on the synced entry.
1193 if (error == 0) {
1194 if (doprop) {
1195 hammer_btree_do_propagation(cursor,
1196 record->ip->pfsm,
1197 &record->leaf);
1199 if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
1200 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
1201 record->flags &= ~HAMMER_RECF_DELETED_FE;
1202 record->type = HAMMER_MEM_RECORD_DEL;
1203 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1204 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1205 /* hammer_flush_record_done takes care of the rest */
1206 } else {
1207 record->flags |= HAMMER_RECF_DELETED_FE;
1208 record->flags |= HAMMER_RECF_DELETED_BE;
1210 record->flags |= HAMMER_RECF_COMMITTED;
1211 } else {
1212 if (record->leaf.data_offset) {
1213 hammer_blockmap_free(trans, record->leaf.data_offset,
1214 record->leaf.data_len);
1217 done_unlock:
1218 hammer_sync_unlock(trans);
1219 done:
1220 return(error);
1224 * Add the record to the inode's rec_tree. The low 32 bits of a directory
1225 * entry's key is used to deal with hash collisions in the upper 32 bits.
1226 * A unique 64 bit key is generated in-memory and may be regenerated a
1227 * second time when the directory record is flushed to the on-disk B-Tree.
1229 * A referenced record is passed to this function. This function
1230 * eats the reference. If an error occurs the record will be deleted.
1232 * A copy of the temporary record->data pointer provided by the caller
1233 * will be made.
1236 hammer_mem_add(hammer_record_t record)
1238 hammer_mount_t hmp = record->ip->hmp;
1241 * Make a private copy of record->data
1243 if (record->data)
1244 KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);
1247 * Insert into the RB tree. A unique key should have already
1248 * been selected if this is a directory entry.
1250 if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
1251 record->flags |= HAMMER_RECF_DELETED_FE;
1252 hammer_rel_mem_record(record);
1253 return (EEXIST);
1255 ++hmp->count_newrecords;
1256 ++hmp->rsv_recs;
1257 ++record->ip->rsv_recs;
1258 record->ip->hmp->rsv_databytes += record->leaf.data_len;
1259 record->flags |= HAMMER_RECF_ONRBTREE;
1260 hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
1261 hammer_rel_mem_record(record);
1262 return(0);
1265 /************************************************************************
1266 * HAMMER INODE MERGED-RECORD FUNCTIONS *
1267 ************************************************************************
1269 * These functions augment the B-Tree scanning functions in hammer_btree.c
1270 * by merging in-memory records with on-disk records.
1274 * Locate a particular record either in-memory or on-disk.
1276 * NOTE: This is basically a standalone routine, hammer_ip_next() may
1277 * NOT be called to iterate results.
1280 hammer_ip_lookup(hammer_cursor_t cursor)
1282 int error;
1285 * If the element is in-memory return it without searching the
1286 * on-disk B-Tree
1288 KKASSERT(cursor->ip);
1289 error = hammer_mem_lookup(cursor);
1290 if (error == 0) {
1291 cursor->leaf = &cursor->iprec->leaf;
1292 return(error);
1294 if (error != ENOENT)
1295 return(error);
1298 * If the inode has on-disk components search the on-disk B-Tree.
1300 if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
1301 return(error);
1302 error = hammer_btree_lookup(cursor);
1303 if (error == 0)
1304 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1305 return(error);
1309 * Locate the first record within the cursor's key_beg/key_end range,
1310 * restricted to a particular inode. 0 is returned on success, ENOENT
1311 * if no records matched the requested range, or some other error.
1313 * When 0 is returned hammer_ip_next() may be used to iterate additional
1314 * records within the requested range.
1316 * This function can return EDEADLK, requiring the caller to terminate
1317 * the cursor and try again.
1320 hammer_ip_first(hammer_cursor_t cursor)
1322 hammer_inode_t ip = cursor->ip;
1323 int error;
1325 KKASSERT(ip != NULL);
1328 * Clean up fields and setup for merged scan
1330 cursor->flags &= ~HAMMER_CURSOR_RETEST;
1331 cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
1332 cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
1333 if (cursor->iprec) {
1334 hammer_rel_mem_record(cursor->iprec);
1335 cursor->iprec = NULL;
1339 * Search the on-disk B-Tree. hammer_btree_lookup() only does an
1340 * exact lookup so if we get ENOENT we have to call the iterate
1341 * function to validate the first record after the begin key.
1343 * The ATEDISK flag is used by hammer_btree_iterate to determine
1344 * whether it must index forwards or not. It is also used here
1345 * to select the next record from in-memory or on-disk.
1347 * EDEADLK can only occur if the lookup hit an empty internal
1348 * element and couldn't delete it. Since this could only occur
1349 * in-range, we can just iterate from the failure point.
1351 if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
1352 error = hammer_btree_lookup(cursor);
1353 if (error == ENOENT || error == EDEADLK) {
1354 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1355 if (hammer_debug_general & 0x2000)
1356 kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
1357 error = hammer_btree_iterate(cursor);
1359 if (error && error != ENOENT)
1360 return(error);
1361 if (error == 0) {
1362 cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
1363 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1364 } else {
1365 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1370 * Search the in-memory record list (Red-Black tree). Unlike the
1371 * B-Tree search, mem_first checks for records in the range.
1373 error = hammer_mem_first(cursor);
1374 if (error && error != ENOENT)
1375 return(error);
1376 if (error == 0) {
1377 cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
1378 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1379 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
1380 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1384 * This will return the first matching record.
1386 return(hammer_ip_next(cursor));
1390 * Retrieve the next record in a merged iteration within the bounds of the
1391 * cursor. This call may be made multiple times after the cursor has been
1392 * initially searched with hammer_ip_first().
1394 * 0 is returned on success, ENOENT if no further records match the
1395 * requested range, or some other error code is returned.
1398 hammer_ip_next(hammer_cursor_t cursor)
1400 hammer_btree_elm_t elm;
1401 hammer_record_t rec, save;
1402 int error;
1403 int r;
1405 next_btree:
1407 * Load the current on-disk and in-memory record. If we ate any
1408 * records we have to get the next one.
1410 * If we deleted the last on-disk record we had scanned ATEDISK will
1411 * be clear and RETEST will be set, forcing a call to iterate. The
1412 * fact that ATEDISK is clear causes iterate to re-test the 'current'
1413 * element. If ATEDISK is set, iterate will skip the 'current'
1414 * element.
1416 * Get the next on-disk record
1418 if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_RETEST)) {
1419 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1420 error = hammer_btree_iterate(cursor);
1421 cursor->flags &= ~HAMMER_CURSOR_RETEST;
1422 if (error == 0) {
1423 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1424 hammer_cache_node(&cursor->ip->cache[1],
1425 cursor->node);
1426 } else {
1427 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1428 HAMMER_CURSOR_ATEDISK;
1433 next_memory:
1435 * Get the next in-memory record.
1437 * hammer_rec_scan_cmp: Is the record still in our general range,
1438 * (non-inclusive of snapshot exclusions)?
1439 * hammer_rec_scan_callback: Is the record in our snapshot?
1441 if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1442 if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1443 save = cursor->iprec;
1444 cursor->iprec = NULL;
1445 rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
1446 while (rec) {
1447 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1448 break;
1449 if (hammer_rec_scan_callback(rec, cursor) != 0)
1450 break;
1451 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1453 if (save)
1454 hammer_rel_mem_record(save);
1455 if (cursor->iprec) {
1456 KKASSERT(cursor->iprec == rec);
1457 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1458 } else {
1459 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1465 * The memory record may have become stale while being held in
1466 * cursor->iprec. We are interlocked against the backend
1467 * with regard to B-Tree entries.
1469 if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
1470 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
1471 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1472 goto next_memory;
1477 * Extract either the disk or memory record depending on their
1478 * relative position.
1480 error = 0;
1481 switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1482 case 0:
1484 * Both entries valid. Compare the entries and nominally
1485 * return the first one in the sort order. Numerous cases
1486 * require special attention, however.
1488 elm = &cursor->node->ondisk->elms[cursor->index];
1489 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1492 * If the two entries differ only by their key (-2/2) or
1493 * create_tid (-1/1), and are DATA records, we may have a
1494 * nominal match. We have to calculate the base file
1495 * offset of the data.
1497 if (r <= 2 && r >= -2 && r != 0 &&
1498 cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1499 cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1500 int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1501 int64_t base2 = cursor->iprec->leaf.base.key -
1502 cursor->iprec->leaf.data_len;
1503 if (base1 == base2)
1504 r = 0;
1507 if (r < 0) {
1508 error = hammer_btree_extract(cursor,
1509 HAMMER_CURSOR_GET_LEAF);
1510 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1511 break;
1515 * If the entries match exactly the memory entry is either
1516 * an on-disk directory entry deletion or a bulk data
1517 * overwrite. If it is a directory entry deletion we eat
1518 * both entries.
1520 * For the bulk-data overwrite case it is possible to have
1521 * visibility into both, which simply means the syncer
1522 * hasn't gotten around to doing the delete+insert sequence
1523 * on the B-Tree. Use the memory entry and throw away the
1524 * on-disk entry.
1526 * If the in-memory record is not either of these we
1527 * probably caught the syncer while it was syncing it to
1528 * the media. Since we hold a shared lock on the cursor,
1529 * the in-memory record had better be marked deleted at
1530 * this point.
1532 if (r == 0) {
1533 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1534 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1535 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1536 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1537 goto next_btree;
1539 } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1540 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1541 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1543 /* fall through to memory entry */
1544 } else {
1545 panic("hammer_ip_next: duplicate mem/b-tree entry %p %d %08x", cursor->iprec, cursor->iprec->type, cursor->iprec->flags);
1546 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1547 goto next_memory;
1550 /* fall through to the memory entry */
1551 case HAMMER_CURSOR_ATEDISK:
1553 * Only the memory entry is valid.
1555 cursor->leaf = &cursor->iprec->leaf;
1556 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1559 * If the memory entry is an on-disk deletion we should also
1560 * have found a B-Tree record. If the backend beat us
1561 * to it, it would have interlocked the cursor and we should
1562 * have seen the in-memory record marked DELETED_FE.
1564 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1565 (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1566 panic("hammer_ip_next: del-on-disk with no b-tree entry iprec %p flags %08x", cursor->iprec, cursor->iprec->flags);
1568 break;
1569 case HAMMER_CURSOR_ATEMEM:
1571 * Only the disk entry is valid
1573 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1574 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1575 break;
1576 default:
1578 * Neither entry is valid
1580 * XXX error not set properly
1582 cursor->leaf = NULL;
1583 error = ENOENT;
1584 break;
1586 return(error);
1590 * Resolve the cursor->data pointer for the current cursor position in
1591 * a merged iteration.
1594 hammer_ip_resolve_data(hammer_cursor_t cursor)
1596 hammer_record_t record;
1597 int error;
1599 if (hammer_cursor_inmem(cursor)) {
1601 * The data associated with an in-memory record is usually
1602 * kmalloced, but reserve-ahead data records will have an
1603 * on-disk reference.
1605 * NOTE: Reserve-ahead data records must be handled in the
1606 * context of the related high level buffer cache buffer
1607 * to interlock against async writes.
1609 record = cursor->iprec;
1610 cursor->data = record->data;
1611 error = 0;
1612 if (cursor->data == NULL) {
1613 KKASSERT(record->leaf.base.rec_type ==
1614 HAMMER_RECTYPE_DATA);
1615 cursor->data = hammer_bread_ext(cursor->trans->hmp,
1616 record->leaf.data_offset,
1617 record->leaf.data_len,
1618 &error,
1619 &cursor->data_buffer);
1621 } else {
1622 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1623 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1625 return(error);
1629 * Backend truncation / record replacement - delete records in range.
1631 * Delete all records within the specified range for inode ip. In-memory
1632 * records still associated with the frontend are ignored.
1634 * If truncating is non-zero in-memory records associated with the back-end
1635 * are ignored. If truncating is > 1 we can return EWOULDBLOCK.
1637 * NOTES:
1639 * * An unaligned range will cause new records to be added to cover
1640 * the edge cases. (XXX not implemented yet).
1642 * * Replacement via reservations (see hammer_ip_sync_record_cursor())
1643 * also do not deal with unaligned ranges.
1645 * * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1647 * * Record keys for regular file data have to be special-cased since
1648 * they indicate the end of the range (key = base + bytes).
1650 * * This function may be asked to delete ridiculously huge ranges, for
1651 * example if someone truncates or removes a 1TB regular file. We
1652 * must be very careful on restarts and we may have to stop w/
1653 * EWOULDBLOCK to avoid blowing out the buffer cache.
1656 hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1657 int64_t ran_beg, int64_t ran_end, int truncating)
1659 hammer_transaction_t trans = cursor->trans;
1660 hammer_btree_leaf_elm_t leaf;
1661 int error;
1662 int64_t off;
1663 int64_t tmp64;
1665 #if 0
1666 kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1667 #endif
1669 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1670 retry:
1671 hammer_normalize_cursor(cursor);
1672 cursor->key_beg.localization = ip->obj_localization +
1673 HAMMER_LOCALIZE_MISC;
1674 cursor->key_beg.obj_id = ip->obj_id;
1675 cursor->key_beg.create_tid = 0;
1676 cursor->key_beg.delete_tid = 0;
1677 cursor->key_beg.obj_type = 0;
1679 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1680 cursor->key_beg.key = ran_beg;
1681 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1682 } else {
1684 * The key in the B-Tree is (base+bytes), so the first possible
1685 * matching key is ran_beg + 1.
1687 cursor->key_beg.key = ran_beg + 1;
1688 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1691 cursor->key_end = cursor->key_beg;
1692 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1693 cursor->key_end.key = ran_end;
1694 } else {
1695 tmp64 = ran_end + MAXPHYS + 1; /* work around GCC-4 bug */
1696 if (tmp64 < ran_end)
1697 cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1698 else
1699 cursor->key_end.key = ran_end + MAXPHYS + 1;
1702 cursor->asof = ip->obj_asof;
1703 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1704 cursor->flags |= HAMMER_CURSOR_ASOF;
1705 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1706 cursor->flags |= HAMMER_CURSOR_BACKEND;
1707 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
1709 error = hammer_ip_first(cursor);
1712 * Iterate through matching records and mark them as deleted.
1714 while (error == 0) {
1715 leaf = cursor->leaf;
1717 KKASSERT(leaf->base.delete_tid == 0);
1718 KKASSERT(leaf->base.obj_id == ip->obj_id);
1721 * There may be overlap cases for regular file data. Also
1722 * remember the key for a regular file record is (base + len),
1723 * NOT (base).
1725 * Note that due to duplicates (mem & media) allowed by
1726 * DELETE_VISIBILITY, off can wind up less than ran_beg.
1728 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
1729 off = leaf->base.key - leaf->data_len;
1731 * Check the left edge case. We currently do not
1732 * split existing records.
1734 if (off < ran_beg && leaf->base.key > ran_beg) {
1735 panic("hammer left edge case %016llx %d\n",
1736 leaf->base.key, leaf->data_len);
1740 * Check the right edge case. Note that the
1741 * record can be completely out of bounds, which
1742 * terminates the search.
1744 * base->key is exclusive of the right edge while
1745 * ran_end is inclusive of the right edge. The
1746 * (key - data_len) left boundary is inclusive.
1748 * XXX theory-check this test at some point, are
1749 * we missing a + 1 somewhere? Note that ran_end
1750 * could overflow.
1752 if (leaf->base.key - 1 > ran_end) {
1753 if (leaf->base.key - leaf->data_len > ran_end)
1754 break;
1755 panic("hammer right edge case\n");
1757 } else {
1758 off = leaf->base.key;
1762 * Delete the record. When truncating we do not delete
1763 * in-memory (data) records because they represent data
1764 * written after the truncation.
1766 * This will also physically destroy the B-Tree entry and
1767 * data if the retention policy dictates. The function
1768 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
1769 * to retest the new 'current' element.
1771 if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
1772 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1774 * If we have built up too many meta-buffers we risk
1775 * deadlocking the kernel and must stop. This can
1776 * occur when deleting ridiculously huge files.
1777 * sync_trunc_off is updated so the next cycle does
1778 * not re-iterate records we have already deleted.
1780 * This is only done with formal truncations.
1782 if (truncating > 1 && error == 0 &&
1783 hammer_flusher_meta_limit(ip->hmp)) {
1784 ip->sync_trunc_off = off;
1785 error = EWOULDBLOCK;
1788 if (error)
1789 break;
1790 ran_beg = off; /* for restart */
1791 error = hammer_ip_next(cursor);
1793 if (cursor->node)
1794 hammer_cache_node(&ip->cache[1], cursor->node);
1796 if (error == EDEADLK) {
1797 hammer_done_cursor(cursor);
1798 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1799 if (error == 0)
1800 goto retry;
1802 if (error == ENOENT)
1803 error = 0;
1804 return(error);
1808 * This backend function deletes the specified record on-disk, similar to
1809 * delete_range but for a specific record. Unlike the exact deletions
1810 * used when deleting a directory entry this function uses an ASOF search
1811 * like delete_range.
1813 * This function may be called with ip->obj_asof set for a slave snapshot,
1814 * so don't use it. We always delete non-historical records only.
1816 static int
1817 hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
1818 hammer_btree_leaf_elm_t leaf)
1820 hammer_transaction_t trans = cursor->trans;
1821 int error;
1823 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1824 retry:
1825 hammer_normalize_cursor(cursor);
1826 cursor->key_beg = leaf->base;
1827 cursor->asof = HAMMER_MAX_TID;
1828 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1829 cursor->flags |= HAMMER_CURSOR_ASOF;
1830 cursor->flags |= HAMMER_CURSOR_BACKEND;
1831 cursor->flags &= ~HAMMER_CURSOR_INSERT;
1833 error = hammer_btree_lookup(cursor);
1834 if (error == 0) {
1835 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1837 if (error == EDEADLK) {
1838 hammer_done_cursor(cursor);
1839 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1840 if (error == 0)
1841 goto retry;
1843 return(error);
1847 * This function deletes remaining auxiliary records when an inode is
1848 * being deleted. This function explicitly does not delete the
1849 * inode record, directory entry, data, or db records. Those must be
1850 * properly disposed of prior to this call.
1853 hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
1855 hammer_transaction_t trans = cursor->trans;
1856 hammer_btree_leaf_elm_t leaf;
1857 int error;
1859 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1860 retry:
1861 hammer_normalize_cursor(cursor);
1862 cursor->key_beg.localization = ip->obj_localization +
1863 HAMMER_LOCALIZE_MISC;
1864 cursor->key_beg.obj_id = ip->obj_id;
1865 cursor->key_beg.create_tid = 0;
1866 cursor->key_beg.delete_tid = 0;
1867 cursor->key_beg.obj_type = 0;
1868 cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
1869 cursor->key_beg.key = HAMMER_MIN_KEY;
1871 cursor->key_end = cursor->key_beg;
1872 cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
1873 cursor->key_end.key = HAMMER_MAX_KEY;
1875 cursor->asof = ip->obj_asof;
1876 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1877 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1878 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1879 cursor->flags |= HAMMER_CURSOR_BACKEND;
1881 error = hammer_ip_first(cursor);
1884 * Iterate through matching records and mark them as deleted.
1886 while (error == 0) {
1887 leaf = cursor->leaf;
1889 KKASSERT(leaf->base.delete_tid == 0);
1892 * Mark the record and B-Tree entry as deleted. This will
1893 * also physically delete the B-Tree entry, record, and
1894 * data if the retention policy dictates. The function
1895 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
1896 * to retest the new 'current' element.
1898 * Directory entries (and delete-on-disk directory entries)
1899 * must be synced and cannot be deleted.
1900 */
1901 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1902 ++*countp;
1903 if (error)
1904 break;
1905 error = hammer_ip_next(cursor);
1906 }
1907 if (cursor->node)
1908 hammer_cache_node(&ip->cache[1], cursor->node);
1909 if (error == EDEADLK) {
1910 hammer_done_cursor(cursor);
1911 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1912 if (error == 0)
1913 goto retry;
1914 }
1915 if (error == ENOENT)
1916 error = 0;
1917 return(error);
1918 }
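/*
 * Usage sketch (hypothetical, for illustration only): the caller supplies
 * a counter that accumulates the number of auxiliary records disposed of,
 * e.g. while finalizing an inode deletion:
 *
 *	int count = 0;
 *
 *	error = hammer_ip_delete_clean(cursor, ip, &count);
 */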
1920 /*
1921 * Delete the record at the current cursor. On success the cursor will
1922 * be positioned appropriately for an iteration but may no longer be at
1923 * a leaf node.
1925 * This routine is only called from the backend.
1927 * NOTE: This can return EDEADLK, requiring the caller to terminate the
1928 * cursor and retry.
1929 */
1930 int
1931 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1932 hammer_tid_t tid)
1933 {
1934 hammer_record_t iprec;
1935 hammer_mount_t hmp;
1936 int error;
1938 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1939 KKASSERT(tid != 0);
1940 hmp = cursor->node->hmp;
1942 /*
1943 * In-memory (unsynchronized) records can simply be freed. This
1944 * only occurs in range iterations since all other records are
1945 * individually synchronized. Thus there should be no confusion with
1946 * the interlock.
1948 * An in-memory record may be deleted before being committed to disk,
1949 * but could have been accessed in the meantime. The reservation
1950 * code will deal with the case.
1951 */
1952 if (hammer_cursor_inmem(cursor)) {
1953 iprec = cursor->iprec;
1954 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1955 iprec->flags |= HAMMER_RECF_DELETED_FE;
1956 iprec->flags |= HAMMER_RECF_DELETED_BE;
1957 return(0);
1958 }
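/*
 * Note (descriptive, an assumption about the flag semantics rather than
 * text from the original source): setting both HAMMER_RECF_DELETED_FE and
 * HAMMER_RECF_DELETED_BE above hides the in-memory record from the
 * frontend and tells the backend that it no longer needs to be flushed,
 * so the record can simply be freed once its last reference goes away.
 */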
1960 /*
1961 * On-disk records are marked as deleted by updating their delete_tid.
1962 * This does not affect their position in the B-Tree (which is based
1963 * on their create_tid).
1965 * Frontend B-Tree operations track inodes so we tell
1966 * hammer_delete_at_cursor() not to.
1967 */
1968 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1970 if (error == 0) {
1971 error = hammer_delete_at_cursor(
1972 cursor,
1973 HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
1974 cursor->trans->tid,
1975 cursor->trans->time32,
1976 0, NULL);
1977 }
1978 return(error);
1979 }
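/*
 * Note on the flags composed above (hedged; hammer_nohistory() is defined
 * elsewhere in the HAMMER headers): HAMMER_DELETE_ADJUST marks the element
 * deleted by setting its delete_tid, while hammer_nohistory(ip) is expected
 * to add HAMMER_DELETE_DESTROY for no-history inodes or mounts so the
 * element and its data are physically reclaimed instead of being retained
 * for historical access.
 */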
1981 /*
1982 * Delete the B-Tree element at the current cursor and do any necessary
1983 * mirror propagation.
1985 * The cursor must be properly positioned for an iteration on return but
1986 * may be pointing at an internal element.
1988 * An element can be un-deleted by passing a delete_tid of 0 with
1989 * HAMMER_DELETE_ADJUST.
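*
* For illustration (a hedged sketch, not a call lifted from this file),
* such a mirroring-style un-delete would look like:
*
*	error = hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST,
*					0, 0, 0, NULL);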
1990 */
1991 int
1992 hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
1993 hammer_tid_t delete_tid, u_int32_t delete_ts,
1994 int track, int64_t *stat_bytes)
1995 {
1996 struct hammer_btree_leaf_elm save_leaf;
1997 hammer_transaction_t trans;
1998 hammer_btree_leaf_elm_t leaf;
1999 hammer_node_t node;
2000 hammer_btree_elm_t elm;
2001 hammer_off_t data_offset;
2002 int32_t data_len;
2003 u_int16_t rec_type;
2004 int error;
2005 int icount;
2006 int doprop;
2008 error = hammer_cursor_upgrade(cursor);
2009 if (error)
2010 return(error);
2012 trans = cursor->trans;
2013 node = cursor->node;
2014 elm = &node->ondisk->elms[cursor->index];
2015 leaf = &elm->leaf;
2016 KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
2018 hammer_sync_lock_sh(trans);
2019 doprop = 0;
2020 icount = 0;
2022 /*
2023 * Adjust the delete_tid. Update the mirror_tid propagation field
2024 * as well. delete_tid can be 0 (undelete -- used by mirroring).
2025 */
2026 if (delete_flags & HAMMER_DELETE_ADJUST) {
2027 if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
2028 if (elm->leaf.base.delete_tid == 0 && delete_tid)
2029 icount = -1;
2030 if (elm->leaf.base.delete_tid && delete_tid == 0)
2031 icount = 1;
2032 }
2034 hammer_modify_node(trans, node, elm, sizeof(*elm));
2035 elm->leaf.base.delete_tid = delete_tid;
2036 elm->leaf.delete_ts = delete_ts;
2037 hammer_modify_node_done(node);
2039 if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
2040 hammer_modify_node_field(trans, node, mirror_tid);
2041 node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
2042 hammer_modify_node_done(node);
2043 doprop = 1;
2044 if (hammer_debug_general & 0x0002) {
2045 kprintf("delete_at_cursor: propagate %016llx"
2046 " @%016llx\n",
2047 elm->leaf.base.delete_tid,
2048 node->node_offset);
2049 }
2050 }
2052 /*
2053 * Adjust for the iteration. We have deleted the current
2054 * element and want to clear ATEDISK so the iteration does
2055 * not skip the element after, which now becomes the current
2056 * element. This element must be re-tested if doing an
2057 * iteration, which is handled by the RETEST flag.
2058 */
2059 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2060 cursor->flags |= HAMMER_CURSOR_RETEST;
2061 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2062 }
2064 /*
2065 * An on-disk record cannot have the same delete_tid
2066 * as its create_tid. In a chain of record updates
2067 * this could result in a duplicate record.
2068 */
2069 KKASSERT(elm->leaf.base.delete_tid !=
2070 elm->leaf.base.create_tid);
2071 }
2073 /*
2074 * Destroy the B-Tree element if asked (typically if a nohistory
2075 * file or mount, or when called by the pruning code).
2077 * Adjust the ATEDISK flag to properly support iterations.
2078 */
2079 if (delete_flags & HAMMER_DELETE_DESTROY) {
2080 data_offset = elm->leaf.data_offset;
2081 data_len = elm->leaf.data_len;
2082 rec_type = elm->leaf.base.rec_type;
2083 if (doprop) {
2084 save_leaf = elm->leaf;
2085 leaf = &save_leaf;
2086 }
2087 if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
2088 elm->leaf.base.delete_tid == 0) {
2089 icount = -1;
2090 }
2092 error = hammer_btree_delete(cursor);
2093 if (error == 0) {
2094 /*
2095 * The deletion moves the next element (if any) to
2096 * the current element position. We must clear
2097 * ATEDISK so this element is not skipped and we
2098 * must set RETEST to force any iteration to re-test
2099 * the element.
2100 */
2101 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2102 cursor->flags |= HAMMER_CURSOR_RETEST;
2103 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2104 }
2105 }
2106 if (error == 0) {
2107 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
2108 case HAMMER_ZONE_LARGE_DATA:
2109 case HAMMER_ZONE_SMALL_DATA:
2110 case HAMMER_ZONE_META:
2111 hammer_blockmap_free(trans,
2112 data_offset, data_len);
2113 break;
2114 default:
2115 break;
2116 }
2117 }
2118 }
2120 /*
2121 * Track inode count and next_tid. This is used by the mirroring
2122 * and PFS code. icount can be negative, zero, or positive.
2123 */
2124 if (error == 0 && track) {
2125 if (icount) {
2126 hammer_modify_volume_field(trans, trans->rootvol,
2127 vol0_stat_inodes);
2128 trans->rootvol->ondisk->vol0_stat_inodes += icount;
2129 hammer_modify_volume_done(trans->rootvol);
2130 }
2131 if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
2132 hammer_modify_volume(trans, trans->rootvol, NULL, 0);
2133 trans->rootvol->ondisk->vol0_next_tid = delete_tid;
2134 hammer_modify_volume_done(trans->rootvol);
2135 }
2136 }
2138 /*
2139 * mirror_tid propagation occurs if the node's mirror_tid had to be
2140 * updated while adjusting the delete_tid.
2142 * This occurs when deleting even in nohistory mode, but does not
2143 * occur when pruning an already-deleted node.
2145 * cursor->ip is NULL when called from the pruning, mirroring,
2146 * and pfs code. If it is non-NULL, propagation is conditional on
2147 * whether the PFS is in no-history mode or not.
2148 */
2149 if (doprop) {
2150 if (cursor->ip)
2151 hammer_btree_do_propagation(cursor, cursor->ip->pfsm, leaf);
2152 else
2153 hammer_btree_do_propagation(cursor, NULL, leaf);
2154 }
2155 hammer_sync_unlock(trans);
2156 return (error);
2157 }
2159 /*
2160 * Determine whether we can remove a directory. This routine checks whether
2161 * a directory is empty or not and enforces flush connectivity.
2163 * Flush connectivity requires that we block if the target directory is
2164 * currently flushing, otherwise it may not end up in the same flush group.
2166 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
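*
* Illustrative caller sketch (hypothetical, added for this comment only;
* error handling elided):
*
*	error = hammer_ip_check_directory_empty(trans, dip);
*	if (error)
*		return (error);		/* ENOTEMPTY, EDEADLK, ... */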
2167 */
2168 int
2169 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
2170 {
2171 struct hammer_cursor cursor;
2172 int error;
2174 /*
2175 * Check directory empty
2176 */
2177 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2179 cursor.key_beg.localization = ip->obj_localization +
2180 HAMMER_LOCALIZE_MISC;
2181 cursor.key_beg.obj_id = ip->obj_id;
2182 cursor.key_beg.create_tid = 0;
2183 cursor.key_beg.delete_tid = 0;
2184 cursor.key_beg.obj_type = 0;
2185 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
2186 cursor.key_beg.key = HAMMER_MIN_KEY;
2188 cursor.key_end = cursor.key_beg;
2189 cursor.key_end.rec_type = 0xFFFF;
2190 cursor.key_end.key = HAMMER_MAX_KEY;
2192 cursor.asof = ip->obj_asof;
2193 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2195 error = hammer_ip_first(&cursor);
2196 if (error == ENOENT)
2197 error = 0;
2198 else if (error == 0)
2199 error = ENOTEMPTY;
2200 hammer_done_cursor(&cursor);
2201 return(error);
2202 }