/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.90 2008/07/14 03:20:49 dillon Exp $
 */

#include "hammer.h"
static int hammer_mem_add(hammer_record_t record);
static int hammer_mem_lookup(hammer_cursor_t cursor);
static int hammer_mem_first(hammer_cursor_t cursor);
static int hammer_frontend_trunc_callback(hammer_record_t record,
				void *data __unused);
static int hammer_record_needs_overwrite_delete(hammer_record_t record);
static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
				hammer_btree_leaf_elm_t leaf);

struct rec_trunc_info {
	u_int16_t	rec_type;
	int64_t		trunc_off;
};
/*
 * Red-black tree support.  Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
	if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
		return(-1);
	if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
		return(1);

	if (rec1->leaf.base.key < rec2->leaf.base.key)
		return(-1);
	if (rec1->leaf.base.key > rec2->leaf.base.key)
		return(1);

	/*
	 * Never match against an item deleted by the front-end.
	 *
	 * rec1 is greater than rec2 if rec1 is marked deleted.
	 * rec1 is less than rec2 if rec2 is marked deleted.
	 *
	 * Multiple deleted records may be present, do not return 0
	 * if both are marked deleted.
	 */
	if (rec1->flags & HAMMER_RECF_DELETED_FE)
		return(1);
	if (rec2->flags & HAMMER_RECF_DELETED_FE)
		return(-1);

	return(0);
}
/*
 * Basic record comparison code similar to hammer_btree_cmp().
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
	if (elm->rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (elm->rec_type > rec->leaf.base.rec_type)
		return(3);

	if (elm->key < rec->leaf.base.key)
		return(-2);
	if (elm->key > rec->leaf.base.key)
		return(2);

	/*
	 * Never match against an item deleted by the front-end.
	 * elm is less than rec if rec is marked deleted.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_FE)
		return(-1);
	return(0);
}
/*
 * Special LOOKUP_INFO to locate an overlapping record.  This is used by
 * the reservation code to implement small-block records (whose keys will
 * be different depending on data_len, when representing the same base
 * offset).
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
static int
hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
{
	if (leaf->base.rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (leaf->base.rec_type > rec->leaf.base.rec_type)
		return(3);

	/*
	 * Overlap compare
	 */
	if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
		/* leaf_end <= rec_beg */
		if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
			return(-2);
		/* leaf_beg >= rec_end */
		if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
			return(2);
	} else {
		if (leaf->base.key < rec->leaf.base.key)
			return(-2);
		if (leaf->base.key > rec->leaf.base.key)
			return(2);
	}

	/*
	 * Never match against an item deleted by the front-end.
	 * leaf is less than rec if rec is marked deleted.
	 *
	 * We must still return the proper code for the scan to continue
	 * along the correct branches.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_FE) {
		if (leaf->base.key < rec->leaf.base.key)
			return(-2);
		if (leaf->base.key > rec->leaf.base.key)
			return(2);
		return(-1);
	}
	return(0);
}
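
/*
 * Illustrative sketch (kept under #if 0, not compiled): how the DATA key
 * convention (key = base offset + data_len) drives the overlap test above.
 * The offsets below are hypothetical.
 */
#if 0
static void
example_overlap_compare(hammer_record_t rec)
{
	struct hammer_btree_leaf_elm leaf;

	bzero(&leaf, sizeof(leaf));
	leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	leaf.base.key = 16384;		/* covers file offsets [0, 16384) */
	leaf.data_len = 16384;

	/*
	 * If rec covers [16384, 32768) (key 32768, data_len 16384) the
	 * ranges only touch and the compare returns -2 (leaf < rec).
	 * If rec covers [8192, 24576) the ranges intersect and the
	 * compare returns 0; records marked HAMMER_RECF_DELETED_FE
	 * never return a match.
	 */
	kprintf("overlap = %d\n", hammer_rec_overlap_compare(&leaf, rec));
}
#endif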
/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
static
int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	r = hammer_rec_cmp(&cursor->key_end, rec);
	if (r < -1)
		return(1);
	return(0);
}

/*
 * This compare function is used when simply looking up key_beg.
 */
static
int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	if (r < -1)
		return(1);
	return(0);
}
/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static
int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
	struct rec_trunc_info *info = data;

	if (rec->leaf.base.rec_type < info->rec_type)
		return(-1);
	if (rec->leaf.base.rec_type > info->rec_type)
		return(1);

	switch(rec->leaf.base.rec_type) {
	case HAMMER_RECTYPE_DB:
		/*
		 * DB record key is not beyond the truncation point, retain.
		 */
		if (rec->leaf.base.key < info->trunc_off)
			return(-1);
		break;
	case HAMMER_RECTYPE_DATA:
		/*
		 * DATA record offset start is not beyond the truncation point,
		 * retain.
		 */
		if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
			return(-1);
		break;
	default:
		panic("hammer_rec_trunc_cmp: unexpected record type");
	}

	/*
	 * The record start is >= the truncation point, return match,
	 * the record should be destroyed.
	 */
	return(0);
}
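
/*
 * Illustrative sketch (kept under #if 0, not compiled): what the compare
 * above matches for a regular file truncated at a hypothetical offset of
 * 8192.  A DATA record covering [8192, 16384) (key 16384, data_len 8192)
 * starts at the truncation point and matches (destroy); one covering
 * [0, 8192) starts below it and is retained.
 */
#if 0
static int
example_trunc_match(hammer_record_t rec)
{
	struct rec_trunc_info info;

	info.rec_type = HAMMER_RECTYPE_DATA;
	info.trunc_off = 8192;
	return(hammer_rec_trunc_cmp(rec, &info) == 0);
}
#endif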
RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
		    hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);
/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
	hammer_record_t record;

	++hammer_count_records;
	record = kmalloc(sizeof(*record), M_HAMMER,
			 M_WAITOK | M_ZERO | M_USE_RESERVE);
	record->flush_state = HAMMER_FST_IDLE;
	record->ip = ip;
	record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	record->leaf.data_len = data_len;
	hammer_ref(&record->lock);

	if (data_len) {
		record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
		record->flags |= HAMMER_RECF_ALLOCDATA;
		++hammer_count_record_datas;
	}

	return (record);
}
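
/*
 * Illustrative sketch (kept under #if 0, not compiled): minimal lifecycle
 * of a record from the allocator above.  The record comes back referenced;
 * marking it HAMMER_RECF_DELETED_FE before the final release is what makes
 * hammer_rel_mem_record() destroy it.  The rec_type/key values are
 * hypothetical.
 */
#if 0
static void
example_alloc_release(hammer_inode_t ip)
{
	hammer_record_t record;

	record = hammer_alloc_mem_record(ip, 0);
	record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	record->leaf.base.key = 0;
	/* ... fill in the remaining leaf fields, or queue via mem_add ... */
	record->flags |= HAMMER_RECF_DELETED_FE;
	hammer_rel_mem_record(record);	/* last ref, record is destroyed */
}
#endif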
void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
	while (record->flush_state == HAMMER_FST_FLUSH) {
		record->flags |= HAMMER_RECF_WANTED;
		tsleep(record, 0, ident, 0);
	}
}
/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
	hammer_inode_t target_ip;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

	if (error) {
		/*
		 * An error occurred, the backend was unable to sync the
		 * record to its media.  Leave the record intact.
		 */
		Debugger("flush_record_done error");
	}

	--record->flush_group->refs;
	record->flush_group = NULL;

	if (record->flags & HAMMER_RECF_DELETED_BE) {
		if ((target_ip = record->target_ip) != NULL) {
			TAILQ_REMOVE(&target_ip->target_list, record,
				     target_entry);
			record->target_ip = NULL;
			hammer_test_inode(target_ip);
		}
		record->flush_state = HAMMER_FST_IDLE;
	} else {
		if (record->target_ip) {
			record->flush_state = HAMMER_FST_SETUP;
			hammer_test_inode(record->ip);
			hammer_test_inode(record->target_ip);
		} else {
			record->flush_state = HAMMER_FST_IDLE;
		}
	}
	record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
	if (record->flags & HAMMER_RECF_WANTED) {
		record->flags &= ~HAMMER_RECF_WANTED;
		wakeup(record);
	}
	hammer_rel_mem_record(record);
}
/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
	hammer_mount_t hmp;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t target_ip;

	hammer_unref(&record->lock);

	if (record->lock.refs == 0) {
		/*
		 * Upon release of the last reference wakeup any waiters.
		 * The record structure may get destroyed so callers will
		 * loop up and do a relookup.
		 *
		 * WARNING!  Record must be removed from RB-TREE before we
		 * might possibly block.  hammer_test_inode() can block!
		 */
		ip = record->ip;
		hmp = ip->hmp;

		/*
		 * Upon release of the last reference a record marked deleted
		 * is destroyed.
		 */
		if (record->flags & HAMMER_RECF_DELETED_FE) {
			KKASSERT(ip->lock.refs > 0);
			KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

			/*
			 * target_ip may have zero refs, we have to ref it
			 * to prevent it from being ripped out from under
			 * us.
			 */
			if ((target_ip = record->target_ip) != NULL) {
				TAILQ_REMOVE(&target_ip->target_list,
					     record, target_entry);
				record->target_ip = NULL;
				hammer_ref(&target_ip->lock);
			}

			if (record->flags & HAMMER_RECF_ONRBTREE) {
				RB_REMOVE(hammer_rec_rb_tree,
					  &record->ip->rec_tree,
					  record);
				KKASSERT(ip->rsv_recs > 0);
				--hmp->rsv_recs;
				--ip->rsv_recs;
				hmp->rsv_databytes -= record->leaf.data_len;
				record->flags &= ~HAMMER_RECF_ONRBTREE;

				if (RB_EMPTY(&record->ip->rec_tree)) {
					record->ip->flags &= ~HAMMER_INODE_XDIRTY;
					record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
					hammer_test_inode(record->ip);
				}
			}

			/*
			 * We must wait for any direct-IO to complete before
			 * we can destroy the record.
			 */
			if (record->flags & HAMMER_RECF_DIRECT_IO)
				hammer_io_direct_wait(record);

			/*
			 * Do this test after removing record from the B-Tree.
			 */
			if (target_ip) {
				hammer_test_inode(target_ip);
				hammer_rel_inode(target_ip, 0);
			}

			if (record->flags & HAMMER_RECF_ALLOCDATA) {
				--hammer_count_record_datas;
				kfree(record->data, M_HAMMER);
				record->flags &= ~HAMMER_RECF_ALLOCDATA;
			}

			/*
			 * Release the reservation.  If the record was not
			 * committed return the reservation before
			 * releasing it.
			 */
			if ((resv = record->resv) != NULL) {
				if ((record->flags & HAMMER_RECF_COMMITTED) == 0) {
					hammer_blockmap_reserve_undo(
						resv,
						record->leaf.data_offset,
						record->leaf.data_len);
				}
				hammer_blockmap_reserve_complete(hmp, resv);
				record->resv = NULL;
			}
			record->data = NULL;
			--hammer_count_records;
			kfree(record, M_HAMMER);
		}
	}
}
/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.
 */
static __inline
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
	if (cursor->flags & HAMMER_CURSOR_BACKEND) {
		if (record->flags & HAMMER_RECF_DELETED_BE)
			return(0);
	} else {
		if (record->flags & HAMMER_RECF_DELETED_FE)
			return(0);
	}
	return(1);
}

/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static
int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;

	/*
	 * We terminate on success, so this should be NULL on entry.
	 */
	KKASSERT(cursor->iprec == NULL);

	/*
	 * Skip if the record was marked deleted.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
		return(0);

	/*
	 * Skip if not visible due to our as-of TID
	 */
	if (cursor->flags & HAMMER_CURSOR_ASOF) {
		if (cursor->asof < rec->leaf.base.create_tid)
			return(0);
		if (rec->leaf.base.delete_tid &&
		    cursor->asof >= rec->leaf.base.delete_tid) {
			return(0);
		}
	}

	/*
	 * ref the record.  The record is protected from backend B-Tree
	 * interactions by virtue of the cursor's IP lock.
	 */
	hammer_ref(&rec->lock);

	/*
	 * The record may have been deleted while we were blocked.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
		hammer_rel_mem_record(rec);
		return(0);
	}

	/*
	 * Set the matching record and stop the scan.
	 */
	cursor->iprec = rec;
	return(-1);
}
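
/*
 * Illustrative sketch (kept under #if 0, not compiled): the as-of
 * visibility window enforced by the callback above.  A record is visible
 * at a given asof TID when create_tid <= asof and the record is either
 * not deleted or deleted strictly after asof.
 */
#if 0
static int
example_visible_asof(hammer_record_t rec, hammer_tid_t asof)
{
	if (asof < rec->leaf.base.create_tid)
		return(0);	/* created after the as-of point */
	if (rec->leaf.base.delete_tid && asof >= rec->leaf.base.delete_tid)
		return(0);	/* deleted at or before the as-of point */
	return(1);
}
#endif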
/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
	int error;

	KKASSERT(cursor->ip);
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
	hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
				   hammer_rec_scan_callback, cursor);

	if (cursor->iprec == NULL)
		error = ENOENT;
	else
		error = 0;
	return(error);
}

/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 */
static
int
hammer_mem_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	ip = cursor->ip;
	KKASSERT(ip != NULL);

	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
				   hammer_rec_scan_callback, cursor);

	/*
	 * Adjust scan.node and keep it linked into the RB-tree so we can
	 * hold the cursor through third party modifications of the RB-tree.
	 */
	if (cursor->iprec)
		return(0);
	return(ENOENT);
}
/************************************************************************
 *		     HAMMER IN-MEMORY RECORD FUNCTIONS			*
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */
/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 *
 * NOTE: bytes does not include any terminating \0 on name, and name might
 * not be terminated.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
		     struct hammer_inode *dip, const char *name, int bytes,
		     struct hammer_inode *ip)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	int error;
	int count;
	u_int32_t iterator;

	record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
	if (++trans->hmp->namekey_iterator == 0)
		++trans->hmp->namekey_iterator;

	record->type = HAMMER_MEM_RECORD_ADD;
	record->leaf.base.localization = dip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.base.obj_id = dip->obj_id;
	record->leaf.base.key = hammer_directory_namekey(name, bytes);
	record->leaf.base.key += trans->hmp->namekey_iterator;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->data->entry.obj_id = ip->obj_id;
	record->data->entry.localization = ip->obj_localization;
	bcopy(name, record->data->entry.name, bytes);

	++ip->ino_data.nlinks;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

	/*
	 * Find an unused namekey.  Both the in-memory record tree and
	 * the B-Tree are checked.  Exact matches also match create_tid
	 * so use an ASOF search to (mostly) ignore it.
	 *
	 * delete-visibility is set so pending deletions do not give us
	 * a false-negative on our ability to use an iterator.
	 */
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg = record->leaf.base;
	cursor.flags |= HAMMER_CURSOR_ASOF;
	cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor.asof = ip->obj_asof;

	count = 0;
	while (hammer_ip_lookup(&cursor) == 0) {
		iterator = (u_int32_t)record->leaf.base.key + 1;
		if (iterator == 0)
			iterator = 1;
		record->leaf.base.key &= ~0xFFFFFFFFLL;
		record->leaf.base.key |= iterator;
		cursor.key_beg.key = record->leaf.base.key;
		if (++count == 1000000000) {
			hammer_rel_mem_record(record);
			error = ENOSPC;
			goto failed;
		}
	}

	/*
	 * The target inode and the directory entry are bound together.
	 */
	record->target_ip = ip;
	record->flush_state = HAMMER_FST_SETUP;
	TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

	/*
	 * The inode now has a dependency and must be taken out of the idle
	 * state.  An inode not in an idle state is given an extra reference.
	 */
	if (ip->flush_state == HAMMER_FST_IDLE) {
		hammer_ref(&ip->lock);
		ip->flush_state = HAMMER_FST_SETUP;
	}
	error = hammer_mem_add(record);
failed:
	hammer_done_cursor(&cursor);
	return(error);
}
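
/*
 * Illustrative sketch (kept under #if 0, not compiled): the low-32-bit
 * collision iteration performed in the loop above.  The upper 32 bits of
 * a directory key come from the name hash; the lower 32 bits are bumped,
 * skipping 0, until an unused key is found.
 */
#if 0
static int64_t
example_next_namekey(int64_t key)
{
	u_int32_t iterator;

	iterator = (u_int32_t)key + 1;
	if (iterator == 0)
		iterator = 1;
	return((key & ~0xFFFFFFFFLL) | iterator);
}
#endif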
/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be seeked to the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
		     hammer_cursor_t cursor, struct hammer_inode *dip,
		     struct hammer_inode *ip)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * In-memory (unsynchronized) records can simply be freed.
		 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
		 * by the backend, we must still avoid races against the
		 * backend potentially syncing the record to the media.
		 *
		 * We cannot call hammer_ip_delete_record(), that routine may
		 * only be called from the backend.
		 */
		record = cursor->iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor->deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor->deadlk_rec = record;
			error = EDEADLK;
		} else {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	} else {
		/*
		 * If the record is on-disk we have to queue the deletion by
		 * the record's key.  This also causes lookups to skip the
		 * record.
		 */
		KKASSERT(dip->flags &
			 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
		record = hammer_alloc_mem_record(dip, 0);
		record->type = HAMMER_MEM_RECORD_DEL;
		record->leaf.base = cursor->leaf->base;

		record->target_ip = ip;
		record->flush_state = HAMMER_FST_SETUP;
		TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

		/*
		 * The inode now has a dependency and must be taken out of
		 * the idle state.  An inode not in an idle state is given
		 * an extra reference.
		 */
		if (ip->flush_state == HAMMER_FST_IDLE) {
			hammer_ref(&ip->lock);
			ip->flush_state = HAMMER_FST_SETUP;
		}

		error = hammer_mem_add(record);
	}

	/*
	 * One less link.  The file may still be open in the OS even after
	 * all links have gone away.
	 *
	 * We have to terminate the cursor before syncing the inode to
	 * avoid deadlocking against ourselves.  XXX this may no longer
	 * be true.
	 *
	 * If nlinks drops to zero and the vnode is inactive (or there is
	 * no vnode), call hammer_inode_unloadable_check() to zonk the
	 * inode.  If we don't do this here the inode will not be destroyed
	 * on-media until we unmount.
	 */
	if (error == 0) {
		--ip->ino_data.nlinks;
		hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		if (ip->ino_data.nlinks == 0 &&
		    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
			hammer_done_cursor(cursor);
			hammer_inode_unloadable_check(ip, 1);
			hammer_flush_inode(ip, 0);
		}
	}

	return(error);
}
/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 *	record->rec.entry.base.base.key
 *	record->rec.entry.base.base.rec_type
 *	record->rec.entry.base.base.data_len
 *	record->data	(a copy will be kmalloc'd if it cannot be embedded)
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int error;

	KKASSERT(record->leaf.base.localization != 0);
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	error = hammer_mem_add(record);
	return(error);
}
/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
	hammer_record_t record;
	struct hammer_btree_leaf_elm leaf;

	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = ip->obj_id;
	leaf.base.key = file_offset + bytes;
	leaf.base.create_tid = 0;
	leaf.base.delete_tid = 0;
	leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	leaf.base.obj_type = 0;				/* unused */
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;	/* unused */
	leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_MISC;
	leaf.data_len = bytes;

	record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
	if (record)
		hammer_ref(&record->lock);
	return(record);
}
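
/*
 * Illustrative sketch (kept under #if 0, not compiled): looking up a bulk
 * record by its file range.  Note the lookup leaf's key is the *end* of
 * the range (file_offset + bytes), matching the DATA key convention.  The
 * file offset is hypothetical.
 */
#if 0
static void
example_find_bulk(hammer_inode_t ip)
{
	hammer_record_t record;

	record = hammer_ip_get_bulk(ip, 65536, HAMMER_BUFSIZE);
	if (record) {
		/* returned referenced */
		hammer_rel_mem_record(record);
	}
}
#endif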
/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the frontend in order to be able to directly
 * flush a buffer cache buffer.  The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
		   int *errorp)
{
	hammer_record_t record;
	hammer_record_t conflict;
	int zone;
	int flags;

	/*
	 * Deal with conflicting in-memory records.  We cannot have multiple
	 * in-memory records for the same offset without seriously confusing
	 * the backend, including but not limited to the backend issuing
	 * delete-create-delete sequences and asserting on the delete_tid
	 * being the same as the create_tid.
	 *
	 * If we encounter a record with the backend interlock set we cannot
	 * immediately delete it without confusing the backend.
	 */
	while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
		if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
			conflict->flags |= HAMMER_RECF_WANTED;
			tsleep(conflict, 0, "hmrrc3", 0);
		} else {
			conflict->flags |= HAMMER_RECF_DELETED_FE;
		}
		hammer_rel_mem_record(conflict);
	}

	/*
	 * Create a record to cover the direct write.  This is called with
	 * the related BIO locked so there should be no possible conflict.
	 *
	 * The backend is responsible for finalizing the space reserved in
	 * this record.
	 *
	 * XXX bytes not aligned, depend on the reservation code to
	 * align the reservation.
	 */
	record = hammer_alloc_mem_record(ip, 0);
	zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
					   HAMMER_ZONE_SMALL_DATA_INDEX;
	record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
					       &record->leaf.data_offset,
					       errorp);
	if (record->resv == NULL) {
		kprintf("hammer_ip_add_bulk: reservation failed\n");
		hammer_rel_mem_record(record);
		return(NULL);
	}
	record->type = HAMMER_MEM_RECORD_DATA;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.key = file_offset + bytes;
	record->leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.data_len = bytes;
	hammer_crc_set_leaf(data, &record->leaf);
	flags = record->flags;

	hammer_ref(&record->lock);	/* mem_add eats a reference */
	*errorp = hammer_mem_add(record);
	if (*errorp) {
		conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
		kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
			*errorp, conflict, file_offset, bytes);
		if (conflict)
			kprintf("conflict %lld %d\n", conflict->leaf.base.key, conflict->leaf.data_len);
		if (conflict)
			hammer_rel_mem_record(conflict);
	}
	KKASSERT(*errorp == 0);
	conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
	if (conflict != record) {
		kprintf("conflict mismatch %p %p %08x\n", conflict, record, record->flags);
		if (conflict)
			kprintf("conflict mismatch %lld/%d %lld/%d\n", conflict->leaf.base.key, conflict->leaf.data_len, record->leaf.base.key, record->leaf.data_len);
	}
	KKASSERT(conflict == record);
	hammer_rel_mem_record(conflict);

	return (record);
}
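
/*
 * Illustrative sketch (kept under #if 0, not compiled): roughly how a
 * frontend write path might use the reservation above.  The caller is
 * assumed to hold the related buffer cache buffer locked; the direct-IO
 * step is a hypothetical placeholder for what the strategy code does.
 */
#if 0
static void
example_direct_write(hammer_inode_t ip, off_t file_offset,
		     void *data, int bytes)
{
	hammer_record_t record;
	int error;

	record = hammer_ip_add_bulk(ip, file_offset, data, bytes, &error);
	if (record == NULL)
		return;		/* reservation or insertion failed */
	/* ... write the data against record->leaf.data_offset ... */
	/* the backend finalizes the reserved space when it syncs */
}
#endif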
/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
	struct rec_trunc_info info;

	switch(ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_REGFILE:
		info.rec_type = HAMMER_RECTYPE_DATA;
		break;
	case HAMMER_OBJTYPE_DBFILE:
		info.rec_type = HAMMER_RECTYPE_DB;
		break;
	default:
		return(EINVAL);
	}
	info.trunc_off = file_size;
	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
				   hammer_frontend_trunc_callback, &info);
	return(0);
}

static int
hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
{
	if (record->flags & HAMMER_RECF_DELETED_FE)
		return(0);
	if (record->flush_state == HAMMER_FST_FLUSH)
		return(0);
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	hammer_ref(&record->lock);
	record->flags |= HAMMER_RECF_DELETED_FE;
	hammer_rel_mem_record(record);
	return(0);
}
/*
 * Return 1 if the caller must check for and delete existing records
 * before writing out a new data record.
 *
 * Return 0 if the caller can just insert the record into the B-Tree without
 * checking.
 */
static int
hammer_record_needs_overwrite_delete(hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int64_t file_offset;
	int r;

	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
		file_offset = record->leaf.base.key;
	else
		file_offset = record->leaf.base.key - record->leaf.data_len;
	r = (file_offset < ip->save_trunc_off);
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		if (ip->save_trunc_off <= record->leaf.base.key)
			ip->save_trunc_off = record->leaf.base.key + 1;
	} else {
		if (ip->save_trunc_off < record->leaf.base.key)
			ip->save_trunc_off = record->leaf.base.key;
	}
	return(r);
}
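
/*
 * Illustrative sketch (kept under #if 0, not compiled): the watermark
 * arithmetic above for a regular file.  With save_trunc_off at a
 * hypothetical 32768, a DATA record based at 16384 (key - data_len)
 * starts below the watermark and requires an overwrite-delete pass;
 * one based at 32768 does not.
 */
#if 0
static int
example_needs_delete(int64_t key, int32_t data_len, int64_t save_trunc_off)
{
	int64_t file_offset = key - data_len;	/* base offset of the data */

	return(file_offset < save_trunc_off);
}
#endif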
/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
	hammer_transaction_t trans = cursor->trans;
	int64_t file_offset;
	int bytes;
	void *bdata;
	int error;
	int doprop;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
	KKASSERT(record->leaf.base.localization != 0);

	/*
	 * If this is a bulk-data record placemarker there may be an existing
	 * record on-disk, indicating a data overwrite.  If there is, the
	 * on-disk record must be deleted before we can insert our new record.
	 *
	 * We've synthesized this record and do not know what the create_tid
	 * on-disk is, nor how much data it represents.
	 *
	 * Keep in mind that (key) for data records is (base_offset + len),
	 * not (base_offset).  Also, we only want to get rid of on-disk
	 * records since we are trying to sync our in-memory record, call
	 * hammer_ip_delete_range() with truncating set to 1 to make sure
	 * it skips in-memory records.
	 *
	 * It is ok for the lookup to return ENOENT.
	 *
	 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
	 * to call hammer_ip_delete_range() or not.  This also means we must
	 * update sync_trunc_off as we write.
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA &&
	    hammer_record_needs_overwrite_delete(record)) {
		file_offset = record->leaf.base.key - record->leaf.data_len;
		bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
			~HAMMER_BUFMASK;
		KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
		error = hammer_ip_delete_range(
				cursor, record->ip,
				file_offset, file_offset + bytes - 1,
				1);
		if (error && error != ENOENT)
			goto done;
	}

	/*
	 * If this is a general record there may be an on-disk version
	 * that must be deleted before we can insert the new record.
	 */
	if (record->type == HAMMER_MEM_RECORD_GENERAL) {
		error = hammer_delete_general(cursor, record->ip,
					      &record->leaf);
		if (error && error != ENOENT)
			goto done;
	}

	/*
	 * Setup the cursor.
	 */
	hammer_normalize_cursor(cursor);
	cursor->key_beg = record->leaf.base;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Records can wind up on-media before the inode itself is on-media.
	 * Flag the case.
	 */
	record->ip->flags |= HAMMER_INODE_DONDISK;

	/*
	 * If we are deleting a directory entry an exact match must be
	 * found on-disk.
	 */
	if (record->type == HAMMER_MEM_RECORD_DEL) {
		error = hammer_btree_lookup(cursor);
		if (error == 0) {
			/* XXX iprec? */
			error = hammer_ip_delete_record(cursor, record->ip,
							trans->tid);
			if (error == 0) {
				record->flags |= HAMMER_RECF_DELETED_FE;
				record->flags |= HAMMER_RECF_DELETED_BE;
				record->flags |= HAMMER_RECF_COMMITTED;
			}
		}
		goto done;
	}

	/*
	 * We are inserting.
	 *
	 * Issue a lookup to position the cursor and locate the cluster.  The
	 * target key should not exist.  If we are creating a directory entry
	 * we may have to iterate the low 32 bits of the key to find an unused
	 * key.
	 */
	hammer_sync_lock_sh(trans);
	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (hammer_debug_inode)
		kprintf("DOINSERT LOOKUP %d\n", error);
	if (error == 0) {
		kprintf("hammer_ip_sync_record: duplicate rec "
			"at (%016llx)\n", record->leaf.base.key);
		Debugger("duplicate record1");
		error = EIO;
	}
#if 0
	if (record->type == HAMMER_MEM_RECORD_DATA)
		kprintf("sync_record %016llx ---------------- %016llx %d\n",
			record->leaf.base.key - record->leaf.data_len,
			record->leaf.data_offset, error);
#endif

	if (error != ENOENT)
		goto done_unlock;

	/*
	 * Allocate the record and data.  The result buffers will be
	 * marked as being modified and further calls to
	 * hammer_modify_buffer() will result in unneeded UNDO records.
	 *
	 * Support zero-fill records (data == NULL and data_len != 0)
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA) {
		/*
		 * The data portion of a bulk-data record has already been
		 * committed to disk, we need only adjust the layer2
		 * statistics in the same transaction as our B-Tree insert.
		 */
		KKASSERT(record->leaf.data_offset != 0);
		hammer_blockmap_finalize(trans, record->leaf.data_offset,
					 record->leaf.data_len);
		error = 0;
	} else if (record->data && record->leaf.data_len) {
		/*
		 * Wholly cached record, with data.  Allocate the data.
		 */
		bdata = hammer_alloc_data(trans, record->leaf.data_len,
					  record->leaf.base.rec_type,
					  &record->leaf.data_offset,
					  &cursor->data_buffer, &error);
		if (bdata == NULL)
			goto done_unlock;
		hammer_crc_set_leaf(record->data, &record->leaf);
		hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
		bcopy(record->data, bdata, record->leaf.data_len);
		hammer_modify_buffer_done(cursor->data_buffer);
	} else {
		/*
		 * Wholly cached record, without data.
		 */
		record->leaf.data_offset = 0;
		record->leaf.data_crc = 0;
	}

	/*
	 * If the record's data was direct-written we cannot insert
	 * it until the direct-IO has completed.
	 */
	if (record->flags & HAMMER_RECF_DIRECT_IO)
		hammer_io_direct_wait(record);

	error = hammer_btree_insert(cursor, &record->leaf, &doprop);
	if (hammer_debug_inode && error)
		kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);

	/*
	 * Our record is on-disk, normally mark the in-memory version as
	 * deleted.  If the record represented a directory deletion but
	 * we had to sync a valid directory entry to disk we must convert
	 * the record to a covering delete so the frontend does not have
	 * visibility on the synced entry.
	 */
	if (error == 0) {
		if (doprop) {
			hammer_btree_do_propagation(cursor,
						    record->ip->pfsm,
						    &record->leaf);
		}
		if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags &= ~HAMMER_RECF_DELETED_FE;
			record->type = HAMMER_MEM_RECORD_DEL;
			KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
			record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
			/* hammer_flush_record_done takes care of the rest */
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
		}
		record->flags |= HAMMER_RECF_COMMITTED;
	} else {
		if (record->leaf.data_offset) {
			hammer_blockmap_free(trans, record->leaf.data_offset,
					     record->leaf.data_len);
		}
	}
done_unlock:
	hammer_sync_unlock(trans);
done:
	return(error);
}
/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key is used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
static
int
hammer_mem_add(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	/*
	 * Make a private copy of record->data
	 */
	if (record->data)
		KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

	/*
	 * Insert into the RB tree.  A unique key should have already
	 * been selected if this is a directory entry.
	 */
	if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
		return (EEXIST);
	}
	++hmp->count_newrecords;
	++hmp->rsv_recs;
	++record->ip->rsv_recs;
	record->ip->hmp->rsv_databytes += record->leaf.data_len;
	record->flags |= HAMMER_RECF_ONRBTREE;
	hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
	hammer_rel_mem_record(record);
	return(0);
}
/************************************************************************
 *		   HAMMER INODE MERGED-RECORD FUNCTIONS		*
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */
/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
	int error;

	/*
	 * If the element is in-memory return it without searching the
	 * on-disk B-Tree
	 */
	KKASSERT(cursor->ip);
	error = hammer_mem_lookup(cursor);
	if (error == 0) {
		cursor->leaf = &cursor->iprec->leaf;
		return(error);
	}
	if (error != ENOENT)
		return(error);

	/*
	 * If the inode has on-disk components search the on-disk B-Tree.
	 */
	if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
		return(error);
	error = hammer_btree_lookup(cursor);
	if (error == 0)
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
	return(error);
}
/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip = cursor->ip;
	int error;

	KKASSERT(ip != NULL);

	/*
	 * Clean up fields and setup for merged scan
	 */
	cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
	cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
	cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	/*
	 * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
	 * exact lookup so if we get ENOENT we have to call the iterate
	 * function to validate the first record after the begin key.
	 *
	 * The ATEDISK flag is used by hammer_btree_iterate to determine
	 * whether it must index forwards or not.  It is also used here
	 * to select the next record from in-memory or on-disk.
	 *
	 * EDEADLK can only occur if the lookup hit an empty internal
	 * element and couldn't delete it.  Since this could only occur
	 * in-range, we can just iterate from the failure point.
	 */
	if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
		error = hammer_btree_lookup(cursor);
		if (error == ENOENT || error == EDEADLK) {
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x2000)
				kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
			error = hammer_btree_iterate(cursor);
		}
		if (error && error != ENOENT)
			return(error);
		if (error == 0) {
			cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		} else {
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
		}
	}

	/*
	 * Search the in-memory record list (Red-Black tree).  Unlike the
	 * B-Tree search, mem_first checks for records in the range.
	 */
	error = hammer_mem_first(cursor);
	if (error && error != ENOENT)
		return(error);
	if (error == 0) {
		cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
		cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
	}

	/*
	 * This will return the first matching record.
	 */
	return(hammer_ip_next(cursor));
}
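
/*
 * Illustrative sketch (kept under #if 0, not compiled): the canonical
 * merged-iteration loop over hammer_ip_first()/hammer_ip_next().  The
 * cursor's key_beg/key_end are assumed to have been initialized by the
 * caller; EDEADLK retry handling is omitted for brevity.
 */
#if 0
static int
example_merged_scan(hammer_cursor_t cursor)
{
	int error;

	error = hammer_ip_first(cursor);
	while (error == 0) {
		/* cursor->leaf points at the current merged record */
		error = hammer_ip_resolve_data(cursor);
		if (error)
			break;
		/* ... consume cursor->leaf and cursor->data ... */
		error = hammer_ip_next(cursor);
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
#endif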
/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor.  This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code is returned.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
	hammer_btree_elm_t elm;
	hammer_record_t rec, save;
	int error;
	int r;

next_btree:
	/*
	 * Load the current on-disk and in-memory record.  If we ate any
	 * records we have to get the next one.
	 *
	 * If we deleted the last on-disk record we had scanned ATEDISK will
	 * be clear and DELBTREE will be set, forcing a call to iterate.  The
	 * fact that ATEDISK is clear causes iterate to re-test the 'current'
	 * element.  If ATEDISK is set, iterate will skip the 'current'
	 * element.
	 *
	 * Get the next on-disk record
	 */
	if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			error = hammer_btree_iterate(cursor);
			cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
			if (error == 0) {
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
				hammer_cache_node(&cursor->ip->cache[1],
						  cursor->node);
			} else {
				cursor->flags |= HAMMER_CURSOR_DISKEOF |
						 HAMMER_CURSOR_ATEDISK;
			}
		}
	}

next_memory:
	/*
	 * Get the next in-memory record.
	 *
	 * hammer_rec_scan_cmp:  Is the record still in our general range,
	 *			 (non-inclusive of snapshot exclusions)?
	 * hammer_rec_scan_callback: Is the record in our snapshot?
	 */
	if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
		if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
			save = cursor->iprec;
			cursor->iprec = NULL;
			rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
			while (rec) {
				if (hammer_rec_scan_cmp(rec, cursor) != 0)
					break;
				if (hammer_rec_scan_callback(rec, cursor) != 0)
					break;
				rec = hammer_rec_rb_tree_RB_NEXT(rec);
			}
			if (save)
				hammer_rel_mem_record(save);
			if (cursor->iprec) {
				KKASSERT(cursor->iprec == rec);
				cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
			} else {
				cursor->flags |= HAMMER_CURSOR_MEMEOF;
			}
		}
	}

	/*
	 * The memory record may have become stale while being held in
	 * cursor->iprec.  We are interlocked against the backend with
	 * regard to B-Tree entries.
	 */
	if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
			goto next_memory;
		}
	}

	/*
	 * Extract either the disk or memory record depending on their
	 * relative position.
	 */
	error = 0;
	switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
	case 0:
		/*
		 * Both entries valid.   Compare the entries and nominally
		 * return the first one in the sort order.  Numerous cases
		 * require special attention, however.
		 */
		elm = &cursor->node->ondisk->elms[cursor->index];
		r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);

		/*
		 * If the two entries differ only by their key (-2/2) or
		 * create_tid (-1/1), and are DATA records, we may have a
		 * nominal match.  We have to calculate the base file
		 * offset of the data.
		 */
		if (r <= 2 && r >= -2 && r != 0 &&
		    cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
		    cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
			int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
			int64_t base2 = cursor->iprec->leaf.base.key -
					cursor->iprec->leaf.data_len;
			if (base1 == base2)
				r = 0;
		}

		if (r < 0) {
			error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_LEAF);
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
			break;
		}

		/*
		 * If the entries match exactly the memory entry is either
		 * an on-disk directory entry deletion or a bulk data
		 * overwrite.  If it is a directory entry deletion we eat
		 * both entries.
		 *
		 * For the bulk-data overwrite case it is possible to have
		 * visibility into both, which simply means the syncer
		 * hasn't gotten around to doing the delete+insert sequence
		 * on the B-Tree.  Use the memory entry and throw away the
		 * on-disk entry.
		 *
		 * If the in-memory record is not either of these we
		 * probably caught the syncer while it was syncing it to
		 * the media.  Since we hold a shared lock on the cursor,
		 * the in-memory record had better be marked deleted at
		 * this point.
		 */
		if (r == 0) {
			if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
					cursor->flags |= HAMMER_CURSOR_ATEMEM;
					goto next_btree;
				}
			} else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
				}
				/* fall through to memory entry */
			} else {
				panic("hammer_ip_next: duplicate mem/b-tree entry %p %d %08x", cursor->iprec, cursor->iprec->type, cursor->iprec->flags);
				cursor->flags |= HAMMER_CURSOR_ATEMEM;
				goto next_memory;
			}
		}
		/* fall through to the memory entry */
	case HAMMER_CURSOR_ATEDISK:
		/*
		 * Only the memory entry is valid.
		 */
		cursor->leaf = &cursor->iprec->leaf;
		cursor->flags |= HAMMER_CURSOR_ATEMEM;

		/*
		 * If the memory entry is an on-disk deletion we should also
		 * have found a B-Tree record.  If the backend beat us to it,
		 * it would have interlocked the cursor and we should have
		 * seen the in-memory record marked DELETED_FE.
		 */
		if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
		    (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
			panic("hammer_ip_next: del-on-disk with no b-tree entry iprec %p flags %08x", cursor->iprec, cursor->iprec->flags);
		}
		break;
	case HAMMER_CURSOR_ATEMEM:
		/*
		 * Only the disk entry is valid
		 */
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		break;
	default:
		/*
		 * Neither entry is valid
		 *
		 * XXX error not set properly
		 */
		cursor->leaf = NULL;
		error = ENOENT;
		break;
	}
	return(error);
}
/*
 * Resolve the cursor->data pointer for the current cursor position in
 * a merged iteration.
 */
int
hammer_ip_resolve_data(hammer_cursor_t cursor)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * The data associated with an in-memory record is usually
		 * kmalloced, but reserve-ahead data records will have an
		 * on-disk reference.
		 *
		 * NOTE: Reserve-ahead data records must be handled in the
		 * context of the related high level buffer cache buffer
		 * to interlock against async writes.
		 */
		record = cursor->iprec;
		cursor->data = record->data;
		error = 0;
		if (cursor->data == NULL) {
			KKASSERT(record->leaf.base.rec_type ==
				 HAMMER_RECTYPE_DATA);
			cursor->data = hammer_bread_ext(cursor->trans->hmp,
						record->leaf.data_offset,
						record->leaf.data_len,
						&error,
						&cursor->data_buffer);
		}
	} else {
		cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
	}
	return(error);
}
/*
 * Backend truncation / record replacement - delete records in range.
 *
 * Delete all records within the specified range for inode ip.  In-memory
 * records still associated with the frontend are ignored.
 *
 * If truncating is non-zero in-memory records associated with the back-end
 * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
 *
 * NOTES:
 *
 *	* An unaligned range will cause new records to be added to cover
 *	  the edge cases. (XXX not implemented yet).
 *
 *	* Replacement via reservations (see hammer_ip_sync_record_cursor())
 *	  also does not deal with unaligned ranges.
 *
 *	* ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
 *
 *	* Record keys for regular file data have to be special-cased since
 *	  they indicate the end of the range (key = base + bytes).
 *
 *	* This function may be asked to delete ridiculously huge ranges, for
 *	  example if someone truncates or removes a 1TB regular file.  We
 *	  must be very careful on restarts and we may have to stop w/
 *	  EWOULDBLOCK to avoid blowing out the buffer cache.
 */
int
hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
		       int64_t ran_beg, int64_t ran_end, int truncating)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;
	int64_t off;
	int64_t tmp64;

#if 0
	kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
#endif

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;

	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor->key_beg.key = ran_beg;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
	} else {
		/*
		 * The key in the B-Tree is (base+bytes), so the first possible
		 * matching key is ran_beg + 1.
		 */
		cursor->key_beg.key = ran_beg + 1;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
	}

	cursor->key_end = cursor->key_beg;
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor->key_end.key = ran_end;
	} else {
		tmp64 = ran_end + MAXPHYS + 1;	/* work around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor->key_end.key = ran_end + MAXPHYS + 1;
	}

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);
		KKASSERT(leaf->base.obj_id == ip->obj_id);

		/*
		 * There may be overlap cases for regular file data.  Also
		 * remember the key for a regular file record is (base + len),
		 * NOT (base).
		 *
		 * Note that due to duplicates (mem & media) allowed by
		 * DELETE_VISIBILITY, off can wind up less than ran_beg.
		 */
		if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
			off = leaf->base.key - leaf->data_len;
			/*
			 * Check the left edge case.  We currently do not
			 * split existing records.
			 */
			if (off < ran_beg && leaf->base.key > ran_beg) {
				panic("hammer left edge case %016llx %d\n",
					leaf->base.key, leaf->data_len);
			}

			/*
			 * Check the right edge case.  Note that the
			 * record can be completely out of bounds, which
			 * terminates the search.
			 *
			 * base->key is exclusive of the right edge while
			 * ran_end is inclusive of the right edge.  The
			 * (key - data_len) left boundary is inclusive.
			 *
			 * XXX theory-check this test at some point, are
			 * we missing a + 1 somewhere?  Note that ran_end
			 * could overflow.
			 */
			if (leaf->base.key - 1 > ran_end) {
				if (leaf->base.key - leaf->data_len > ran_end)
					break;
				panic("hammer right edge case\n");
			}
		} else {
			off = leaf->base.key;
		}

		/*
		 * Delete the record.  When truncating we do not delete
		 * in-memory (data) records because they represent data
		 * written after the truncation.
		 *
		 * This will also physically destroy the B-Tree entry and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
		 * uses to perform a fixup.
		 */
		if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			/*
			 * If we have built up too many meta-buffers we risk
			 * deadlocking the kernel and must stop.  This can
			 * occur when deleting ridiculously huge files.
			 * sync_trunc_off is updated so the next cycle does
			 * not re-iterate records we have already deleted.
			 *
			 * This is only done with formal truncations.
			 */
			if (truncating > 1 && error == 0 &&
			    hammer_flusher_meta_limit(ip->hmp)) {
				ip->sync_trunc_off = off;
				error = EWOULDBLOCK;
			}
		}
		if (error)
			break;
		ran_beg = off;	/* for restart */
		error = hammer_ip_next(cursor);
	}
	if (cursor->node)
		hammer_cache_node(&ip->cache[1], cursor->node);

	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
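
/*
 * Illustrative sketch (kept under #if 0, not compiled): a backend
 * truncation loop restarting the range deletion above after EWOULDBLOCK.
 * The wait step is a hypothetical placeholder; the real flusher drains
 * meta-buffers between passes and re-reads ip->sync_trunc_off, which the
 * function advances before returning EWOULDBLOCK.
 */
#if 0
static int
example_truncate(hammer_cursor_t cursor, hammer_inode_t ip)
{
	int error;

	for (;;) {
		error = hammer_ip_delete_range(cursor, ip,
					       ip->sync_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error != EWOULDBLOCK)
			break;
		/* ... let the flusher catch up, then continue ... */
	}
	return(error);
}
#endif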
/*
 * This backend function deletes the specified record on-disk, similar to
 * delete_range but for a specific record.  Unlike the exact deletions
 * used when deleting a directory entry this function uses an ASOF search
 * like delete_range.
 *
 * This function may be called with ip->obj_asof set for a slave snapshot,
 * so don't use it.  We always delete non-historical records only.
 */
static int
hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg = leaf->base;
	cursor->asof = HAMMER_MAX_TID;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		error = hammer_ip_delete_record(cursor, ip, trans->tid);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
1833 * This function deletes remaining auxillary records when an inode is
1834 * being deleted. This function explicitly does not delete the
1835 * inode record, directory entry, data, or db records. Those must be
1836 * properly disposed of prior to this call.
1839 hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
1841 hammer_transaction_t trans = cursor->trans;
1842 hammer_btree_leaf_elm_t leaf;
1843 int error;
1845 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1846 retry:
1847 hammer_normalize_cursor(cursor);
1848 cursor->key_beg.localization = ip->obj_localization +
1849 HAMMER_LOCALIZE_MISC;
1850 cursor->key_beg.obj_id = ip->obj_id;
1851 cursor->key_beg.create_tid = 0;
1852 cursor->key_beg.delete_tid = 0;
1853 cursor->key_beg.obj_type = 0;
1854 cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
1855 cursor->key_beg.key = HAMMER_MIN_KEY;
1857 cursor->key_end = cursor->key_beg;
1858 cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
1859 cursor->key_end.key = HAMMER_MAX_KEY;
1861 cursor->asof = ip->obj_asof;
1862 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1863 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1864 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1865 cursor->flags |= HAMMER_CURSOR_BACKEND;
1867 error = hammer_ip_first(cursor);
1870 * Iterate through matching records and mark them as deleted.
1872 while (error == 0) {
1873 leaf = cursor->leaf;
1875 KKASSERT(leaf->base.delete_tid == 0);
1878 * Mark the record and B-Tree entry as deleted. This will
1879 * also physically delete the B-Tree entry, record, and
1880 * data if the retention policy dictates. The function
1881 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1882 * uses to perform a fixup.
1884 * Directory entries (and delete-on-disk directory entries)
1885 * must be synced and cannot be deleted.
1887 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1888 ++*countp;
1889 if (error)
1890 break;
1891 error = hammer_ip_next(cursor);
1892 }
1893 if (cursor->node)
1894 hammer_cache_node(&ip->cache[1], cursor->node);
1895 if (error == EDEADLK) {
1896 hammer_done_cursor(cursor);
1897 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1898 if (error == 0)
1899 goto retry;
1900 }
1901 if (error == ENOENT)
1902 error = 0;
1903 return(error);
1904 }
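/*
 * A hypothetical flush-path usage sketch for hammer_ip_delete_clean():
 * sweep leftover auxiliary records once the inode record, directory
 * entries, data and db records are gone. Note that *countp is bumped
 * for the record being deleted even when the deletion errors out.
 */
#if 0
int count = 0;

error = hammer_ip_delete_clean(cursor, ip, &count);
if (error == 0 && count) {
	/* purely illustrative diagnostic */
	kprintf("HAMMER: cleaned %d auxiliary records\n", count);
}
#endif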
1906 /*
1907 * Delete the record at the current cursor. On success the cursor will
1908 * be positioned appropriately for an iteration but may no longer be at
1909 * a leaf node.
1910 *
1911 * This routine is only called from the backend.
1912 *
1913 * NOTE: This can return EDEADLK, requiring the caller to terminate the
1914 * cursor and retry.
1915 */
1916 int
1917 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1918 hammer_tid_t tid)
1919 {
1920 hammer_record_t iprec;
1921 hammer_btree_elm_t elm;
1922 hammer_mount_t hmp;
1923 int error;
1925 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1926 KKASSERT(tid != 0);
1927 hmp = cursor->node->hmp;
1929 /*
1930 * In-memory (unsynchronized) records can simply be freed. This
1931 * only occurs in range iterations since all other records are
1932 * individually synchronized. Thus there should be no confusion with
1933 * the interlock.
1934 *
1935 * An in-memory record may be deleted before being committed to disk,
1936 * but could have been accessed in the meantime. The reservation
1937 * code will deal with the case.
1938 */
1939 if (hammer_cursor_inmem(cursor)) {
1940 iprec = cursor->iprec;
1941 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1942 iprec->flags |= HAMMER_RECF_DELETED_FE;
1943 iprec->flags |= HAMMER_RECF_DELETED_BE;
1944 return(0);
1945 }
1947 /*
1948 * On-disk records are marked as deleted by updating their delete_tid.
1949 * This does not affect their position in the B-Tree (which is based
1950 * on their create_tid).
1951 *
1952 * Frontend B-Tree operations track inodes so we tell
1953 * hammer_delete_at_cursor() not to.
1954 */
1955 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1956 elm = NULL;
1958 if (error == 0) {
1959 error = hammer_delete_at_cursor(
1960 cursor,
1961 HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
1962 cursor->trans->tid,
1963 cursor->trans->time32,
1964 0, NULL);
1965 }
1966 return(error);
1967 }
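/*
 * A sketch of how the backend drives hammer_ip_delete_record() over a
 * range, mirroring the loop in hammer_ip_delete_clean() above. The
 * cursor is assumed to have been positioned with a key range and
 * HAMMER_CURSOR_BACKEND set; illustration only.
 */
#if 0
error = hammer_ip_first(cursor);
while (error == 0) {
	error = hammer_ip_delete_record(cursor, ip, trans->tid);
	if (error)
		break;
	/* DELBTREE set by the deletion lets hammer_ip_next() fix up */
	error = hammer_ip_next(cursor);
}
if (error == ENOENT)
	error = 0;		/* iterated past the last record */
#endif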
1969 /*
1970 * Delete the B-Tree element at the current cursor and do any necessary
1971 * mirror propagation.
1972 *
1973 * The cursor must be properly positioned for an iteration on return but
1974 * may be pointing at an internal element.
1975 *
1976 * An element can be un-deleted by passing a delete_tid of 0 with
1977 * HAMMER_DELETE_ADJUST.
1978 */
1979 int
1980 hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
1981 hammer_tid_t delete_tid, u_int32_t delete_ts,
1982 int track, int64_t *stat_bytes)
1983 {
1984 struct hammer_btree_leaf_elm save_leaf;
1985 hammer_transaction_t trans;
1986 hammer_btree_leaf_elm_t leaf;
1987 hammer_node_t node;
1988 hammer_btree_elm_t elm;
1989 hammer_off_t data_offset;
1990 int32_t data_len;
1991 u_int16_t rec_type;
1992 int error;
1993 int icount;
1994 int doprop;
1996 error = hammer_cursor_upgrade(cursor);
1997 if (error)
1998 return(error);
2000 trans = cursor->trans;
2001 node = cursor->node;
2002 elm = &node->ondisk->elms[cursor->index];
2003 leaf = &elm->leaf;
2004 KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
2006 hammer_sync_lock_sh(trans);
2007 doprop = 0;
2008 icount = 0;
2010 /*
2011 * Adjust the delete_tid. Update the mirror_tid propagation field
2012 * as well. delete_tid can be 0 (undelete -- used by mirroring).
2013 */
2014 if (delete_flags & HAMMER_DELETE_ADJUST) {
2015 if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
2016 if (elm->leaf.base.delete_tid == 0 && delete_tid)
2017 icount = -1;
2018 if (elm->leaf.base.delete_tid && delete_tid == 0)
2019 icount = 1;
2020 }
2022 hammer_modify_node(trans, node, elm, sizeof(*elm));
2023 elm->leaf.base.delete_tid = delete_tid;
2024 elm->leaf.delete_ts = delete_ts;
2025 hammer_modify_node_done(node);
2027 if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
2028 hammer_modify_node_field(trans, node, mirror_tid);
2029 node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
2030 hammer_modify_node_done(node);
2031 doprop = 1;
2032 if (hammer_debug_general & 0x0002) {
2033 kprintf("delete_at_cursor: propagate %016llx"
2034 " @%016llx\n",
2035 elm->leaf.base.delete_tid,
2036 node->node_offset);
2037 }
2038 }
2040 /*
2041 * Adjust for the iteration. We have deleted the current
2042 * element and want to clear ATEDISK so the iteration does
2043 * not skip the element after, which now becomes the current
2044 * element.
2045 */
2046 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2047 cursor->flags |= HAMMER_CURSOR_DELBTREE;
2048 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2049 }
2051 /*
2052 * An on-disk record cannot have the same delete_tid
2053 * as its create_tid. In a chain of record updates
2054 * this could result in a duplicate record.
2055 */
2056 KKASSERT(elm->leaf.base.delete_tid !=
2057 elm->leaf.base.create_tid);
2058 }
2060 /*
2061 * Destroy the B-Tree element if asked (typically if a nohistory
2062 * file or mount, or when called by the pruning code).
2063 *
2064 * Adjust the ATEDISK flag to properly support iterations.
2065 */
2066 if (delete_flags & HAMMER_DELETE_DESTROY) {
2067 data_offset = elm->leaf.data_offset;
2068 data_len = elm->leaf.data_len;
2069 rec_type = elm->leaf.base.rec_type;
2070 if (doprop) {
2071 save_leaf = elm->leaf;
2072 leaf = &save_leaf;
2073 }
2074 if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
2075 elm->leaf.base.delete_tid == 0) {
2076 icount = -1;
2077 }
2079 error = hammer_btree_delete(cursor);
2080 if (error == 0) {
2081 /*
2082 * This forces a fixup for the iteration because
2083 * the cursor is now either sitting at the 'next'
2084 * element or sitting at the end of a leaf.
2085 */
2086 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2087 cursor->flags |= HAMMER_CURSOR_DELBTREE;
2088 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2089 }
2090 }
2091 if (error == 0) {
2092 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
2093 case HAMMER_ZONE_LARGE_DATA:
2094 case HAMMER_ZONE_SMALL_DATA:
2095 case HAMMER_ZONE_META:
2096 hammer_blockmap_free(trans,
2097 data_offset, data_len);
2098 break;
2099 default:
2100 break;
2101 }
2102 }
2103 }
2105 /*
2106 * Track inode count and next_tid. This is used by the mirroring
2107 * and PFS code. icount can be negative, zero, or positive.
2108 */
2109 if (error == 0 && track) {
2110 if (icount) {
2111 hammer_modify_volume_field(trans, trans->rootvol,
2112 vol0_stat_inodes);
2113 trans->rootvol->ondisk->vol0_stat_inodes += icount;
2114 hammer_modify_volume_done(trans->rootvol);
2115 }
2116 if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
2117 hammer_modify_volume(trans, trans->rootvol, NULL, 0);
2118 trans->rootvol->ondisk->vol0_next_tid = delete_tid;
2119 hammer_modify_volume_done(trans->rootvol);
2120 }
2121 }
2123 /*
2124 * mirror_tid propagation occurs if the node's mirror_tid had to be
2125 * updated while adjusting the delete_tid.
2126 *
2127 * This occurs when deleting even in nohistory mode, but does not
2128 * occur when pruning an already-deleted node.
2129 *
2130 * cursor->ip is NULL when called from the pruning, mirroring,
2131 * and pfs code. If non-NULL, propagation will be conditionalized
2132 * on whether the PFS is in no-history mode or not.
2133 */
2134 if (doprop) {
2135 if (cursor->ip)
2136 hammer_btree_do_propagation(cursor, cursor->ip->pfsm, leaf);
2137 else
2138 hammer_btree_do_propagation(cursor, NULL, leaf);
2139 }
2140 hammer_sync_unlock(trans);
2141 return (error);
2142 }
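/*
 * A sketch of the un-delete case documented above: mirroring can clear
 * a previous deletion by adjusting delete_tid (and delete_ts) back to
 * 0. Assumes the cursor is positioned on the target element; the
 * track/stat arguments are unused here.
 */
#if 0
error = hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST,
				0, 0, 0, NULL);
#endif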
2144 /*
2145 * Determine whether we can remove a directory. This routine checks whether
2146 * a directory is empty or not and enforces flush connectivity.
2147 *
2148 * Flush connectivity requires that we block if the target directory is
2149 * currently flushing, otherwise it may not end up in the same flush group.
2150 *
2151 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
2152 */
2153 int
2154 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
2155 {
2156 struct hammer_cursor cursor;
2157 int error;
2159 /*
2160 * Check directory empty.
2161 */
2162 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2164 cursor.key_beg.localization = ip->obj_localization +
2165 HAMMER_LOCALIZE_MISC;
2166 cursor.key_beg.obj_id = ip->obj_id;
2167 cursor.key_beg.create_tid = 0;
2168 cursor.key_beg.delete_tid = 0;
2169 cursor.key_beg.obj_type = 0;
2170 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
2171 cursor.key_beg.key = HAMMER_MIN_KEY;
2173 cursor.key_end = cursor.key_beg;
2174 cursor.key_end.rec_type = 0xFFFF;
2175 cursor.key_end.key = HAMMER_MAX_KEY;
2177 cursor.asof = ip->obj_asof;
2178 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2180 error = hammer_ip_first(&cursor);
2181 if (error == ENOENT)
2182 error = 0;
2183 else if (error == 0)
2184 error = ENOTEMPTY;
2185 hammer_done_cursor(&cursor);
2186 return(error);
2187 }
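/*
 * A hypothetical rmdir-path sketch for hammer_ip_check_directory_empty().
 * 'dip' stands in for the directory inode being removed (not a name from
 * this file); ENOTEMPTY is propagated to the caller and EDEADLK asks it
 * to back out and retry.
 */
#if 0
error = hammer_ip_check_directory_empty(trans, dip);
if (error)
	return(error);	/* ENOTEMPTY, EDEADLK, or other error */
#endif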