 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.97 2008/09/23 22:28:56 dillon Exp $
 */

#include "hammer.h"

static int hammer_mem_lookup(hammer_cursor_t cursor);
static void hammer_mem_first(hammer_cursor_t cursor);
static int hammer_frontend_trunc_callback(hammer_record_t record,
				void *data __unused);
static int hammer_bulk_scan_callback(hammer_record_t record, void *data);
static int hammer_record_needs_overwrite_delete(hammer_record_t record);
static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
				hammer_btree_leaf_elm_t leaf);

struct rec_trunc_info {
	u_int16_t	rec_type;
	int64_t		trunc_off;
};

struct hammer_bulk_info {
	hammer_record_t record;
	struct hammer_btree_leaf_elm leaf;
};

/*
 * Red-black tree support.  Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
	if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
		return(-1);
	if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
		return(1);

	if (rec1->leaf.base.key < rec2->leaf.base.key)
		return(-1);
	if (rec1->leaf.base.key > rec2->leaf.base.key)
		return(1);

	/*
	 * For search & insertion purposes records deleted by the
	 * frontend or deleted/committed by the backend are silently
	 * ignored.  Otherwise pipelined insertions will get messed
	 * up.
	 *
	 * rec1 is greater than rec2 if rec1 is marked deleted.
	 * rec1 is less than rec2 if rec2 is marked deleted.
	 *
	 * Multiple deleted records may be present, do not return 0
	 * if both are marked deleted.
	 */
	if (rec1->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			   HAMMER_RECF_COMMITTED)) {
		return(1);
	}
	if (rec2->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			   HAMMER_RECF_COMMITTED)) {
		return(-1);
	}

	return(0);
}

/*
 * Basic record comparison code similar to hammer_btree_cmp().
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
	if (elm->rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (elm->rec_type > rec->leaf.base.rec_type)
		return(3);

	if (elm->key < rec->leaf.base.key)
		return(-2);
	if (elm->key > rec->leaf.base.key)
		return(2);

	/*
	 * Never match against an item deleted by the frontend
	 * or backend, or committed by the backend.
	 *
	 * elm is less than rec if rec is marked deleted.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			  HAMMER_RECF_COMMITTED)) {
		return(-1);
	}
	return(0);
}

/*
 * Ranged scan to locate overlapping record(s).  This is used by
 * hammer_ip_get_bulk() to locate an overlapping record.  We have
 * to use a ranged scan because the keys for data records with the
 * same file base offset can be different due to differing data_len's.
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
static int
hammer_rec_overlap_cmp(hammer_record_t rec, void *data)
{
	struct hammer_bulk_info *info = data;
	hammer_btree_leaf_elm_t leaf = &info->leaf;

	if (rec->leaf.base.rec_type < leaf->base.rec_type)
		return(-3);
	if (rec->leaf.base.rec_type > leaf->base.rec_type)
		return(3);

	if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
		/* rec_beg >= leaf_end */
		if (rec->leaf.base.key - rec->leaf.data_len >= leaf->base.key)
			return(2);
		/* rec_end <= leaf_beg */
		if (rec->leaf.base.key <= leaf->base.key - leaf->data_len)
			return(-2);
	} else {
		if (rec->leaf.base.key < leaf->base.key)
			return(-2);
		if (rec->leaf.base.key > leaf->base.key)
			return(2);
	}

	/*
	 * We have to return 0 at this point, even if DELETED_FE is set,
	 * because returning anything else will cause the scan to ignore
	 * one of the branches when we really want it to check both.
	 */
	return(0);
}

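/*
 * Worked example (illustrative, not from the original source): the ranged
 * test above relies on the DATA-record key convention where key is the
 * file offset of the END of the record.  For a hypothetical data record
 * covering file offsets [65536, 131072):
 *
 *	rec->leaf.base.key  = 131072	(base offset + data_len)
 *	rec->leaf.data_len  = 65536
 *	base offset         = key - data_len = 65536
 *
 * A lookup leaf describing [0, 65536) then compares as non-overlapping
 * because rec_beg (65536) >= leaf_end (65536).
 */
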
/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
static int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	r = hammer_rec_cmp(&cursor->key_end, rec);
	if (r < -1)
		return(1);
	return(0);
}

/*
 * This compare function is used when simply looking up key_beg.
 */
static int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	if (r < -1)
		return(1);
	return(0);
}

/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
	struct rec_trunc_info *info = data;

	if (rec->leaf.base.rec_type < info->rec_type)
		return(-1);
	if (rec->leaf.base.rec_type > info->rec_type)
		return(1);

	switch(rec->leaf.base.rec_type) {
	case HAMMER_RECTYPE_DB:
		/*
		 * DB record key is not beyond the truncation point, retain.
		 */
		if (rec->leaf.base.key < info->trunc_off)
			return(-1);
		break;
	case HAMMER_RECTYPE_DATA:
		/*
		 * DATA record offset start is not beyond the truncation point,
		 * retain.
		 */
		if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
			return(-1);
		break;
	default:
		panic("hammer_rec_trunc_cmp: unexpected record type");
	}

	/*
	 * The record start is >= the truncation point, return match,
	 * the record should be destroyed.
	 */
	return(0);
}

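/*
 * Worked example (illustrative, not from the original source): with
 * info->trunc_off = 32768, a hypothetical DATA record with key 65536 and
 * data_len 32768 starts at offset 32768, which is not below the truncation
 * point, so the comparison above reports a match and the record is
 * destroyed.  A record starting at offset 16384 is retained; partial
 * blocks are never deleted here.
 */
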
RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);

/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
	hammer_record_t record;
	hammer_mount_t hmp;

	hmp = ip->hmp;
	++hammer_count_records;
	record = kmalloc(sizeof(*record), hmp->m_misc,
			 M_WAITOK | M_ZERO | M_USE_RESERVE);
	record->flush_state = HAMMER_FST_IDLE;
	record->ip = ip;
	record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	record->leaf.data_len = data_len;
	hammer_ref(&record->lock);

	if (data_len) {
		record->data = kmalloc(data_len, hmp->m_misc, M_WAITOK | M_ZERO);
		record->flags |= HAMMER_RECF_ALLOCDATA;
		++hammer_count_record_datas;
	}

	return (record);
}

void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
	while (record->flush_state == HAMMER_FST_FLUSH) {
		record->flags |= HAMMER_RECF_WANTED;
		tsleep(record, 0, ident, 0);
	}
}

/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
	hammer_inode_t target_ip;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

	/*
	 * If an error occurred, the backend was unable to sync the
	 * record to its media.  Leave the record intact.
	 */
	if (error) {
		hammer_critical_error(record->ip->hmp, record->ip, error,
				      "while flushing record");
	}

	--record->flush_group->refs;
	record->flush_group = NULL;

	/*
	 * Adjust the flush state and dependency based on success or
	 * failure.
	 */
	if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
		if ((target_ip = record->target_ip) != NULL) {
			TAILQ_REMOVE(&target_ip->target_list, record,
				     target_entry);
			record->target_ip = NULL;
			hammer_test_inode(target_ip);
		}
		record->flush_state = HAMMER_FST_IDLE;
	} else {
		if (record->target_ip) {
			record->flush_state = HAMMER_FST_SETUP;
			hammer_test_inode(record->ip);
			hammer_test_inode(record->target_ip);
		} else {
			record->flush_state = HAMMER_FST_IDLE;
		}
	}
	record->flags &= ~HAMMER_RECF_INTERLOCK_BE;

	if (record->flags & HAMMER_RECF_WANTED) {
		record->flags &= ~HAMMER_RECF_WANTED;
		wakeup(record);
	}
	hammer_rel_mem_record(record);
}

/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
	hammer_mount_t hmp;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t target_ip;

	hammer_unref(&record->lock);

	if (record->lock.refs == 0) {
		/*
		 * Upon release of the last reference wakeup any waiters.
		 * The record structure may get destroyed so callers will
		 * loop up and do a relookup.
		 *
		 * WARNING!  Record must be removed from RB-TREE before we
		 * might possibly block.  hammer_test_inode() can block!
		 */
		ip = record->ip;
		hmp = ip->hmp;

		/*
		 * Upon release of the last reference a record marked deleted
		 * by the front or backend, or committed by the backend,
		 * is destroyed.
		 */
		if (record->flags & (HAMMER_RECF_DELETED_FE |
				     HAMMER_RECF_DELETED_BE |
				     HAMMER_RECF_COMMITTED)) {
			KKASSERT(ip->lock.refs > 0);
			KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

			/*
			 * target_ip may have zero refs, we have to ref it
			 * to prevent it from being ripped out from under
			 * us.
			 */
			if ((target_ip = record->target_ip) != NULL) {
				TAILQ_REMOVE(&target_ip->target_list,
					     record, target_entry);
				record->target_ip = NULL;
				hammer_ref(&target_ip->lock);
			}

			if (record->flags & HAMMER_RECF_ONRBTREE) {
				RB_REMOVE(hammer_rec_rb_tree,
					  &record->ip->rec_tree,
					  record);
				KKASSERT(ip->rsv_recs > 0);
				--hmp->rsv_recs;
				--ip->rsv_recs;
				hmp->rsv_databytes -= record->leaf.data_len;
				record->flags &= ~HAMMER_RECF_ONRBTREE;

				if (RB_EMPTY(&record->ip->rec_tree)) {
					record->ip->flags &= ~HAMMER_INODE_XDIRTY;
					record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
					hammer_test_inode(record->ip);
				}
			}

			/*
			 * We must wait for any direct-IO to complete before
			 * we can destroy the record because the bio may
			 * have a reference to it.
			 */
			if (record->flags &
			    (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL)) {
				hammer_io_direct_wait(record);
			}

			/*
			 * Do this test after removing record from the B-Tree.
			 */
			if (target_ip) {
				hammer_test_inode(target_ip);
				hammer_rel_inode(target_ip, 0);
			}

			if (record->flags & HAMMER_RECF_ALLOCDATA) {
				--hammer_count_record_datas;
				kfree(record->data, hmp->m_misc);
				record->flags &= ~HAMMER_RECF_ALLOCDATA;
			}

			/*
			 * Release the reservation.
			 *
			 * If the record was not committed we can theoretically
			 * undo the reservation.  However, doing so might
			 * create weird edge cases with the ordering of
			 * direct writes because the related buffer cache
			 * elements are per-vnode.  So we don't try.
			 */
			if ((resv = record->resv) != NULL) {
				/* XXX undo leaf.data_offset,leaf.data_len */
				hammer_blockmap_reserve_complete(hmp, resv);
				record->resv = NULL;
			}
			record->data = NULL;
			--hammer_count_records;
			kfree(record, hmp->m_misc);
		}
	}
}

/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.  Backend tests ignore the frontend delete
 * flag.  Frontend tests do NOT ignore the backend delete/commit flags and
 * must also check for commit races.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.  Returns 0 if the record has been committed (unless the special
 * delete-visibility flag is set).  A committed record must be located
 * via the media B-Tree.  Returns non-zero if the record is good.
 *
 * If HAMMER_CURSOR_DELETE_VISIBILITY is set we allow deleted memory
 * records to be returned.  This is so pending deletions are detected
 * when using an iterator to locate an unused hash key, or when we need
 * to locate historical records on-disk to destroy.
 */
static int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
	if (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY)
		return(1);
	if (cursor->flags & HAMMER_CURSOR_BACKEND) {
		if (record->flags & (HAMMER_RECF_DELETED_BE |
				     HAMMER_RECF_COMMITTED)) {
			return(0);
		}
	} else {
		if (record->flags & (HAMMER_RECF_DELETED_FE |
				     HAMMER_RECF_DELETED_BE |
				     HAMMER_RECF_COMMITTED)) {
			return(0);
		}
	}
	return(1);
}

/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;

	/*
	 * We terminate on success, so this should be NULL on entry.
	 */
	KKASSERT(cursor->iprec == NULL);

	/*
	 * Skip if the record was marked deleted or committed.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
		return(0);

	/*
	 * Skip if not visible due to our as-of TID
	 */
	if (cursor->flags & HAMMER_CURSOR_ASOF) {
		if (cursor->asof < rec->leaf.base.create_tid)
			return(0);
		if (rec->leaf.base.delete_tid &&
		    cursor->asof >= rec->leaf.base.delete_tid) {
			return(0);
		}
	}

	/*
	 * ref the record.  The record is protected from backend B-Tree
	 * interactions by virtue of the cursor's IP lock.
	 */
	hammer_ref(&rec->lock);

	/*
	 * The record may have been deleted or committed while we
	 * were blocked.  XXX remove?
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
		hammer_rel_mem_record(rec);
		return(0);
	}

	/*
	 * Set the matching record and stop the scan.
	 */
	cursor->iprec = rec;
	return(-1);
}

/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 *
 * The API for mem/btree_lookup() does not mess with the ATE/EOF bits.
 */
static int
hammer_mem_lookup(hammer_cursor_t cursor)
{
	KKASSERT(cursor->ip);
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
	hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
				   hammer_rec_scan_callback, cursor);

	return (cursor->iprec ? 0 : ENOENT);
}

/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 *
 * WARNING!  API is slightly different from btree_first().  hammer_mem_first()
 * will set ATEMEM the same as MEMEOF, and does not return any error.
 */
static void
hammer_mem_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	ip = cursor->ip;
	KKASSERT(ip != NULL);

	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
				   hammer_rec_scan_callback, cursor);
	if (cursor->iprec)
		cursor->flags &= ~(HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM);
	else
		cursor->flags |= HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM;
}

/************************************************************************
 *		     HAMMER IN-MEMORY RECORD FUNCTIONS			*
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */

/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 *
 * NOTE: bytes does not include any terminating \0 on name, and name might
 * not be terminated.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
		     struct hammer_inode *dip, const char *name, int bytes,
		     struct hammer_inode *ip)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	int error;
	u_int32_t max_iterations;

	record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));

	record->type = HAMMER_MEM_RECORD_ADD;
	record->leaf.base.localization = dip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.base.obj_id = dip->obj_id;
	record->leaf.base.key = hammer_directory_namekey(dip, name, bytes,
							  &max_iterations);
	record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->data->entry.obj_id = ip->obj_id;
	record->data->entry.localization = ip->obj_localization;
	bcopy(name, record->data->entry.name, bytes);

	++ip->ino_data.nlinks;
	ip->ino_data.ctime = trans->time;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

	/*
	 * Find an unused namekey.  Both the in-memory record tree and
	 * the B-Tree are checked.  We do not want historically deleted
	 * names to create a collision as our iteration space may be limited,
	 * and since create_tid wouldn't match anyway an ASOF search
	 * must be used to locate collisions.
	 *
	 * delete-visibility is set so pending deletions do not give us
	 * a false-negative on our ability to use an iterator.
	 *
	 * The iterator must not rollover the key.  Directory keys only
	 * use the positive key space.
	 */
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg = record->leaf.base;
	cursor.flags |= HAMMER_CURSOR_ASOF;
	cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor.asof = ip->obj_asof;

	while (hammer_ip_lookup(&cursor) == 0) {
		++record->leaf.base.key;
		KKASSERT(record->leaf.base.key > 0);
		cursor.key_beg.key = record->leaf.base.key;
		if (--max_iterations == 0) {
			hammer_rel_mem_record(record);
			error = ENOSPC;
			goto failed;
		}
	}

	/*
	 * The target inode and the directory entry are bound together.
	 */
	record->target_ip = ip;
	record->flush_state = HAMMER_FST_SETUP;
	TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

	/*
	 * The inode now has a dependency and must be taken out of the idle
	 * state.  An inode not in an idle state is given an extra reference.
	 *
	 * When transitioning to a SETUP state flag for an automatic reflush
	 * when the dependencies are disposed of if someone is waiting on
	 * the inode.
	 */
	if (ip->flush_state == HAMMER_FST_IDLE) {
		hammer_ref(&ip->lock);
		ip->flush_state = HAMMER_FST_SETUP;
		if (ip->flags & HAMMER_INODE_FLUSHW)
			ip->flags |= HAMMER_INODE_REFLUSH;
	}

	error = hammer_mem_add(record);
	if (error == 0) {
		dip->ino_data.mtime = trans->time;
		hammer_modify_inode(dip, HAMMER_INODE_MTIME);
	}
failed:
	hammer_done_cursor(&cursor);
	return(error);
}

/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be seeked to the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.  It could also be NULL indicating that the directory
 * entry being removed has no related inode.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
		     hammer_cursor_t cursor, struct hammer_inode *dip,
		     struct hammer_inode *ip)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * In-memory (unsynchronized) records can simply be freed.
		 *
		 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
		 * by the backend, we must still avoid races against the
		 * backend potentially syncing the record to the media.
		 *
		 * We cannot call hammer_ip_delete_record(), that routine may
		 * only be called from the backend.
		 */
		record = cursor->iprec;
		if (record->flags & (HAMMER_RECF_INTERLOCK_BE |
				     HAMMER_RECF_DELETED_BE |
				     HAMMER_RECF_COMMITTED)) {
			KKASSERT(cursor->deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor->deadlk_rec = record;
			error = EDEADLK;
		} else {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	} else {
		/*
		 * If the record is on-disk we have to queue the deletion by
		 * the record's key.  This also causes lookups to skip the
		 * record.
		 */
		KKASSERT(dip->flags &
			 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
		record = hammer_alloc_mem_record(dip, 0);
		record->type = HAMMER_MEM_RECORD_DEL;
		record->leaf.base = cursor->leaf->base;

		/*
		 * ip may be NULL, indicating the deletion of a directory
		 * entry which has no related inode.
		 */
		record->target_ip = ip;
		if (ip) {
			record->flush_state = HAMMER_FST_SETUP;
			TAILQ_INSERT_TAIL(&ip->target_list, record,
					  target_entry);
		} else {
			record->flush_state = HAMMER_FST_IDLE;
		}

		/*
		 * The inode now has a dependency and must be taken out of
		 * the idle state.  An inode not in an idle state is given
		 * an extra reference.
		 *
		 * When transitioning to a SETUP state flag for an automatic
		 * reflush when the dependencies are disposed of if someone
		 * is waiting on the inode.
		 */
		if (ip && ip->flush_state == HAMMER_FST_IDLE) {
			hammer_ref(&ip->lock);
			ip->flush_state = HAMMER_FST_SETUP;
			if (ip->flags & HAMMER_INODE_FLUSHW)
				ip->flags |= HAMMER_INODE_REFLUSH;
		}

		error = hammer_mem_add(record);
	}

	/*
	 * One less link.  The file may still be open in the OS even after
	 * all links have gone away.
	 *
	 * We have to terminate the cursor before syncing the inode to
	 * avoid deadlocking against ourselves.  XXX this may no longer
	 * be true.
	 *
	 * If nlinks drops to zero and the vnode is inactive (or there is
	 * no vnode), call hammer_inode_unloadable_check() to zonk the
	 * inode.  If we don't do this here the inode will not be destroyed
	 * on-media until we unmount.
	 */
	if (error == 0) {
		--ip->ino_data.nlinks;	/* do before we might block */
		ip->ino_data.ctime = trans->time;
		dip->ino_data.mtime = trans->time;
		hammer_modify_inode(dip, HAMMER_INODE_MTIME);
		hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		if (ip->ino_data.nlinks == 0 &&
		    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
			hammer_done_cursor(cursor);
			hammer_inode_unloadable_check(ip, 1);
			hammer_flush_inode(ip, 0);
		}
	}
	return(error);
}

/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * record->rec.entry.base.base.key
 * record->rec.entry.base.base.rec_type
 * record->rec.entry.base.base.data_len
 * record->data		(a copy will be kmalloc'd if it cannot be embedded)
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int error;

	KKASSERT(record->leaf.base.localization != 0);
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	error = hammer_mem_add(record);
	return(error);
}

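/*
 * Illustrative caller sketch (not part of the original source): a general
 * record is allocated, the fields listed in the comment above are filled
 * in, and the reference is handed to hammer_ip_add_record().  The key,
 * rec_type, datalen and src values are hypothetical:
 *
 *	record = hammer_alloc_mem_record(ip, datalen);
 *	record->leaf.base.localization = ip->obj_localization +
 *					 HAMMER_LOCALIZE_MISC;
 *	record->leaf.base.key = key;
 *	record->leaf.base.rec_type = rec_type;
 *	bcopy(src, record->data, datalen);
 *	error = hammer_ip_add_record(trans, record);
 */
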
/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
	struct hammer_bulk_info info;

	bzero(&info, sizeof(info));
	info.leaf.base.obj_id = ip->obj_id;
	info.leaf.base.key = file_offset + bytes;
	info.leaf.base.create_tid = 0;
	info.leaf.base.delete_tid = 0;
	info.leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	info.leaf.base.obj_type = 0;				/* unused */
	info.leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;	/* unused */
	info.leaf.base.localization = ip->obj_localization +	/* unused */
				      HAMMER_LOCALIZE_MISC;
	info.leaf.data_len = bytes;

	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_overlap_cmp,
				   hammer_bulk_scan_callback, &info);

	return(info.record);	/* may be NULL */
}

/*
 * Take records vetted by overlap_cmp.  The first non-deleted record
 * (if any) stops the scan.
 */
static int
hammer_bulk_scan_callback(hammer_record_t record, void *data)
{
	struct hammer_bulk_info *info = data;

	if (record->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			     HAMMER_RECF_COMMITTED)) {
		return(0);
	}
	hammer_ref(&record->lock);
	info->record = record;
	return(-1);			/* stop scan */
}

/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the frontend in order to be able to directly
 * flush a buffer cache buffer.  The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
 *
 * The caller is responsible for adding the returned record.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
		   int *errorp)
{
	hammer_record_t record;
	hammer_record_t conflict;
	int zone;

	/*
	 * Deal with conflicting in-memory records.  We cannot have multiple
	 * in-memory records for the same base offset without seriously
	 * confusing the backend, including but not limited to the backend
	 * issuing delete-create-delete or create-delete-create sequences
	 * and asserting on the delete_tid being the same as the create_tid.
	 *
	 * If we encounter a record with the backend interlock set we cannot
	 * immediately delete it without confusing the backend.
	 */
	while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
		if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
			conflict->flags |= HAMMER_RECF_WANTED;
			tsleep(conflict, 0, "hmrrc3", 0);
		} else {
			conflict->flags |= HAMMER_RECF_DELETED_FE;
		}
		hammer_rel_mem_record(conflict);
	}

	/*
	 * Create a record to cover the direct write.  This is called with
	 * the related BIO locked so there should be no possible conflict.
	 *
	 * The backend is responsible for finalizing the space reserved in
	 * this record.
	 *
	 * XXX bytes not aligned, depend on the reservation code to
	 * align the reservation.
	 */
	record = hammer_alloc_mem_record(ip, 0);
	zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
					   HAMMER_ZONE_SMALL_DATA_INDEX;
	record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
					       &record->leaf.data_offset,
					       errorp);
	if (record->resv == NULL) {
		kprintf("hammer_ip_add_bulk: reservation failed\n");
		hammer_rel_mem_record(record);
		return(NULL);
	}
	record->type = HAMMER_MEM_RECORD_DATA;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.key = file_offset + bytes;
	record->leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.data_len = bytes;
	hammer_crc_set_leaf(data, &record->leaf);
	KKASSERT(*errorp == 0);
	return(record);
}

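/*
 * Usage sketch (illustrative, assumptions noted): the frontend write path
 * reserves space for a buffer it is about to write directly; per the
 * comment above, the caller must still add the returned record itself
 * once the direct write has been issued against record->leaf.data_offset:
 *
 *	record = hammer_ip_add_bulk(ip, file_offset, data, bytes, &error);
 *	if (record == NULL)
 *		return (error);
 */
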
/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
	struct rec_trunc_info info;

	switch(ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_REGFILE:
		info.rec_type = HAMMER_RECTYPE_DATA;
		break;
	case HAMMER_OBJTYPE_DBFILE:
		info.rec_type = HAMMER_RECTYPE_DB;
		break;
	default:
		return(EINVAL);
	}
	info.trunc_off = file_size;
	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
				   hammer_frontend_trunc_callback, &info);
	return(0);
}

static int
hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
{
	if (record->flags & HAMMER_RECF_DELETED_FE)
		return(0);
	if (record->flush_state == HAMMER_FST_FLUSH)
		return(0);
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	hammer_ref(&record->lock);
	record->flags |= HAMMER_RECF_DELETED_FE;
	hammer_rel_mem_record(record);
	return(0);
}

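/*
 * Usage sketch (illustrative): the frontend setattr path discards cached
 * data records wholly beyond a hypothetical new file size and leaves the
 * partial block to the caller, e.g.:
 *
 *	error = hammer_ip_frontend_trunc(ip, new_size);
 */
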
/*
 * Return 1 if the caller must check for and delete existing records
 * before writing out a new data record.
 *
 * Return 0 if the caller can just insert the record into the B-Tree without
 * deleting anything.
 */
static int
hammer_record_needs_overwrite_delete(hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int64_t file_offset;
	int r;

	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
		file_offset = record->leaf.base.key;
	else
		file_offset = record->leaf.base.key - record->leaf.data_len;
	r = (file_offset < ip->save_trunc_off);
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		if (ip->save_trunc_off <= record->leaf.base.key)
			ip->save_trunc_off = record->leaf.base.key + 1;
	} else {
		if (ip->save_trunc_off < record->leaf.base.key)
			ip->save_trunc_off = record->leaf.base.key;
	}
	return(r);
}

/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
	hammer_transaction_t trans = cursor->trans;
	int64_t file_offset;
	int bytes;
	void *bdata;
	int error;
	int doprop;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
	KKASSERT(record->leaf.base.localization != 0);

	/*
	 * Any direct-write related to the record must complete before we
	 * can sync the record to the on-disk media.
	 */
	if (record->flags & (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL))
		hammer_io_direct_wait(record);

	/*
	 * If this is a bulk-data record placemarker there may be an existing
	 * record on-disk, indicating a data overwrite.  If there is the
	 * on-disk record must be deleted before we can insert our new record.
	 *
	 * We've synthesized this record and do not know what the create_tid
	 * on-disk is, nor how much data it represents.
	 *
	 * Keep in mind that (key) for data records is (base_offset + len),
	 * not (base_offset).  Also, we only want to get rid of on-disk
	 * records since we are trying to sync our in-memory record, call
	 * hammer_ip_delete_range() with truncating set to 1 to make sure
	 * it skips in-memory records.
	 *
	 * It is ok for the lookup to return ENOENT.
	 *
	 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
	 * to call hammer_ip_delete_range() or not.  This also means we must
	 * update sync_trunc_off() as we write.
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA &&
	    hammer_record_needs_overwrite_delete(record)) {
		file_offset = record->leaf.base.key - record->leaf.data_len;
		bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
			~HAMMER_BUFMASK;
		KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
		error = hammer_ip_delete_range(
				cursor, record->ip,
				file_offset, file_offset + bytes - 1,
				1);
		if (error && error != ENOENT)
			goto done;
	}

	/*
	 * If this is a general record there may be an on-disk version
	 * that must be deleted before we can insert the new record.
	 */
	if (record->type == HAMMER_MEM_RECORD_GENERAL) {
		error = hammer_delete_general(cursor, record->ip,
					      &record->leaf);
		if (error && error != ENOENT)
			goto done;
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg = record->leaf.base;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Records can wind up on-media before the inode itself is on-media.
	 */
	record->ip->flags |= HAMMER_INODE_DONDISK;

	/*
	 * If we are deleting a directory entry an exact match must be
	 * found on-disk.
	 */
	if (record->type == HAMMER_MEM_RECORD_DEL) {
		error = hammer_btree_lookup(cursor);
		if (error == 0) {
			KKASSERT(cursor->iprec == NULL);
			error = hammer_ip_delete_record(cursor, record->ip,
							trans->tid);
			if (error == 0) {
				record->flags |= HAMMER_RECF_DELETED_BE |
						 HAMMER_RECF_COMMITTED;
				++record->ip->rec_generation;
			}
		}
		goto done;
	}

	/*
	 * Issue a lookup to position the cursor and locate the insertion
	 * point.  The target key should not exist.  If we are creating a
	 * directory entry we may have to iterate the low 32 bits of the
	 * key to find an unused key.
	 */
	hammer_sync_lock_sh(trans);
	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (hammer_debug_inode)
		kprintf("DOINSERT LOOKUP %d\n", error);
	if (error == 0) {
		kprintf("hammer_ip_sync_record: duplicate rec "
			"at (%016llx)\n", (long long)record->leaf.base.key);
		Debugger("duplicate record1");
		error = EIO;
	}
#if 0
	if (record->type == HAMMER_MEM_RECORD_DATA)
		kprintf("sync_record %016llx ---------------- %016llx %d\n",
			record->leaf.base.key - record->leaf.data_len,
			record->leaf.data_offset, error);
#endif

	if (error != ENOENT)
		goto done_unlock;

	/*
	 * Allocate the record and data.  The result buffers will be
	 * marked as being modified and further calls to
	 * hammer_modify_buffer() will result in unneeded UNDO records.
	 *
	 * Support zero-fill records (data == NULL and data_len != 0)
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA) {
		/*
		 * The data portion of a bulk-data record has already been
		 * committed to disk, we need only adjust the layer2
		 * statistics in the same transaction as our B-Tree insert.
		 */
		KKASSERT(record->leaf.data_offset != 0);
		error = hammer_blockmap_finalize(trans,
						 record->resv,
						 record->leaf.data_offset,
						 record->leaf.data_len);
	} else if (record->data && record->leaf.data_len) {
		/*
		 * Wholly cached record, with data.  Allocate the data.
		 */
		bdata = hammer_alloc_data(trans, record->leaf.data_len,
					  record->leaf.base.rec_type,
					  &record->leaf.data_offset,
					  &cursor->data_buffer,
					  &error);
		if (bdata == NULL)
			goto done_unlock;
		hammer_crc_set_leaf(record->data, &record->leaf);
		hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
		bcopy(record->data, bdata, record->leaf.data_len);
		hammer_modify_buffer_done(cursor->data_buffer);
	} else {
		/*
		 * Wholly cached record, without data.
		 */
		record->leaf.data_offset = 0;
		record->leaf.data_crc = 0;
	}

	error = hammer_btree_insert(cursor, &record->leaf, &doprop);
	if (hammer_debug_inode && error) {
		kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
			error,
			(long long)cursor->node->node_offset,
			cursor->index,
			(long long)record->leaf.base.key);
	}

	/*
	 * Our record is on-disk and we normally mark the in-memory version
	 * as having been committed (and not BE-deleted).
	 *
	 * If the record represented a directory deletion but we had to
	 * sync a valid directory entry to disk due to dependencies,
	 * we must convert the record to a covering delete so the
	 * frontend does not have visibility on the synced entry.
	 */
	if (error == 0) {
		if (doprop) {
			hammer_btree_do_propagation(cursor,
						    record->ip->pfsm,
						    &record->leaf);
		}
		if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
			/*
			 * Must convert deleted directory entry add
			 * to a directory entry delete.
			 */
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags &= ~HAMMER_RECF_DELETED_FE;
			record->type = HAMMER_MEM_RECORD_DEL;
			KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
			record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
			KKASSERT((record->flags & (HAMMER_RECF_COMMITTED |
						   HAMMER_RECF_DELETED_BE)) == 0);
			/* converted record is not yet committed */
			/* hammer_flush_record_done takes care of the rest */
		} else {
			/*
			 * Everything went fine and we are now done with
			 * this record.
			 */
			record->flags |= HAMMER_RECF_COMMITTED;
			++record->ip->rec_generation;
		}
	} else {
		if (record->leaf.data_offset) {
			hammer_blockmap_free(trans, record->leaf.data_offset,
					     record->leaf.data_len);
		}
	}
done_unlock:
	hammer_sync_unlock(trans);
done:
	return(error);
}

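/*
 * Backend flow sketch (illustrative, not part of the original file): the
 * flusher typically interlocks a record, syncs it with the routine above,
 * and then retires it:
 *
 *	record->flags |= HAMMER_RECF_INTERLOCK_BE;
 *	error = hammer_ip_sync_record_cursor(cursor, record);
 *	hammer_flush_record_done(record, error);
 */
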
/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key is used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be remembered and replaced with a kmalloc'd copy if the record
 * is inserted.
 */
static int
hammer_mem_add(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	/*
	 * Make a private copy of record->data
	 */
	if (record->data)
		KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

	/*
	 * Insert into the RB tree.  A unique key should have already
	 * been selected if this is a directory entry.
	 */
	if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
		return (EEXIST);
	}
	++hmp->count_newrecords;
	++hmp->rsv_recs;
	++record->ip->rsv_recs;
	record->ip->hmp->rsv_databytes += record->leaf.data_len;
	record->flags |= HAMMER_RECF_ONRBTREE;
	hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
	hammer_rel_mem_record(record);
	return(0);
}

/************************************************************************
 *		     HAMMER INODE MERGED-RECORD FUNCTIONS		*
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */

/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
	int error;

	/*
	 * If the element is in-memory return it without searching the
	 * on-disk B-Tree
	 */
	KKASSERT(cursor->ip);
	error = hammer_mem_lookup(cursor);
	if (error == 0) {
		cursor->leaf = &cursor->iprec->leaf;
		return(error);
	}
	if (error != ENOENT)
		return(error);

	/*
	 * If the inode has on-disk components search the on-disk B-Tree.
	 */
	if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
		return(error);
	error = hammer_btree_lookup(cursor);
	if (error == 0)
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
	return(error);
}

/*
 * Helper for hammer_ip_first()/hammer_ip_next()
 *
 * NOTE: Both ATEDISK and DISKEOF will be set the same.  This sets up
 * hammer_ip_first() for calling hammer_ip_next(), and sets up the re-seek
 * state if hammer_ip_next() needs to re-seek.
 */
static int
_hammer_ip_seek_btree(hammer_cursor_t cursor)
{
	hammer_inode_t ip = cursor->ip;
	int error;

	if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
		error = hammer_btree_lookup(cursor);
		if (error == ENOENT || error == EDEADLK) {
			if (hammer_debug_general & 0x2000) {
				kprintf("error %d node %p %016llx index %d\n",
					error, cursor->node,
					(long long)cursor->node->node_offset,
					cursor->index);
			}
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			error = hammer_btree_iterate(cursor);
		}
		if (error == 0) {
			cursor->flags &= ~(HAMMER_CURSOR_DISKEOF |
					   HAMMER_CURSOR_ATEDISK);
		} else {
			cursor->flags |= HAMMER_CURSOR_DISKEOF |
					 HAMMER_CURSOR_ATEDISK;
			if (error == ENOENT)
				error = 0;
		}
	} else {
		cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_ATEDISK;
		error = 0;
	}
	return(error);
}

/*
 * Helper for hammer_ip_next()
 *
 * The caller has determined that the media cursor is further along than the
 * memory cursor and must be reseeked after a generation number change.
 */
static int
_hammer_ip_reseek(hammer_cursor_t cursor)
{
	struct hammer_base_elm save;
	hammer_btree_elm_t elm;
	int error;
	int r;
	int again = 0;

	kprintf("HAMMER: Debug: re-seeked during scan @ino=%016llx\n",
		(long long)cursor->ip->obj_id);
	save = cursor->key_beg;
	cursor->key_beg = cursor->iprec->leaf.base;
	error = _hammer_ip_seek_btree(cursor);
	KKASSERT(error == 0);
	cursor->key_beg = save;

	/*
	 * If the memory record was previous returned to
	 * the caller and the media record matches
	 * (-1/+1: only create_tid differs), then iterate
	 * the media record to avoid a double result.
	 */
	if ((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0 &&
	    (cursor->flags & HAMMER_CURSOR_LASTWASMEM)) {
		elm = &cursor->node->ondisk->elms[cursor->index];
		r = hammer_btree_cmp(&elm->base,
				     &cursor->iprec->leaf.base);
		if (cursor->flags & HAMMER_CURSOR_ASOF) {
			if (r >= -1 && r <= 1) {
				kprintf("HAMMER: Debug: iterated after "
					"re-seek (asof r=%d)\n", r);
				cursor->flags |= HAMMER_CURSOR_ATEDISK;
				again = 1;
			}
		} else {
			if (r == 0) {
				kprintf("HAMMER: Debug: iterated after "
					"re-seek\n");
				cursor->flags |= HAMMER_CURSOR_ATEDISK;
				again = 1;
			}
		}
	}
	return(again);
}

/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip = cursor->ip;
	int error;

	KKASSERT(ip != NULL);

	/*
	 * Clean up fields and setup for merged scan
	 */
	cursor->flags &= ~HAMMER_CURSOR_RETEST;

	/*
	 * Search the in-memory record list (Red-Black tree).  Unlike the
	 * B-Tree search, mem_first checks for records in the range.
	 *
	 * This function will setup both ATEMEM and MEMEOF properly for
	 * the ip iteration.  ATEMEM will be set if MEMEOF is set.
	 */
	hammer_mem_first(cursor);

	/*
	 * Detect generation changes during blockages, including
	 * blockages which occur on the initial btree search.
	 */
	cursor->rec_generation = cursor->ip->rec_generation;

	/*
	 * Initial search and result
	 */
	error = _hammer_ip_seek_btree(cursor);
	if (error == 0)
		error = hammer_ip_next(cursor);

	return (error);
}

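/*
 * Usage sketch (illustrative only): a typical merged range scan by a
 * frontend caller, assuming key_beg/key_end were already set up on a
 * cursor initialized with hammer_init_cursor():
 *
 *	error = hammer_ip_first(&cursor);
 *	while (error == 0) {
 *		error = hammer_ip_resolve_data(&cursor);
 *		if (error)
 *			break;
 *		(use cursor.leaf and cursor.data here)
 *		error = hammer_ip_next(&cursor);
 *	}
 *	if (error == ENOENT)
 *		error = 0;
 */
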
/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor.  This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * There are numerous special cases in this code to deal with races between
 * in-memory records and on-media records.
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code is returned.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
	hammer_btree_elm_t elm;
	hammer_record_t rec;
	hammer_record_t tmprec;
	int error;
	int r;

again:
	/*
	 * Get the next on-disk record
	 *
	 * NOTE: If we deleted the last on-disk record we had scanned
	 *	 ATEDISK will be clear and RETEST will be set, forcing
	 *	 a call to iterate.  The fact that ATEDISK is clear causes
	 *	 iterate to re-test the 'current' element.  If ATEDISK is
	 *	 set, iterate will skip the 'current' element.
	 */
	error = 0;
	if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
		if (cursor->flags & (HAMMER_CURSOR_ATEDISK |
				     HAMMER_CURSOR_RETEST)) {
			error = hammer_btree_iterate(cursor);
			cursor->flags &= ~HAMMER_CURSOR_RETEST;
			if (error == 0) {
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
				hammer_cache_node(&cursor->ip->cache[1],
						  cursor->node);
			} else if (error == ENOENT) {
				cursor->flags |= HAMMER_CURSOR_DISKEOF |
						 HAMMER_CURSOR_ATEDISK;
				error = 0;
			}
		}
	}

	/*
	 * If the generation changed the backend has deleted or committed
	 * one or more memory records since our last check.
	 *
	 * When this case occurs if the disk cursor is > current memory record
	 * or the disk cursor is at EOF, we must re-seek the disk-cursor.
	 * Since the cursor is ahead it must have not yet been eaten (if
	 * not at eof anyway). (XXX data offset case?)
	 *
	 * NOTE: we are not doing a full check here.  That will be handled
	 * later on.
	 *
	 * If we have exhausted all memory records we do not have to do any
	 * further seeks.
	 */
	while (cursor->rec_generation != cursor->ip->rec_generation &&
	       error == 0) {
		kprintf("HAMMER: Debug: generation changed during scan @ino=%016llx\n", (long long)cursor->ip->obj_id);
		cursor->rec_generation = cursor->ip->rec_generation;
		if (cursor->flags & HAMMER_CURSOR_MEMEOF)
			break;
		if (cursor->flags & HAMMER_CURSOR_DISKEOF) {
			r = 1;
		} else {
			KKASSERT((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0);
			elm = &cursor->node->ondisk->elms[cursor->index];
			r = hammer_btree_cmp(&elm->base,
					     &cursor->iprec->leaf.base);
		}

		/*
		 * Do we re-seek the media cursor?
		 */
		if (r > 0) {
			if (_hammer_ip_reseek(cursor))
				goto again;
		}
	}

	/*
	 * We can now safely get the next in-memory record.  We cannot
	 * block here.
	 *
	 * hammer_rec_scan_cmp:  Is the record still in our general range,
	 *			 (non-inclusive of snapshot exclusions)?
	 * hammer_rec_scan_callback: Is the record in our snapshot?
	 */
	tmprec = NULL;
	if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
		/*
		 * If the current memory record was eaten then get the next
		 * one.  Stale records are skipped.
		 */
		if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
			tmprec = cursor->iprec;
			cursor->iprec = NULL;
			rec = hammer_rec_rb_tree_RB_NEXT(tmprec);
			while (rec) {
				if (hammer_rec_scan_cmp(rec, cursor) != 0)
					break;
				if (hammer_rec_scan_callback(rec, cursor) != 0)
					break;
				rec = hammer_rec_rb_tree_RB_NEXT(rec);
			}
			if (cursor->iprec) {
				KKASSERT(cursor->iprec == rec);
				cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
			} else {
				cursor->flags |= HAMMER_CURSOR_MEMEOF;
			}
			cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
		}
	}

	/*
	 * MEMORY RECORD VALIDITY TEST
	 *
	 * (We still can't block, which is why tmprec is being held so
	 * long).
	 *
	 * If the memory record is no longer valid we skip it.  It may
	 * have been deleted by the frontend.  If it was deleted or
	 * committed by the backend the generation change re-seeked the
	 * disk cursor and the record will be present there.
	 */
	if (error == 0 && (cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
		KKASSERT(cursor->iprec);
		KKASSERT((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0);
		if (!hammer_ip_iterate_mem_good(cursor, cursor->iprec)) {
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
			if (tmprec)
				hammer_rel_mem_record(tmprec);
			goto again;
		}
	}
	if (tmprec)
		hammer_rel_mem_record(tmprec);

	/*
	 * Extract either the disk or memory record depending on their
	 * relative position.
	 */
	switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
	case 0:
		/*
		 * Both entries valid.  Compare the entries and nominally
		 * return the first one in the sort order.  Numerous cases
		 * require special attention, however.
		 */
		elm = &cursor->node->ondisk->elms[cursor->index];
		r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);

		/*
		 * If the two entries differ only by their key (-2/2) or
		 * create_tid (-1/1), and are DATA records, we may have a
		 * nominal match.  We have to calculate the base file
		 * offset of the data.
		 */
		if (r <= 2 && r >= -2 && r != 0 &&
		    cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
		    cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
			int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
			int64_t base2 = cursor->iprec->leaf.base.key -
					cursor->iprec->leaf.data_len;
			if (base1 == base2)
				r = 0;
		}

		if (r < 0) {
			error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_LEAF);
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
			cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
			break;
		}

		/*
		 * If the entries match exactly the memory entry is either
		 * an on-disk directory entry deletion or a bulk data
		 * overwrite.  If it is a directory entry deletion we eat
		 * both entries.
		 *
		 * For the bulk-data overwrite case it is possible to have
		 * visibility into both, which simply means the syncer
		 * hasn't gotten around to doing the delete+insert sequence
		 * on the B-Tree.  Use the memory entry and throw away the
		 * on-disk entry.
		 *
		 * If the in-memory record is not either of these we
		 * probably caught the syncer while it was syncing it to
		 * the media.  Since we hold a shared lock on the cursor,
		 * the in-memory record had better be marked deleted at
		 * this point.
		 */
		if (r == 0) {
			if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
					cursor->flags |= HAMMER_CURSOR_ATEMEM;
					goto again;
				}
			} else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
				}
				/* fall through to memory entry */
			} else {
				panic("hammer_ip_next: duplicate mem/b-tree entry %p %d %08x", cursor->iprec, cursor->iprec->type, cursor->iprec->flags);
				cursor->flags |= HAMMER_CURSOR_ATEMEM;
				goto again;
			}
		}
		/* fall through to the memory entry */
	case HAMMER_CURSOR_ATEDISK:
		/*
		 * Only the memory entry is valid.
		 */
		cursor->leaf = &cursor->iprec->leaf;
		cursor->flags |= HAMMER_CURSOR_ATEMEM;
		cursor->flags |= HAMMER_CURSOR_LASTWASMEM;

		/*
		 * If the memory entry is an on-disk deletion we should have
		 * also had found a B-Tree record.  If the backend beat us
		 * to it it would have interlocked the cursor and we should
		 * have seen the in-memory record marked DELETED_FE.
		 */
		if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
		    (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
			panic("hammer_ip_next: del-on-disk with no b-tree entry iprec %p flags %08x", cursor->iprec, cursor->iprec->flags);
		}
		break;
	case HAMMER_CURSOR_ATEMEM:
		/*
		 * Only the disk entry is valid
		 */
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
		break;
	default:
		/*
		 * Neither entry is valid
		 *
		 * XXX error not set properly
		 */
		cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
		cursor->leaf = NULL;
		error = ENOENT;
		break;
	}
	return(error);
}

/*
 * Resolve the cursor->data pointer for the current cursor position in
 * a merged iteration.
 */
int
hammer_ip_resolve_data(hammer_cursor_t cursor)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * The data associated with an in-memory record is usually
		 * kmalloced, but reserve-ahead data records will have an
		 * on-disk reference.
		 *
		 * NOTE: Reserve-ahead data records must be handled in the
		 * context of the related high level buffer cache buffer
		 * to interlock against async writes.
		 */
		record = cursor->iprec;
		cursor->data = record->data;
		error = 0;
		if (cursor->data == NULL) {
			KKASSERT(record->leaf.base.rec_type ==
				 HAMMER_RECTYPE_DATA);
			cursor->data = hammer_bread_ext(cursor->trans->hmp,
						record->leaf.data_offset,
						record->leaf.data_len,
						&error,
						&cursor->data_buffer);
		}
	} else {
		cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
	}
	return(error);
}

/*
 * Backend truncation / record replacement - delete records in range.
 *
 * Delete all records within the specified range for inode ip.  In-memory
 * records still associated with the frontend are ignored.
 *
 * If truncating is non-zero in-memory records associated with the back-end
 * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
 *
 * NOTES:
 *
 *	* An unaligned range will cause new records to be added to cover
 *	  the edge cases. (XXX not implemented yet).
 *
 *	* Replacement via reservations (see hammer_ip_sync_record_cursor())
 *	  also does not deal with unaligned ranges.
 *
 *	* ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
 *
 *	* Record keys for regular file data have to be special-cased since
 *	  they indicate the end of the range (key = base + bytes).
 *
 *	* This function may be asked to delete ridiculously huge ranges, for
 *	  example if someone truncates or removes a 1TB regular file.  We
 *	  must be very careful on restarts and we may have to stop w/
 *	  EWOULDBLOCK to avoid blowing out the buffer cache.
 */
int
hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
		       int64_t ran_beg, int64_t ran_end, int truncating)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;
	int64_t off;
	int64_t tmp64;

1871 kprintf("delete_range %p %016llx-%016llx\n", ip
, ran_beg
, ran_end
);
1874 KKASSERT(trans
->type
== HAMMER_TRANS_FLS
);
1876 hammer_normalize_cursor(cursor
);
1877 cursor
->key_beg
.localization
= ip
->obj_localization
+
1878 HAMMER_LOCALIZE_MISC
;
1879 cursor
->key_beg
.obj_id
= ip
->obj_id
;
1880 cursor
->key_beg
.create_tid
= 0;
1881 cursor
->key_beg
.delete_tid
= 0;
1882 cursor
->key_beg
.obj_type
= 0;
1884 if (ip
->ino_data
.obj_type
== HAMMER_OBJTYPE_DBFILE
) {
1885 cursor
->key_beg
.key
= ran_beg
;
1886 cursor
->key_beg
.rec_type
= HAMMER_RECTYPE_DB
;
1889 * The key in the B-Tree is (base+bytes), so the first possible
1890 * matching key is ran_beg + 1.
1892 cursor
->key_beg
.key
= ran_beg
+ 1;
1893 cursor
->key_beg
.rec_type
= HAMMER_RECTYPE_DATA
;
1896 cursor
->key_end
= cursor
->key_beg
;
1897 if (ip
->ino_data
.obj_type
== HAMMER_OBJTYPE_DBFILE
) {
1898 cursor
->key_end
.key
= ran_end
;
1900 tmp64
= ran_end
+ MAXPHYS
+ 1; /* work around GCC-4 bug */
1901 if (tmp64
< ran_end
)
1902 cursor
->key_end
.key
= 0x7FFFFFFFFFFFFFFFLL
;
1904 cursor
->key_end
.key
= ran_end
+ MAXPHYS
+ 1;
1907 cursor
->asof
= ip
->obj_asof
;
1908 cursor
->flags
&= ~HAMMER_CURSOR_INITMASK
;
1909 cursor
->flags
|= HAMMER_CURSOR_ASOF
;
1910 cursor
->flags
|= HAMMER_CURSOR_DELETE_VISIBILITY
;
1911 cursor
->flags
|= HAMMER_CURSOR_BACKEND
;
1912 cursor
->flags
|= HAMMER_CURSOR_END_INCLUSIVE
;
1914 error
= hammer_ip_first(cursor
);
	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);
		KKASSERT(leaf->base.obj_id == ip->obj_id);

		/*
		 * There may be overlap cases for regular file data.  Also
		 * remember the key for a regular file record is (base + len),
		 * NOT (base).
		 *
		 * Note that due to duplicates (mem & media) allowed by
		 * DELETE_VISIBILITY, off can wind up less than ran_beg.
		 */
		if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
			off = leaf->base.key - leaf->data_len;
			/*
			 * Check the left edge case.  We currently do not
			 * split existing records.
			 */
			if (off < ran_beg && leaf->base.key > ran_beg) {
				panic("hammer left edge case %016llx %d\n",
					(long long)leaf->base.key,
					leaf->data_len);
			}

			/*
			 * Check the right edge case.  Note that the
			 * record can be completely out of bounds, which
			 * terminates the search.
			 *
			 * base->key is exclusive of the right edge while
			 * ran_end is inclusive of the right edge.  The
			 * (key - data_len) left boundary is inclusive.
			 *
			 * XXX theory-check this test at some point, are
			 * we missing a + 1 somewhere?  Note that ran_end
			 * could overflow.
			 */
			if (leaf->base.key - 1 > ran_end) {
				if (leaf->base.key - leaf->data_len > ran_end)
					break;
				panic("hammer right edge case\n");
			}
		} else {
			off = leaf->base.key;
		}

		/*
		 * Delete the record.  When truncating we do not delete
		 * in-memory (data) records because they represent data
		 * written after the truncation.
		 *
		 * This will also physically destroy the B-Tree entry and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
		 * to retest the new 'current' element.
		 */
		if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			/*
			 * If we have built up too many meta-buffers we risk
			 * deadlocking the kernel and must stop.  This can
			 * occur when deleting ridiculously huge files.
			 * sync_trunc_off is updated so the next cycle does
			 * not re-iterate records we have already deleted.
			 *
			 * This is only done with formal truncations.
			 */
			if (truncating > 1 && error == 0 &&
			    hammer_flusher_meta_limit(ip->hmp)) {
				ip->sync_trunc_off = off;
				error = EWOULDBLOCK;
			}
		}
		if (error)
			break;
		ran_beg = off;	/* for restart */
		error = hammer_ip_next(cursor);
	}
	if (cursor->node)
		hammer_cache_node(&ip->cache[1], cursor->node);

	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}

/*
 * This backend function deletes the specified record on-disk, similar to
 * delete_range but for a specific record.  Unlike the exact deletions
 * used when deleting a directory entry this function uses an ASOF search
 * like delete_range.
 *
 * This function may be called with ip->obj_asof set for a slave snapshot,
 * so don't use it.  We always delete non-historical records only.
 */
static int
hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg = leaf->base;
	cursor->asof = HAMMER_MAX_TID;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		error = hammer_ip_delete_record(cursor, ip, trans->tid);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	return (error);
}
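
/*
 * Illustrative usage (an assumption, not taken from the original file):
 * a backend path that already holds a cursor and needs to dispose of an
 * older on-media copy of a record could call the helper above roughly as
 * follows, where "record" stands for any hammer_record_t the caller owns.
 *
 *	error = hammer_delete_general(cursor, ip, &record->leaf);
 *	if (error)
 *		return (error);
 */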
/*
 * This function deletes remaining auxiliary records when an inode is
 * being deleted.  This function explicitly does not delete the
 * inode record, directory entry, data, or db records.  Those must be
 * properly disposed of prior to this call.
 */
int
hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
	cursor->key_beg.key = HAMMER_MIN_KEY;

	cursor->key_end = cursor->key_beg;
	cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
	cursor->key_end.key = HAMMER_MAX_KEY;

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);

		/*
		 * Mark the record and B-Tree entry as deleted.  This will
		 * also physically delete the B-Tree entry, record, and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
		 * to retest the new 'current' element.
		 *
		 * Directory entries (and delete-on-disk directory entries)
		 * must be synced and cannot be deleted.
		 */
		error = hammer_ip_delete_record(cursor, ip, trans->tid);
		++*countp;
		if (error)
			break;
		error = hammer_ip_next(cursor);
	}
	hammer_cache_node(&ip->cache[1], cursor->node);
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return (error);
}
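
/*
 * Illustrative usage (assumption): the inode destruction path could sweep
 * any remaining auxiliary records with something like the following; the
 * count variable simply reports how many records were marked deleted.
 *
 *	int count = 0;
 *
 *	error = hammer_ip_delete_clean(cursor, ip, &count);
 */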
/*
 * Delete the record at the current cursor.  On success the cursor will
 * be positioned appropriately for an iteration but may no longer be at
 * a leaf node.
 *
 * This routine is only called from the backend.
 *
 * NOTE: This can return EDEADLK, requiring the caller to terminate the
 *	 cursor and retry.
 */
int
hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid)
{
	hammer_record_t iprec;
	hammer_mount_t hmp;
	int error;

	KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
	KKASSERT(tid != 0);
	hmp = cursor->node->hmp;

	/*
	 * In-memory (unsynchronized) records can simply be freed.  This
	 * only occurs in range iterations since all other records are
	 * individually synchronized.  Thus there should be no confusion with
	 * the interlock.
	 *
	 * An in-memory record may be deleted before being committed to disk,
	 * but could have been accessed in the mean time.  The reservation
	 * code will deal with the case.
	 */
	if (hammer_cursor_inmem(cursor)) {
		iprec = cursor->iprec;
		KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
		iprec->flags |= HAMMER_RECF_DELETED_FE;
		iprec->flags |= HAMMER_RECF_DELETED_BE;
		KKASSERT(iprec->ip == ip);
		++ip->rec_generation;
		return (0);
	}

	/*
	 * On-disk records are marked as deleted by updating their delete_tid.
	 * This does not affect their position in the B-Tree (which is based
	 * on their create_tid).
	 *
	 * Frontend B-Tree operations track inodes so we tell
	 * hammer_delete_at_cursor() not to.
	 */
	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);

	if (error == 0) {
		error = hammer_delete_at_cursor(
				cursor,
				HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
				cursor->trans->tid,
				cursor->trans->time32,
				0, NULL);
	}
	return (error);
}
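
/*
 * For reference (inferred, not part of the original comments): the call
 * above composes its flags from HAMMER_DELETE_ADJUST plus whatever
 * hammer_nohistory(ip) contributes, so a no-history inode or mount is
 * expected to get the destructive variant, roughly equivalent to:
 *
 *	delete_flags = HAMMER_DELETE_ADJUST;
 *	if (nohistory)					(assumption)
 *		delete_flags |= HAMMER_DELETE_DESTROY;
 */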
/*
 * Delete the B-Tree element at the current cursor and do any necessary
 * mirror propagation.
 *
 * The cursor must be properly positioned for an iteration on return but
 * may be pointing at an internal element.
 *
 * An element can be un-deleted by passing a delete_tid of 0 with
 * HAMMER_DELETE_ADJUST.
 */
int
hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			hammer_tid_t delete_tid, u_int32_t delete_ts,
			int track, int64_t *stat_bytes)
{
	struct hammer_btree_leaf_elm save_leaf;
	hammer_transaction_t trans;
	hammer_btree_leaf_elm_t leaf;
	hammer_node_t node;
	hammer_btree_elm_t elm;
	hammer_off_t data_offset;
	int32_t data_len;
	u_int16_t rec_type;
	int error;
	int icount;
	int doprop;

	error = hammer_cursor_upgrade(cursor);
	if (error)
		return (error);

	trans = cursor->trans;
	node = cursor->node;
	elm = &node->ondisk->elms[cursor->index];
	leaf = &elm->leaf;
	KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);

	hammer_sync_lock_sh(trans);
	doprop = 0;
	icount = 0;

	/*
	 * Adjust the delete_tid.  Update the mirror_tid propagation field
	 * as well.  delete_tid can be 0 (undelete -- used by mirroring).
	 */
	if (delete_flags & HAMMER_DELETE_ADJUST) {
		if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
			if (elm->leaf.base.delete_tid == 0 && delete_tid)
				icount = -1;
			if (elm->leaf.base.delete_tid && delete_tid == 0)
				icount = 1;
		}

		hammer_modify_node(trans, node, elm, sizeof(*elm));
		elm->leaf.base.delete_tid = delete_tid;
		elm->leaf.delete_ts = delete_ts;
		hammer_modify_node_done(node);

		if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
			hammer_modify_node_field(trans, node, mirror_tid);
			node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
			hammer_modify_node_done(node);
			doprop = 1;
			if (hammer_debug_general & 0x0002) {
				kprintf("delete_at_cursor: propagate %016llx"
					" @%016llx\n",
					(long long)elm->leaf.base.delete_tid,
					(long long)node->node_offset);
			}
		}

		/*
		 * Adjust for the iteration.  We have deleted the current
		 * element and want to clear ATEDISK so the iteration does
		 * not skip the element after, which now becomes the current
		 * element.  This element must be re-tested if doing an
		 * iteration, which is handled by the RETEST flag.
		 */
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			cursor->flags |= HAMMER_CURSOR_RETEST;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		}

		/*
		 * An on-disk record cannot have the same delete_tid
		 * as its create_tid.  In a chain of record updates
		 * this could result in a duplicate record.
		 */
		KKASSERT(elm->leaf.base.delete_tid !=
			 elm->leaf.base.create_tid);
	}

	/*
	 * Destroy the B-Tree element if asked (typically if a nohistory
	 * file or mount, or when called by the pruning code).
	 *
	 * Adjust the ATEDISK flag to properly support iterations.
	 */
	if (delete_flags & HAMMER_DELETE_DESTROY) {
		data_offset = elm->leaf.data_offset;
		data_len = elm->leaf.data_len;
		rec_type = elm->leaf.base.rec_type;
		if (doprop) {
			save_leaf = elm->leaf;
			leaf = &save_leaf;
		}
		if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
		    elm->leaf.base.delete_tid == 0) {
			icount = -1;
		}

		error = hammer_btree_delete(cursor);
		if (error == 0) {
			/*
			 * The deletion moves the next element (if any) to
			 * the current element position.  We must clear
			 * ATEDISK so this element is not skipped and we
			 * must set RETEST to force any iteration to re-test
			 * the element.
			 */
			if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
				cursor->flags |= HAMMER_CURSOR_RETEST;
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			}
		}
		if (error == 0) {
			switch(data_offset & HAMMER_OFF_ZONE_MASK) {
			case HAMMER_ZONE_LARGE_DATA:
			case HAMMER_ZONE_SMALL_DATA:
			case HAMMER_ZONE_META:
				hammer_blockmap_free(trans,
						     data_offset, data_len);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Track inode count and next_tid.  This is used by the mirroring
	 * and PFS code.  icount can be negative, zero, or positive.
	 */
	if (error == 0 && track) {
		if (icount) {
			hammer_modify_volume_field(trans, trans->rootvol,
						   vol0_stat_inodes);
			trans->rootvol->ondisk->vol0_stat_inodes += icount;
			hammer_modify_volume_done(trans->rootvol);
		}
		if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
			hammer_modify_volume(trans, trans->rootvol, NULL, 0);
			trans->rootvol->ondisk->vol0_next_tid = delete_tid;
			hammer_modify_volume_done(trans->rootvol);
		}
	}

	/*
	 * mirror_tid propagation occurs if the node's mirror_tid had to be
	 * updated while adjusting the delete_tid.
	 *
	 * This occurs when deleting even in nohistory mode, but does not
	 * occur when pruning an already-deleted node.
	 *
	 * cursor->ip is NULL when called from the pruning, mirroring,
	 * and pfs code.  If non-NULL propagation will be conditionalized
	 * on whether the PFS is in no-history mode or not.
	 */
	if (doprop) {
		if (cursor->ip)
			hammer_btree_do_propagation(cursor, cursor->ip->pfsm,
						    leaf);
		else
			hammer_btree_do_propagation(cursor, NULL, leaf);
	}
	hammer_sync_unlock(trans);
	return (error);
}
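
/*
 * Illustrative call (assumption): per the comment above, mirroring code can
 * un-delete an element by adjusting its delete_tid back to zero.  A minimal
 * sketch, with inode-count/next_tid tracking enabled:
 *
 *	error = hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST,
 *					0, 0, 1, NULL);
 */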
/*
 * Determine whether we can remove a directory.  This routine checks whether
 * a directory is empty or not and enforces flush connectivity.
 *
 * Flush connectivity requires that we block if the target directory is
 * currently flushing, otherwise it may not end up in the same flush group.
 *
 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
 */
int
hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Check directory empty
	 */
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);

	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
	cursor.key_beg.key = HAMMER_MIN_KEY;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.rec_type = 0xFFFF;
	cursor.key_end.key = HAMMER_MAX_KEY;

	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);
	if (error == ENOENT)
		error = 0;
	else if (error == 0)
		error = ENOTEMPTY;
	hammer_done_cursor(&cursor);
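
/*
 * Illustrative usage (assumption): a directory-removal path would normally
 * run the emptiness check above before unlinking the directory entry, e.g.:
 *
 *	error = hammer_ip_check_directory_empty(trans, ip);
 *	if (error == 0) {
 *		... proceed with removing the directory entry ...
 *	}
 */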