/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.76 2008/06/26 04:06:23 dillon Exp $
 */
#include "hammer.h"

static int hammer_mem_add(hammer_record_t record);
static int hammer_mem_lookup(hammer_cursor_t cursor);
static int hammer_mem_first(hammer_cursor_t cursor);
static int hammer_rec_trunc_callback(hammer_record_t record,
				void *data __unused);
static int hammer_record_needs_overwrite_delete(hammer_record_t record);

struct rec_trunc_info {
	u_int16_t	rec_type;
	int64_t		trunc_off;
};
/*
 * Red-black tree support.  Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
	if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
		return(-1);
	if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
		return(1);

	if (rec1->leaf.base.key < rec2->leaf.base.key)
		return(-1);
	if (rec1->leaf.base.key > rec2->leaf.base.key)
		return(1);

	/*
	 * Never match against an item deleted by the front-end.
	 *
	 * rec1 is greater than rec2 if rec1 is marked deleted.
	 * rec1 is less than rec2 if rec2 is marked deleted.
	 *
	 * Multiple deleted records may be present, do not return 0
	 * if both are marked deleted.
	 */
	if (rec1->flags & HAMMER_RECF_DELETED_FE)
		return(1);
	if (rec2->flags & HAMMER_RECF_DELETED_FE)
		return(-1);

	return(0);
}
/*
 * Basic record comparison code similar to hammer_btree_cmp().
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
	if (elm->rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (elm->rec_type > rec->leaf.base.rec_type)
		return(3);

	if (elm->key < rec->leaf.base.key)
		return(-2);
	if (elm->key > rec->leaf.base.key)
		return(2);

	/*
	 * Never match against an item deleted by the front-end.
	 * elm is less than rec if rec is marked deleted.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_FE)
		return(-1);
	return(0);
}
/*
 * Special LOOKUP_INFO to locate an overlapping record.  This is used by
 * the reservation code to implement small-block records (whose keys will
 * be different depending on data_len, when representing the same base
 * offset).
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
static int
hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
{
	if (leaf->base.rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (leaf->base.rec_type > rec->leaf.base.rec_type)
		return(3);

	/*
	 * Overlap compare for DATA records, exact compare otherwise.
	 */
	if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
		/* leaf_end <= rec_beg */
		if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
			return(-2);
		/* leaf_beg >= rec_end */
		if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
			return(2);
	} else {
		if (leaf->base.key < rec->leaf.base.key)
			return(-2);
		if (leaf->base.key > rec->leaf.base.key)
			return(2);
	}

	/*
	 * Never match against an item deleted by the front-end.
	 * leaf is less than rec if rec is marked deleted.
	 *
	 * We must still return the proper code for the scan to continue
	 * along the correct branches.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_FE) {
		if (leaf->base.key < rec->leaf.base.key)
			return(-2);
		if (leaf->base.key > rec->leaf.base.key)
			return(2);
		return(-1);
	}
	return(0);
}
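/*
 * Illustration (added note, not part of the original source): a DATA
 * record covering file offsets [0x0000, 0x4000) carries key = 0x4000 and
 * data_len = 0x4000, so its base offset is key - data_len = 0.  A lookup
 * leaf with key = 0x6000 and data_len = 0x2000 spans [0x4000, 0x6000);
 * since leaf_beg (0x4000) >= rec_end (0x4000) the extents do not overlap
 * and the compare above returns 2.
 */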
/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
static
int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	r = hammer_rec_cmp(&cursor->key_end, rec);
	if (r < -1)
		return(1);
	return(0);
}

/*
 * This compare function is used when simply looking up key_beg.
 */
static
int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	if (r < -1)
		return(1);
	return(0);
}
/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static
int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
	struct rec_trunc_info *info = data;

	if (rec->leaf.base.rec_type < info->rec_type)
		return(-1);
	if (rec->leaf.base.rec_type > info->rec_type)
		return(1);

	switch(rec->leaf.base.rec_type) {
	case HAMMER_RECTYPE_DB:
		/*
		 * DB record key is not beyond the truncation point, retain.
		 */
		if (rec->leaf.base.key < info->trunc_off)
			return(-1);
		break;
	case HAMMER_RECTYPE_DATA:
		/*
		 * DATA record offset start is not beyond the truncation point,
		 * retain.
		 */
		if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
			return(-1);
		break;
	default:
		panic("hammer_rec_trunc_cmp: unexpected record type");
	}

	/*
	 * The record start is >= the truncation point, return match,
	 * the record should be destroyed.
	 */
	return(0);
}
RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
		    hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);
/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
	hammer_record_t record;

	++hammer_count_records;
	record = kmalloc(sizeof(*record), M_HAMMER, M_WAITOK | M_ZERO);
	record->flush_state = HAMMER_FST_IDLE;
	record->ip = ip;
	record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	record->leaf.data_len = data_len;
	hammer_ref(&record->lock);

	if (data_len) {
		record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
		record->flags |= HAMMER_RECF_ALLOCDATA;
		++hammer_count_record_datas;
	}

	return (record);
}
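/*
 * Typical frontend usage (sketch added for illustration, not part of the
 * original source).  The caller fills in the remaining leaf.base fields
 * and the data copy, then indexes the record in the in-memory RB tree:
 *
 *	record = hammer_alloc_mem_record(ip, data_len);
 *	record->leaf.base.localization = ip->obj_localization + ...;
 *	record->leaf.base.key = ...;
 *	record->leaf.base.rec_type = ...;
 *	bcopy(src, record->data, data_len);
 *	error = hammer_ip_add_record(trans, record);
 */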
void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
	while (record->flush_state == HAMMER_FST_FLUSH) {
		record->flags |= HAMMER_RECF_WANTED;
		tsleep(record, 0, ident, 0);
	}
}
/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
	hammer_inode_t target_ip;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

	if (error) {
		/*
		 * An error occurred, the backend was unable to sync the
		 * record to its media.  Leave the record intact.
		 */
		Debugger("flush_record_done error");
	}

	if (record->flags & HAMMER_RECF_DELETED_BE) {
		if ((target_ip = record->target_ip) != NULL) {
			TAILQ_REMOVE(&target_ip->target_list, record,
				     target_entry);
			record->target_ip = NULL;
			hammer_test_inode(target_ip);
		}
		record->flush_state = HAMMER_FST_IDLE;
	} else {
		if (record->target_ip) {
			record->flush_state = HAMMER_FST_SETUP;
			hammer_test_inode(record->ip);
			hammer_test_inode(record->target_ip);
		} else {
			record->flush_state = HAMMER_FST_IDLE;
		}
	}
	record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
	if (record->flags & HAMMER_RECF_WANTED) {
		record->flags &= ~HAMMER_RECF_WANTED;
		wakeup(record);
	}
	hammer_rel_mem_record(record);
}
/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
	hammer_inode_t ip, target_ip;

	hammer_unref(&record->lock);

	if (record->lock.refs == 0) {
		/*
		 * Upon release of the last reference wakeup any waiters.
		 * The record structure may get destroyed so callers will
		 * loop up and do a relookup.
		 *
		 * WARNING!  Record must be removed from RB-TREE before we
		 * might possibly block.  hammer_test_inode() can block!
		 */
		ip = record->ip;

		/*
		 * Upon release of the last reference a record marked deleted
		 * is destroyed.
		 */
		if (record->flags & HAMMER_RECF_DELETED_FE) {
			KKASSERT(ip->lock.refs > 0);
			KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

			/*
			 * target_ip may have zero refs, we have to ref it
			 * to prevent it from being ripped out from under
			 * us.
			 */
			if ((target_ip = record->target_ip) != NULL) {
				TAILQ_REMOVE(&target_ip->target_list,
					     record, target_entry);
				record->target_ip = NULL;
				hammer_ref(&target_ip->lock);
			}

			if (record->flags & HAMMER_RECF_ONRBTREE) {
				RB_REMOVE(hammer_rec_rb_tree,
					  &record->ip->rec_tree,
					  record);
				KKASSERT(ip->rsv_recs > 0);
				--ip->hmp->rsv_recs;
				--ip->rsv_recs;
				ip->hmp->rsv_databytes -= record->leaf.data_len;
				record->flags &= ~HAMMER_RECF_ONRBTREE;

				if (RB_EMPTY(&record->ip->rec_tree)) {
					record->ip->flags &= ~HAMMER_INODE_XDIRTY;
					record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
					hammer_test_inode(record->ip);
				}
			}

			/*
			 * Do this test after removing record from the B-Tree.
			 */
			if (target_ip) {
				hammer_test_inode(target_ip);
				hammer_rel_inode(target_ip, 0);
			}

			if (record->flags & HAMMER_RECF_ALLOCDATA) {
				--hammer_count_record_datas;
				kfree(record->data, M_HAMMER);
				record->flags &= ~HAMMER_RECF_ALLOCDATA;
			}
			if (record->resv) {
				hammer_blockmap_reserve_complete(ip->hmp,
								 record->resv);
				record->resv = NULL;
			}
			record->data = NULL;
			--hammer_count_records;
			kfree(record, M_HAMMER);
		}
	}
}
/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.
 */
static __inline
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
	if (cursor->flags & HAMMER_CURSOR_BACKEND) {
		if (record->flags & HAMMER_RECF_DELETED_BE)
			return(0);
	} else {
		if (record->flags & HAMMER_RECF_DELETED_FE)
			return(0);
	}
	return(1);
}
/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static
int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;

	/*
	 * We terminate on success, so this should be NULL on entry.
	 */
	KKASSERT(cursor->iprec == NULL);

	/*
	 * Skip if the record was marked deleted.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
		return(0);

	/*
	 * Skip if not visible due to our as-of TID
	 */
	if (cursor->flags & HAMMER_CURSOR_ASOF) {
		if (cursor->asof < rec->leaf.base.create_tid)
			return(0);
		if (rec->leaf.base.delete_tid &&
		    cursor->asof >= rec->leaf.base.delete_tid) {
			return(0);
		}
	}

	/*
	 * If the record is queued to the flusher we have to block until
	 * it isn't.  Otherwise we may see duplication between our memory
	 * cache and the media.
	 */
	hammer_ref(&rec->lock);

#warning "This deadlocks"
#if 0
	if (rec->flush_state == HAMMER_FST_FLUSH)
		hammer_wait_mem_record(rec);
#endif

	/*
	 * The record may have been deleted while we were blocked.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
		hammer_rel_mem_record(rec);
		return(0);
	}

	/*
	 * Set the matching record and stop the scan.
	 */
	cursor->iprec = rec;
	return(-1);
}
/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
	KKASSERT(cursor->ip);
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
	hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
				   hammer_rec_scan_callback, cursor);

	if (cursor->iprec == NULL)
		return(ENOENT);
	return(0);
}
/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 */
static
int
hammer_mem_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	ip = cursor->ip;
	KKASSERT(ip != NULL);

	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
				   hammer_rec_scan_callback, cursor);

	/*
	 * Adjust scan.node and keep it linked into the RB-tree so we can
	 * hold the cursor through third party modifications of the RB-tree.
	 */
	if (cursor->iprec)
		return(0);
	return(ENOENT);
}

void
hammer_mem_done(hammer_cursor_t cursor)
{
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
}
/************************************************************************
 *		     HAMMER IN-MEMORY RECORD FUNCTIONS			*
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */
/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when syncing to disk, which is not handled here.
 *
 * NOTE: bytes does not include any terminating \0 on name, and name might
 * not be terminated.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
		     struct hammer_inode *dip, const char *name, int bytes,
		     struct hammer_inode *ip)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	u_int32_t iterator;
	int count;
	int error;

	record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
	if (++trans->hmp->namekey_iterator == 0)
		++trans->hmp->namekey_iterator;

	record->type = HAMMER_MEM_RECORD_ADD;
	record->leaf.base.localization = dip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.base.obj_id = dip->obj_id;
	record->leaf.base.key = hammer_directory_namekey(name, bytes);
	record->leaf.base.key += trans->hmp->namekey_iterator;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->data->entry.obj_id = ip->obj_id;
	record->data->entry.localization = ip->obj_localization;
	bcopy(name, record->data->entry.name, bytes);

	++ip->ino_data.nlinks;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

	/*
	 * Find an unused namekey.  Both the in-memory record tree and
	 * the B-Tree are checked.  Exact matches also match create_tid
	 * so use an ASOF search to (mostly) ignore it.
	 */
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg = record->leaf.base;
	cursor.flags |= HAMMER_CURSOR_ASOF;
	cursor.asof = ip->obj_asof;

	count = 0;
	while (hammer_ip_lookup(&cursor) == 0) {
		iterator = (u_int32_t)record->leaf.base.key + 1;
		record->leaf.base.key &= ~0xFFFFFFFFLL;
		record->leaf.base.key |= iterator;
		cursor.key_beg.key = record->leaf.base.key;
		if (++count == 1000000000) {
			hammer_rel_mem_record(record);
			error = ENOSPC;
			goto failed;
		}
	}

	/*
	 * The target inode and the directory entry are bound together.
	 */
	record->target_ip = ip;
	record->flush_state = HAMMER_FST_SETUP;
	TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

	/*
	 * The inode now has a dependency and must be taken out of the idle
	 * state.  An inode not in an idle state is given an extra reference.
	 */
	if (ip->flush_state == HAMMER_FST_IDLE) {
		hammer_ref(&ip->lock);
		ip->flush_state = HAMMER_FST_SETUP;
	}
	error = hammer_mem_add(record);
failed:
	hammer_done_cursor(&cursor);
	return(error);
}
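/*
 * Illustration (added note, not part of the original source): the namekey
 * is a 64 bit value whose upper bits come from the directory name hash and
 * whose low 32 bits are seeded from the per-mount namekey_iterator.  If the
 * candidate key 0xAABBCCDD00000007 collides with an existing entry, the
 * loop above bumps only the low 32 bits (giving 0xAABBCCDD00000008) and
 * retries the ASOF lookup until an unused key is found.
 */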
/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be seeked to the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
		     hammer_cursor_t cursor, struct hammer_inode *dip,
		     struct hammer_inode *ip)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * In-memory (unsynchronized) records can simply be freed.
		 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
		 * by the backend, we must still avoid races against the
		 * backend potentially syncing the record to the media.
		 *
		 * We cannot call hammer_ip_delete_record(), that routine may
		 * only be called from the backend.
		 */
		record = cursor->iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor->deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor->deadlk_rec = record;
			error = EDEADLK;
		} else {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	} else {
		/*
		 * If the record is on-disk we have to queue the deletion by
		 * the record's key.  This also causes lookups to skip the
		 * record.
		 */
		KKASSERT(dip->flags &
			 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
		record = hammer_alloc_mem_record(dip, 0);
		record->type = HAMMER_MEM_RECORD_DEL;
		record->leaf.base = cursor->leaf->base;

		record->target_ip = ip;
		record->flush_state = HAMMER_FST_SETUP;
		TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

		/*
		 * The inode now has a dependency and must be taken out of
		 * the idle state.  An inode not in an idle state is given
		 * an extra reference.
		 */
		if (ip->flush_state == HAMMER_FST_IDLE) {
			hammer_ref(&ip->lock);
			ip->flush_state = HAMMER_FST_SETUP;
		}

		error = hammer_mem_add(record);
	}

	/*
	 * One less link.  The file may still be open in the OS even after
	 * all links have gone away.
	 *
	 * We have to terminate the cursor before syncing the inode to
	 * avoid deadlocking against ourselves.  XXX this may no longer
	 * be true.
	 *
	 * If nlinks drops to zero and the vnode is inactive (or there is
	 * no vnode), call hammer_inode_unloadable_check() to zonk the
	 * inode.  If we don't do this here the inode will not be destroyed
	 * on-media until we unmount.
	 */
	if (error == 0) {
		--ip->ino_data.nlinks;
		hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		if (ip->ino_data.nlinks == 0 &&
		    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
			hammer_done_cursor(cursor);
			hammer_inode_unloadable_check(ip, 1);
			hammer_flush_inode(ip, 0);
		}
	}
	return(error);
}
/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * record->rec.entry.base.base.key
 * record->rec.entry.base.base.rec_type
 * record->rec.entry.base.base.data_len
 * record->data		(a copy will be kmalloc'd if it cannot be embedded)
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int error;

	KKASSERT(record->leaf.base.localization != 0);
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	error = hammer_mem_add(record);
	return(error);
}
/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
	hammer_record_t record;
	struct hammer_btree_leaf_elm leaf;

	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = ip->obj_id;
	leaf.base.key = file_offset + bytes;
	leaf.base.create_tid = 0;
	leaf.base.delete_tid = 0;
	leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	leaf.base.obj_type = 0;				/* unused */
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;	/* unused */
	leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_MISC;
	leaf.data_len = bytes;

	record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
	if (record)
		hammer_ref(&record->lock);
	return(record);
}
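/*
 * Illustration (added note, not part of the original source): a 64KB
 * buffer cache buffer being written at file offset 0x10000 is looked up
 * with leaf.base.key = 0x10000 + 0x10000 = 0x20000 and data_len = 0x10000.
 * Because the XLOOKUP scan uses hammer_rec_overlap_compare(), any
 * in-memory DATA record whose extent intersects [0x10000, 0x20000) is
 * returned, not just an exact key match.
 */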
/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the frontend in order to be able to directly
 * flush a buffer cache buffer.  The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
		   int *errorp)
{
	hammer_record_t record;
	hammer_record_t conflict;
	int zone;
	int flags;

	/*
	 * Deal with conflicting in-memory records.  We cannot have multiple
	 * in-memory records for the same offset without seriously confusing
	 * the backend, including but not limited to the backend issuing
	 * delete-create-delete sequences and asserting on the delete_tid
	 * being the same as the create_tid.
	 *
	 * If we encounter a record with the backend interlock set we cannot
	 * immediately delete it without confusing the backend.
	 */
	while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
		if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
			conflict->flags |= HAMMER_RECF_WANTED;
			tsleep(conflict, 0, "hmrrc3", 0);
		} else {
			conflict->flags |= HAMMER_RECF_DELETED_FE;
		}
		hammer_rel_mem_record(conflict);
	}

	/*
	 * Create a record to cover the direct write.  This is called with
	 * the related BIO locked so there should be no possible conflict.
	 *
	 * The backend is responsible for finalizing the space reserved in
	 * this record.
	 *
	 * XXX bytes not aligned, depend on the reservation code to
	 * align the reservation.
	 */
	record = hammer_alloc_mem_record(ip, 0);
	zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
					   HAMMER_ZONE_SMALL_DATA_INDEX;
	record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
					       &record->leaf.data_offset,
					       errorp);
	if (record->resv == NULL) {
		kprintf("hammer_ip_add_bulk: reservation failed\n");
		hammer_rel_mem_record(record);
		return(NULL);
	}
	record->type = HAMMER_MEM_RECORD_DATA;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.key = file_offset + bytes;
	record->leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.data_len = bytes;
	hammer_crc_set_leaf(data, &record->leaf);
	flags = record->flags;

	hammer_ref(&record->lock);	/* mem_add eats a reference */
	*errorp = hammer_mem_add(record);
	if (*errorp) {
		conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
		kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
			*errorp, conflict, file_offset, bytes);
		if (conflict) {
			kprintf("conflict %lld %d\n",
				conflict->leaf.base.key,
				conflict->leaf.data_len);
			hammer_rel_mem_record(conflict);
		}
	}
	KKASSERT(*errorp == 0);
	conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
	if (conflict != record) {
		kprintf("conflict mismatch %p %p %08x\n",
			conflict, record, record->flags);
		if (conflict) {
			kprintf("conflict mismatch %lld/%d %lld/%d\n",
				conflict->leaf.base.key,
				conflict->leaf.data_len,
				record->leaf.base.key,
				record->leaf.data_len);
		}
	}
	KKASSERT(conflict == record);
	hammer_rel_mem_record(conflict);

	return (record);
}
/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
	struct rec_trunc_info info;

	switch(ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_REGFILE:
		info.rec_type = HAMMER_RECTYPE_DATA;
		break;
	case HAMMER_OBJTYPE_DBFILE:
		info.rec_type = HAMMER_RECTYPE_DB;
		break;
	default:
		return(EINVAL);
	}
	info.trunc_off = file_size;
	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
				   hammer_rec_trunc_callback, &info);
	return(0);
}

static int
hammer_rec_trunc_callback(hammer_record_t record, void *data __unused)
{
	if (record->flags & HAMMER_RECF_DELETED_FE)
		return(0);
	if (record->flush_state == HAMMER_FST_FLUSH)
		return(0);
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	hammer_ref(&record->lock);
	record->flags |= HAMMER_RECF_DELETED_FE;
	hammer_rel_mem_record(record);
	return(0);
}
/*
 * Return 1 if the caller must check for and delete existing records
 * before writing out a new data record.
 *
 * Return 0 if the caller can just insert the record into the B-Tree without
 * checking.
 */
static int
hammer_record_needs_overwrite_delete(hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int64_t file_offset;
	int r;

	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
		file_offset = record->leaf.base.key;
	else
		file_offset = record->leaf.base.key - record->leaf.data_len;
	r = (file_offset < ip->sync_trunc_off);
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		if (ip->sync_trunc_off <= record->leaf.base.key)
			ip->sync_trunc_off = record->leaf.base.key + 1;
	} else {
		if (ip->sync_trunc_off < record->leaf.base.key)
			ip->sync_trunc_off = record->leaf.base.key;
	}
	return(r);
}
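/*
 * Illustration (added note, not part of the original source): with
 * sync_trunc_off at 0x8000, a bulk DATA record covering [0x4000, 0x8000)
 * (key 0x8000, data_len 0x4000) has base offset 0x4000, which is below the
 * truncation point, so this function returns 1 and the caller must delete
 * any overlapping on-disk records first.  When sync_trunc_off is below the
 * record's key it is advanced to the key so later writes above it can skip
 * the delete pass.
 */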
/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
	hammer_transaction_t trans = cursor->trans;
	int64_t file_offset;
	void *bdata;
	int bytes;
	int error;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
	KKASSERT(record->leaf.base.localization != 0);

	/*
	 * If this is a bulk-data record placemarker there may be an existing
	 * record on-disk, indicating a data overwrite.  If there is the
	 * on-disk record must be deleted before we can insert our new record.
	 *
	 * We've synthesized this record and do not know what the create_tid
	 * on-disk is, nor how much data it represents.
	 *
	 * Keep in mind that (key) for data records is (base_offset + len),
	 * not (base_offset).  Also, we only want to get rid of on-disk
	 * records since we are trying to sync our in-memory record, call
	 * hammer_ip_delete_range() with truncating set to 1 to make sure
	 * it skips in-memory records.
	 *
	 * It is ok for the lookup to return ENOENT.
	 *
	 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
	 * to call hammer_ip_delete_range() or not.  This also means we must
	 * update sync_trunc_off() as we write.
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA &&
	    hammer_record_needs_overwrite_delete(record)) {
		file_offset = record->leaf.base.key - record->leaf.data_len;
		bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
			~HAMMER_BUFMASK;
		KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
		error = hammer_ip_delete_range(
				cursor, record->ip,
				file_offset, file_offset + bytes - 1,
				1);
		if (error && error != ENOENT)
			goto done;
	}

	/*
	 * Setup the cursor.
	 */
	hammer_normalize_cursor(cursor);
	cursor->key_beg = record->leaf.base;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Records can wind up on-media before the inode itself is on-media.
	 */
	record->ip->flags |= HAMMER_INODE_DONDISK;

	/*
	 * If we are deleting a directory entry an exact match must be
	 * found on-disk.
	 */
	if (record->type == HAMMER_MEM_RECORD_DEL) {
		error = hammer_btree_lookup(cursor);
		if (error == 0) {
			error = hammer_ip_delete_record(cursor, record->ip,
							trans->tid);
			if (error == 0) {
				record->flags |= HAMMER_RECF_DELETED_FE;
				record->flags |= HAMMER_RECF_DELETED_BE;
			}
		}
		goto done;
	}

	/*
	 * Issue a lookup to position the cursor and locate the cluster.  The
	 * target key should not exist.  If we are creating a directory entry
	 * we may have to iterate the low 32 bits of the key to find an unused
	 * key.
	 */
	cursor->flags |= HAMMER_CURSOR_INSERT;

	error = hammer_btree_lookup(cursor);
	if (hammer_debug_inode)
		kprintf("DOINSERT LOOKUP %d\n", error);
	if (error == 0) {
		kprintf("hammer_ip_sync_record: duplicate rec "
			"at (%016llx)\n", record->leaf.base.key);
		Debugger("duplicate record1");
		error = EIO;
	}
#if 0
	if (record->type == HAMMER_MEM_RECORD_DATA)
		kprintf("sync_record %016llx ---------------- %016llx %d\n",
			record->leaf.base.key - record->leaf.data_len,
			record->leaf.data_offset, error);
#endif

	if (error != ENOENT)
		goto done;

	/*
	 * Allocate the record and data.  The result buffers will be
	 * marked as being modified and further calls to
	 * hammer_modify_buffer() will result in unneeded UNDO records.
	 *
	 * Support zero-fill records (data == NULL and data_len != 0)
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA) {
		/*
		 * The data portion of a bulk-data record has already been
		 * committed to disk, we need only adjust the layer2
		 * statistics in the same transaction as our B-Tree insert.
		 */
		KKASSERT(record->leaf.data_offset != 0);
		hammer_blockmap_finalize(trans, record->leaf.data_offset,
					 record->leaf.data_len);
		error = 0;
	} else if (record->data && record->leaf.data_len) {
		/*
		 * Wholly cached record, with data.  Allocate the data.
		 */
		bdata = hammer_alloc_data(trans, record->leaf.data_len,
					  record->leaf.base.rec_type,
					  &record->leaf.data_offset,
					  &cursor->data_buffer, &error);
		if (bdata == NULL)
			goto done;
		hammer_crc_set_leaf(record->data, &record->leaf);
		hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
		bcopy(record->data, bdata, record->leaf.data_len);
		hammer_modify_buffer_done(cursor->data_buffer);
	} else {
		/*
		 * Wholly cached record, without data.
		 */
		record->leaf.data_offset = 0;
		record->leaf.data_crc = 0;
	}

	error = hammer_btree_insert(cursor, &record->leaf);
	if (hammer_debug_inode && error)
		kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);

	/*
	 * Our record is on-disk, normally mark the in-memory version as
	 * deleted.  If the record represented a directory deletion but
	 * we had to sync a valid directory entry to disk we must convert
	 * the record to a covering delete so the frontend does not have
	 * visibility on the synced entry.
	 */
	if (error == 0) {
		if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags &= ~HAMMER_RECF_DELETED_FE;
			record->type = HAMMER_MEM_RECORD_DEL;
			KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
			record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
			/* hammer_flush_record_done takes care of the rest */
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
		}
	} else {
		if (record->leaf.data_offset) {
			hammer_blockmap_free(trans, record->leaf.data_offset,
					     record->leaf.data_len);
		}
	}
done:
	return(error);
}
/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key is used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
static
int
hammer_mem_add(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	/*
	 * Make a private copy of record->data
	 */
	if (record->data)
		KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

	/*
	 * Insert into the RB tree.  A unique key should have already
	 * been selected if this is a directory entry.
	 */
	if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
		return (EEXIST);
	}
	++hmp->count_newrecords;
	++hmp->rsv_recs;
	++record->ip->rsv_recs;
	record->ip->hmp->rsv_databytes += record->leaf.data_len;
	record->flags |= HAMMER_RECF_ONRBTREE;
	hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
	hammer_rel_mem_record(record);
	return(0);
}
/************************************************************************
 *		     HAMMER INODE MERGED-RECORD FUNCTIONS		*
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */
/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
	int error;

	/*
	 * If the element is in-memory return it without searching the
	 * on-disk B-Tree
	 */
	KKASSERT(cursor->ip);
	error = hammer_mem_lookup(cursor);
	if (error == 0) {
		cursor->leaf = &cursor->iprec->leaf;
		return(error);
	}
	if (error != ENOENT)
		return(error);

	/*
	 * If the inode has on-disk components search the on-disk B-Tree.
	 */
	if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
		return(error);
	error = hammer_btree_lookup(cursor);
	if (error == 0)
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
	return(error);
}
/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip = cursor->ip;
	int error;

	KKASSERT(ip != NULL);

	/*
	 * Clean up fields and setup for merged scan
	 */
	cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
	cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
	cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	/*
	 * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
	 * exact lookup so if we get ENOENT we have to call the iterate
	 * function to validate the first record after the begin key.
	 *
	 * The ATEDISK flag is used by hammer_btree_iterate to determine
	 * whether it must index forwards or not.  It is also used here
	 * to select the next record from in-memory or on-disk.
	 *
	 * EDEADLK can only occur if the lookup hit an empty internal
	 * element and couldn't delete it.  Since this could only occur
	 * in-range, we can just iterate from the failure point.
	 */
	if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
		error = hammer_btree_lookup(cursor);
		if (error == ENOENT || error == EDEADLK) {
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x2000)
				kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
			error = hammer_btree_iterate(cursor);
		}
		if (error && error != ENOENT)
			return(error);
		if (error == 0) {
			cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		} else {
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
		}
	}

	/*
	 * Search the in-memory record list (Red-Black tree).  Unlike the
	 * B-Tree search, mem_first checks for records in the range.
	 */
	error = hammer_mem_first(cursor);
	if (error && error != ENOENT)
		return(error);
	if (error == 0) {
		cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
		cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
	}

	/*
	 * This will return the first matching record.
	 */
	return(hammer_ip_next(cursor));
}
/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor.  This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code is returned.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
	hammer_btree_elm_t elm;
	hammer_record_t rec, save;
	int error;
	int r;

again:
	/*
	 * Load the current on-disk and in-memory record.  If we ate any
	 * records we have to get the next one.
	 *
	 * If we deleted the last on-disk record we had scanned ATEDISK will
	 * be clear and DELBTREE will be set, forcing a call to iterate.  The
	 * fact that ATEDISK is clear causes iterate to re-test the 'current'
	 * element.  If ATEDISK is set, iterate will skip the 'current'
	 * element.
	 *
	 * Get the next on-disk record
	 */
	if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			error = hammer_btree_iterate(cursor);
			cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
			if (error == 0) {
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
				hammer_cache_node(&cursor->ip->cache[1],
						  cursor->node);
			} else {
				cursor->flags |= HAMMER_CURSOR_DISKEOF |
						 HAMMER_CURSOR_ATEDISK;
			}
		}
	}

	/*
	 * Get the next in-memory record.  The record can be ripped out
	 * of the RB tree so we maintain a scan_info structure to track
	 * the next node.
	 *
	 * hammer_rec_scan_cmp:  Is the record still in our general range,
	 *			 (non-inclusive of snapshot exclusions)?
	 * hammer_rec_scan_callback: Is the record in our snapshot?
	 */
	if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
		if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
			save = cursor->iprec;
			cursor->iprec = NULL;
			rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
			while (rec) {
				if (hammer_rec_scan_cmp(rec, cursor) != 0)
					break;
				if (hammer_rec_scan_callback(rec, cursor) != 0)
					break;
				rec = hammer_rec_rb_tree_RB_NEXT(rec);
			}
			if (save)
				hammer_rel_mem_record(save);
			if (cursor->iprec) {
				KKASSERT(cursor->iprec == rec);
				cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
			} else {
				cursor->flags |= HAMMER_CURSOR_MEMEOF;
			}
		}
	}

	/*
	 * The memory record may have become stale while being held in
	 * cursor->iprec.  We are interlocked against the backend
	 * with regards to B-Tree entries.
	 */
	if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
			goto again;
		}
	}

	/*
	 * Extract either the disk or memory record depending on their
	 * relative position.
	 */
	error = 0;
	switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
	case 0:
		/*
		 * Both entries valid.   Compare the entries and nominally
		 * return the first one in the sort order.  Numerous cases
		 * require special attention, however.
		 */
		elm = &cursor->node->ondisk->elms[cursor->index];
		r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);

		/*
		 * If the two entries differ only by their key (-2/2) or
		 * create_tid (-1/1), and are DATA records, we may have a
		 * nominal match.  We have to calculate the base file
		 * offset of the data.
		 */
		if (r <= 2 && r >= -2 && r != 0 &&
		    cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
		    cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
			int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
			int64_t base2 = cursor->iprec->leaf.base.key -
					cursor->iprec->leaf.data_len;
			if (base1 == base2)
				r = 0;
		}

		if (r < 0) {
			error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_LEAF);
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
			break;
		}

		/*
		 * If the entries match exactly the memory entry is either
		 * an on-disk directory entry deletion or a bulk data
		 * overwrite.  If it is a directory entry deletion we eat
		 * both entries.
		 *
		 * For the bulk-data overwrite case it is possible to have
		 * visibility into both, which simply means the syncer
		 * hasn't gotten around to doing the delete+insert sequence
		 * on the B-Tree.  Use the memory entry and throw away the
		 * on-disk entry.
		 *
		 * If the in-memory record is not either of these we
		 * probably caught the syncer while it was syncing it to
		 * the media.  Since we hold a shared lock on the cursor,
		 * the in-memory record had better be marked deleted at
		 * this point.
		 */
		if (r == 0) {
			if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
					cursor->flags |= HAMMER_CURSOR_ATEMEM;
					goto again;
				}
			} else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
				}
				/* fall through to memory entry */
			} else {
				panic("hammer_ip_next: duplicate mem/b-tree entry");
				cursor->flags |= HAMMER_CURSOR_ATEMEM;
				goto again;
			}
		}
		/* fall through to the memory entry */
	case HAMMER_CURSOR_ATEDISK:
		/*
		 * Only the memory entry is valid.
		 */
		cursor->leaf = &cursor->iprec->leaf;
		cursor->flags |= HAMMER_CURSOR_ATEMEM;

		/*
		 * If the memory entry is an on-disk deletion we should also
		 * have found a B-Tree record.  If the backend beat us to it,
		 * it would have interlocked the cursor and we should have
		 * seen the in-memory record marked DELETED_FE.
		 */
		if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
		    (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
			panic("hammer_ip_next: del-on-disk with no b-tree entry");
		}
		break;
	case HAMMER_CURSOR_ATEMEM:
		/*
		 * Only the disk entry is valid
		 */
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		break;
	default:
		/*
		 * Neither entry is valid
		 *
		 * XXX error not set properly
		 */
		cursor->leaf = NULL;
		error = ENOENT;
		break;
	}
	return(error);
}
/*
 * Resolve the cursor->data pointer for the current cursor position in
 * a merged iteration.
 */
int
hammer_ip_resolve_data(hammer_cursor_t cursor)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * The data associated with an in-memory record is usually
		 * kmalloced, but reserve-ahead data records will have an
		 * on-disk reference.
		 *
		 * NOTE: Reserve-ahead data records must be handled in the
		 * context of the related high level buffer cache buffer
		 * to interlock against async writes.
		 */
		record = cursor->iprec;
		cursor->data = record->data;
		error = 0;
		if (cursor->data == NULL) {
			KKASSERT(record->leaf.base.rec_type ==
				 HAMMER_RECTYPE_DATA);
			cursor->data = hammer_bread_ext(cursor->trans->hmp,
						       record->leaf.data_offset,
						       record->leaf.data_len,
						       &error,
						       &cursor->data_buffer);
		}
	} else {
		cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
	}
	return(error);
}
/*
 * Backend truncation / record replacement - delete records in range.
 *
 * Delete all records within the specified range for inode ip.  In-memory
 * records still associated with the frontend are ignored.
 *
 * NOTE: An unaligned range will cause new records to be added to cover
 * the edge cases. (XXX not implemented yet).
 *
 * NOTE: Replacement via reservations (see hammer_ip_sync_record_cursor())
 * also does not deal with unaligned ranges.
 *
 * NOTE: ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
 *
 * NOTE: Record keys for regular file data have to be special-cased since
 * they indicate the end of the range (key = base + bytes).
 */
int
hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
		       int64_t ran_beg, int64_t ran_end, int truncating)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;
	int64_t off;
	int64_t tmp64;

#if 0
	kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
#endif

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	cursor->key_end = cursor->key_beg;
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor->key_beg.key = ran_beg;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
		cursor->key_end.rec_type = HAMMER_RECTYPE_DB;
		cursor->key_end.key = ran_end;
	} else {
		/*
		 * The key in the B-Tree is (base+bytes), so the first possible
		 * matching key is ran_beg + 1.
		 */
		cursor->key_beg.key = ran_beg + 1;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
		cursor->key_end.rec_type = HAMMER_RECTYPE_DATA;

		tmp64 = ran_end + MAXPHYS + 1;	/* work around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor->key_end.key = ran_end + MAXPHYS + 1;
	}
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);

		/*
		 * There may be overlap cases for regular file data.  Also
		 * remember the key for a regular file record is (base + len),
		 * NOT (base).
		 */
		if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
			off = leaf->base.key - leaf->data_len;
			/*
			 * Check the left edge case.  We currently do not
			 * split existing records.
			 */
			if (off < ran_beg) {
				panic("hammer left edge case %016llx %d\n",
					leaf->base.key, leaf->data_len);
			}

			/*
			 * Check the right edge case.  Note that the
			 * record can be completely out of bounds, which
			 * terminates the search.
			 *
			 * base->key is exclusive of the right edge while
			 * ran_end is inclusive of the right edge.  The
			 * (key - data_len) left boundary is inclusive.
			 *
			 * XXX theory-check this test at some point, are
			 * we missing a + 1 somewhere?  Note that ran_end
			 * could overflow.
			 */
			if (leaf->base.key - 1 > ran_end) {
				if (leaf->base.key - leaf->data_len > ran_end)
					break;
				panic("hammer right edge case\n");
			}
		}

		/*
		 * Delete the record.  When truncating we do not delete
		 * in-memory (data) records because they represent data
		 * written after the truncation.
		 *
		 * This will also physically destroy the B-Tree entry and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
		 * uses to perform a fixup.
		 */
		if (truncating == 0 || hammer_cursor_ondisk(cursor))
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
		if (error)
			break;
		error = hammer_ip_next(cursor);
	}
	if (cursor->node)
		hammer_cache_node(&ip->cache[1], cursor->node);

	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
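/*
 * Illustration (added note, not part of the original source): truncating a
 * regular file at offset 0x18000 would call this with ran_beg = 0x18000 and
 * a large ran_end.  Because a DATA record's B-Tree key is its ending offset,
 * key_beg.key becomes 0x18001: a record covering [0x14000, 0x18000)
 * (key exactly 0x18000) is left alone, while a record covering
 * [0x18000, 0x1C000) (key 0x1C000) falls inside the range and is deleted.
 */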
/*
 * Backend truncation - delete all records.
 *
 * Delete all user records associated with an inode except the inode record
 * itself.  Directory entries are not deleted (they must be properly disposed
 * of or nlinks would get upset).
 */
int
hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
			   int *countp)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
	cursor->key_beg.key = HAMMER_MIN_KEY;

	cursor->key_end = cursor->key_beg;
	cursor->key_end.rec_type = 0xFFFF;
	cursor->key_end.key = HAMMER_MAX_KEY;

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);

		/*
		 * Mark the record and B-Tree entry as deleted.  This will
		 * also physically delete the B-Tree entry, record, and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
		 * uses to perform a fixup.
		 *
		 * Directory entries (and delete-on-disk directory entries)
		 * must be synced and cannot be deleted.
		 */
		if (leaf->base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (error == 0 && countp)
				++*countp;
		}
		if (error)
			break;
		error = hammer_ip_next(cursor);
	}
	if (cursor->node)
		hammer_cache_node(&ip->cache[1], cursor->node);
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
/*
 * Delete the record at the current cursor.  On success the cursor will
 * be positioned appropriately for an iteration but may no longer be at
 * a leaf node.
 *
 * This routine is only called from the backend.
 *
 * NOTE: This can return EDEADLK, requiring the caller to terminate the
 * cursor and retry.
 */
int
hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid)
{
	hammer_off_t zone2_offset;
	hammer_record_t iprec;
	hammer_btree_elm_t elm;
	hammer_mount_t hmp;
	int error;
	int dodelete;

	KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
	hmp = cursor->node->hmp;

	/*
	 * In-memory (unsynchronized) records can simply be freed.  This
	 * only occurs in range iterations since all other records are
	 * individually synchronized.  Thus there should be no confusion with
	 * the interlock.
	 *
	 * An in-memory record may be deleted before being committed to disk,
	 * but could have been accessed in the mean time.  The backing store
	 * may never have been marked allocated and so hammer_blockmap_free()
	 * may never get called on it.  Because of this we have to make sure
	 * that we've gotten rid of any related hammer_buffer or buffer cache
	 * buffer.
	 */
	if (hammer_cursor_inmem(cursor)) {
		iprec = cursor->iprec;
		KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
		iprec->flags |= HAMMER_RECF_DELETED_FE;
		iprec->flags |= HAMMER_RECF_DELETED_BE;

		if (iprec->leaf.data_offset && iprec->leaf.data_len) {
			zone2_offset = hammer_blockmap_lookup(hmp, iprec->leaf.data_offset, &error);
			KKASSERT(error == 0);
			hammer_del_buffers(hmp,
					   iprec->leaf.data_offset,
					   zone2_offset,
					   iprec->leaf.data_len);
		}
		return(0);
	}

	/*
	 * On-disk records are marked as deleted by updating their delete_tid.
	 * This does not affect their position in the B-Tree (which is based
	 * on their create_tid).
	 */
	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);

	/*
	 * If we were mounted with the nohistory option, we physically
	 * delete the record.
	 */
	dodelete = hammer_nohistory(ip);

	if (error == 0) {
		error = hammer_cursor_upgrade(cursor);
		if (error == 0) {
			elm = &cursor->node->ondisk->elms[cursor->index];
			hammer_modify_node(cursor->trans, cursor->node,
					   elm, sizeof(*elm));
			elm->leaf.base.delete_tid = tid;
			elm->leaf.delete_ts = cursor->trans->time32;
			hammer_modify_node_done(cursor->node);

			/*
			 * An on-disk record cannot have the same delete_tid
			 * as its create_tid.  In a chain of record updates
			 * this could result in a duplicate record.
			 */
			KKASSERT(elm->leaf.base.delete_tid != elm->leaf.base.create_tid);
		}
	}

	if (error == 0 && dodelete) {
		error = hammer_delete_at_cursor(cursor, NULL);
		if (error) {
			panic("hammer_ip_delete_record: unable to physically delete the record!\n");
			error = 0;
		}
	}
	return(error);
}
int
hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
{
	hammer_btree_elm_t elm;
	hammer_off_t data_offset;
	int32_t data_len;
	u_int16_t rec_type;
	int error;

	elm = &cursor->node->ondisk->elms[cursor->index];
	KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);

	data_offset = elm->leaf.data_offset;
	data_len = elm->leaf.data_len;
	rec_type = elm->leaf.base.rec_type;

	error = hammer_btree_delete(cursor);
	if (error == 0) {
		/*
		 * This forces a fixup for the iteration because
		 * the cursor is now either sitting at the 'next'
		 * element or sitting at the end of a leaf.
		 */
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			cursor->flags |= HAMMER_CURSOR_DELBTREE;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		}
	}
	if (error == 0) {
		switch(data_offset & HAMMER_OFF_ZONE_MASK) {
		case HAMMER_ZONE_LARGE_DATA:
		case HAMMER_ZONE_SMALL_DATA:
		case HAMMER_ZONE_META:
			hammer_blockmap_free(cursor->trans,
					     data_offset, data_len);
			break;
		default:
			break;
		}
	}
	return (error);
}
/*
 * Determine whether we can remove a directory.  This routine checks whether
 * a directory is empty or not and enforces flush connectivity.
 *
 * Flush connectivity requires that we block if the target directory is
 * currently flushing, otherwise it may not end up in the same flush group.
 *
 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
 */
int
hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Check directory empty
	 */
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);

	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
	cursor.key_beg.key = HAMMER_MIN_KEY;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.rec_type = 0xFFFF;
	cursor.key_end.key = HAMMER_MAX_KEY;

	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);
	if (error == ENOENT)
		error = 0;
	else if (error == 0)
		error = ENOTEMPTY;
	hammer_done_cursor(&cursor);
	return(error);
}