HAMMER 58B/Many: Revamp ioctls, add non-monotonic timestamps, mirroring
[dragonfly.git] / sys / vfs / hammer / hammer_object.c
blob ca071ee88704121d759905c3c7b513d4c99eface
1 /*
2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.75 2008/06/24 17:38:17 dillon Exp $
37 #include "hammer.h"
39 static int hammer_mem_add(hammer_record_t record);
40 static int hammer_mem_lookup(hammer_cursor_t cursor);
41 static int hammer_mem_first(hammer_cursor_t cursor);
42 static int hammer_rec_trunc_callback(hammer_record_t record,
43 void *data __unused);
44 static int hammer_record_needs_overwrite_delete(hammer_record_t record);
46 struct rec_trunc_info {
47 u_int16_t rec_type;
48 int64_t trunc_off;
52 * Red-black tree support. Comparison code for insertion.
54 static int
55 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
57 if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
58 return(-1);
59 if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
60 return(1);
62 if (rec1->leaf.base.key < rec2->leaf.base.key)
63 return(-1);
64 if (rec1->leaf.base.key > rec2->leaf.base.key)
65 return(1);
68 * Never match against an item deleted by the front-end.
70 * rec1 is greater than rec2 if rec1 is marked deleted.
71 * rec1 is less than rec2 if rec2 is marked deleted.
73 * Multiple deleted records may be present, do not return 0
74 * if both are marked deleted.
76 if (rec1->flags & HAMMER_RECF_DELETED_FE)
77 return(1);
78 if (rec2->flags & HAMMER_RECF_DELETED_FE)
79 return(-1);
81 return(0);
85 * Basic record comparison code similar to hammer_btree_cmp().
87 static int
88 hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
90 if (elm->rec_type < rec->leaf.base.rec_type)
91 return(-3);
92 if (elm->rec_type > rec->leaf.base.rec_type)
93 return(3);
95 if (elm->key < rec->leaf.base.key)
96 return(-2);
97 if (elm->key > rec->leaf.base.key)
98 return(2);
101 * Never match against an item deleted by the front-end.
102 * elm is less than rec if rec is marked deleted.
104 if (rec->flags & HAMMER_RECF_DELETED_FE)
105 return(-1);
106 return(0);
110 * Special LOOKUP_INFO to locate an overlapping record. This is used by
111 * the reservation code to implement small-block records (whose keys will
112 * be different depending on data_len, when representing the same base
113 * offset).
115 * NOTE: The base file offset of a data record is (key - data_len), not (key).
117 static int
118 hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
120 if (leaf->base.rec_type < rec->leaf.base.rec_type)
121 return(-3);
122 if (leaf->base.rec_type > rec->leaf.base.rec_type)
123 return(3);
126 * Overlap compare
128 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
129 /* leaf_end <= rec_beg */
130 if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
131 return(-2);
132 /* leaf_beg >= rec_end */
133 if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
134 return(2);
135 } else {
136 if (leaf->base.key < rec->leaf.base.key)
137 return(-2);
138 if (leaf->base.key > rec->leaf.base.key)
139 return(2);
143 * Never match against an item deleted by the front-end.
144 * leaf is less than rec if rec is marked deleted.
146 * We must still return the proper code for the scan to continue
147 * along the correct branches.
149 if (rec->flags & HAMMER_RECF_DELETED_FE) {
150 if (leaf->base.key < rec->leaf.base.key)
151 return(-2);
152 if (leaf->base.key > rec->leaf.base.key)
153 return(2);
154 return(-1);
156 return(0);
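/*
 * Example (illustrative): a cached DATA record covering file offsets
 * [0..16384) carries key 16384 and data_len 16384, so its base offset is
 * (key - data_len) == 0.  A lookup leaf for [0..4096) has key 4096 and
 * data_len 4096.  Neither "leaf_end <= rec_beg" (4096 <= 0) nor
 * "leaf_beg >= rec_end" (0 >= 16384) holds, so the ranges overlap and
 * the record matches.
 */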
160 * RB_SCAN comparison code for hammer_mem_first(). The argument order
161 * is reversed so the comparison result has to be negated. key_beg and
162 * key_end are both range-inclusive.
164 * Localized deletions are not cached in-memory.
166 static
167 int
168 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
170 hammer_cursor_t cursor = data;
171 int r;
173 r = hammer_rec_cmp(&cursor->key_beg, rec);
174 if (r > 1)
175 return(-1);
176 r = hammer_rec_cmp(&cursor->key_end, rec);
177 if (r < -1)
178 return(1);
179 return(0);
183 * This compare function is used when simply looking up key_beg.
185 static
186 int
187 hammer_rec_find_cmp(hammer_record_t rec, void *data)
189 hammer_cursor_t cursor = data;
190 int r;
192 r = hammer_rec_cmp(&cursor->key_beg, rec);
193 if (r > 1)
194 return(-1);
195 if (r < -1)
196 return(1);
197 return(0);
201 * Locate blocks within the truncation range. Partial blocks do not count.
203 static
204 int
205 hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
207 struct rec_trunc_info *info = data;
209 if (rec->leaf.base.rec_type < info->rec_type)
210 return(-1);
211 if (rec->leaf.base.rec_type > info->rec_type)
212 return(1);
214 switch(rec->leaf.base.rec_type) {
215 case HAMMER_RECTYPE_DB:
217 * DB record key is not beyond the truncation point, retain.
219 if (rec->leaf.base.key < info->trunc_off)
220 return(-1);
221 break;
222 case HAMMER_RECTYPE_DATA:
224 * DATA record offset start is not beyond the truncation point,
225 * retain.
227 if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
228 return(-1);
229 break;
230 default:
231 panic("hammer_rec_trunc_cmp: unexpected record type");
235 * The record start is >= the truncation point, return match,
236 * the record should be destroyed.
238 return(0);
241 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
242 RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
243 hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);
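/*
 * The RB_GENERATE macros above expand into the red-black tree routines
 * used throughout this file: the insert/remove operations invoked via
 * RB_INSERT and RB_REMOVE, hammer_rec_rb_tree_RB_SCAN for ranged scans
 * driven by the *_cmp functions defined above, and
 * hammer_rec_rb_tree_RB_LOOKUP_INFO for the overlap lookup used by
 * hammer_ip_get_bulk().
 */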
246 * Allocate a record for the caller to finish filling in. The record is
247 * returned referenced.
249 hammer_record_t
250 hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
252 hammer_record_t record;
254 ++hammer_count_records;
255 record = kmalloc(sizeof(*record), M_HAMMER, M_WAITOK | M_ZERO);
256 record->flush_state = HAMMER_FST_IDLE;
257 record->ip = ip;
258 record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
259 record->leaf.data_len = data_len;
260 hammer_ref(&record->lock);
262 if (data_len) {
263 record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
264 record->flags |= HAMMER_RECF_ALLOCDATA;
265 ++hammer_count_record_datas;
268 return (record);
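/*
 * Typical caller flow (see hammer_ip_add_directory() for a concrete
 * example): allocate the record, fill in record->leaf.base (obj_id, key,
 * rec_type, localization) and any record->data payload, then pass the
 * reference to hammer_mem_add(), which inserts the record into the
 * inode's rec_tree and eats the reference.
 */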
271 void
272 hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
274 while (record->flush_state == HAMMER_FST_FLUSH) {
275 record->flags |= HAMMER_RECF_WANTED;
276 tsleep(record, 0, ident, 0);
281 * Called from the backend, hammer_inode.c, after a record has been
282 * flushed to disk. The record has been exclusively locked by the
283 * caller and interlocked with BE.
285 * We clean up the state, unlock, and release the record (the record
286 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
288 void
289 hammer_flush_record_done(hammer_record_t record, int error)
291 hammer_inode_t target_ip;
293 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
294 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
296 if (error) {
298 * An error occurred, the backend was unable to sync the
299 * record to its media. Leave the record intact.
301 Debugger("flush_record_done error");
304 if (record->flags & HAMMER_RECF_DELETED_BE) {
305 if ((target_ip = record->target_ip) != NULL) {
306 TAILQ_REMOVE(&target_ip->target_list, record,
307 target_entry);
308 record->target_ip = NULL;
309 hammer_test_inode(target_ip);
311 record->flush_state = HAMMER_FST_IDLE;
312 } else {
313 if (record->target_ip) {
314 record->flush_state = HAMMER_FST_SETUP;
315 hammer_test_inode(record->ip);
316 hammer_test_inode(record->target_ip);
317 } else {
318 record->flush_state = HAMMER_FST_IDLE;
321 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
322 if (record->flags & HAMMER_RECF_WANTED) {
323 record->flags &= ~HAMMER_RECF_WANTED;
324 wakeup(record);
326 hammer_rel_mem_record(record);
330 * Release a memory record. Records marked for deletion are immediately
331 * removed from the RB-Tree but otherwise left intact until the last ref
332 * goes away.
334 void
335 hammer_rel_mem_record(struct hammer_record *record)
337 hammer_inode_t ip, target_ip;
339 hammer_unref(&record->lock);
341 if (record->lock.refs == 0) {
343 * Upon release of the last reference wakeup any waiters.
344 * The record structure may get destroyed so callers will
345 * loop up and do a relookup.
347 * WARNING! Record must be removed from RB-TREE before we
348 * might possibly block. hammer_test_inode() can block!
350 ip = record->ip;
353 * Upon release of the last reference a record marked deleted
354 * is destroyed.
356 if (record->flags & HAMMER_RECF_DELETED_FE) {
357 KKASSERT(ip->lock.refs > 0);
358 KKASSERT(record->flush_state != HAMMER_FST_FLUSH);
361 * target_ip may have zero refs, we have to ref it
362 * to prevent it from being ripped out from under
363 * us.
365 if ((target_ip = record->target_ip) != NULL) {
366 TAILQ_REMOVE(&target_ip->target_list,
367 record, target_entry);
368 record->target_ip = NULL;
369 hammer_ref(&target_ip->lock);
372 if (record->flags & HAMMER_RECF_ONRBTREE) {
373 RB_REMOVE(hammer_rec_rb_tree,
374 &record->ip->rec_tree,
375 record);
376 KKASSERT(ip->rsv_recs > 0);
377 --ip->hmp->rsv_recs;
378 --ip->rsv_recs;
379 ip->hmp->rsv_databytes -= record->leaf.data_len;
380 record->flags &= ~HAMMER_RECF_ONRBTREE;
382 if (RB_EMPTY(&record->ip->rec_tree)) {
383 record->ip->flags &= ~HAMMER_INODE_XDIRTY;
384 record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
385 hammer_test_inode(record->ip);
390 * Do this test after removing record from the B-Tree.
392 if (target_ip) {
393 hammer_test_inode(target_ip);
394 hammer_rel_inode(target_ip, 0);
397 if (record->flags & HAMMER_RECF_ALLOCDATA) {
398 --hammer_count_record_datas;
399 kfree(record->data, M_HAMMER);
400 record->flags &= ~HAMMER_RECF_ALLOCDATA;
402 if (record->resv) {
403 hammer_blockmap_reserve_complete(ip->hmp,
404 record->resv);
405 record->resv = NULL;
407 record->data = NULL;
408 --hammer_count_records;
409 kfree(record, M_HAMMER);
415 * Record visibility depends on whether the record is being accessed by
416 * the backend or the frontend.
418 * Return non-zero if the record is visible, zero if it isn't or if it is
419 * deleted.
421 static __inline
422 int
423 hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
425 if (cursor->flags & HAMMER_CURSOR_BACKEND) {
426 if (record->flags & HAMMER_RECF_DELETED_BE)
427 return(0);
428 } else {
429 if (record->flags & HAMMER_RECF_DELETED_FE)
430 return(0);
432 return(1);
436 * This callback is used as part of the RB_SCAN function for in-memory
437 * records. We terminate it (return -1) as soon as we get a match.
439 * This routine is used by frontend code.
441 * The primary compare code does not account for ASOF lookups. This
442 * code handles that case as well as a few others.
444 static
445 int
446 hammer_rec_scan_callback(hammer_record_t rec, void *data)
448 hammer_cursor_t cursor = data;
451 * We terminate on success, so this should be NULL on entry.
453 KKASSERT(cursor->iprec == NULL);
456 * Skip if the record was marked deleted.
458 if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
459 return(0);
462 * Skip if not visible due to our as-of TID
464 if (cursor->flags & HAMMER_CURSOR_ASOF) {
465 if (cursor->asof < rec->leaf.base.create_tid)
466 return(0);
467 if (rec->leaf.base.delete_tid &&
468 cursor->asof >= rec->leaf.base.delete_tid) {
469 return(0);
474 * If the record is queued to the flusher we have to block until
475 * it isn't. Otherwise we may see duplication between our memory
476 * cache and the media.
478 hammer_ref(&rec->lock);
480 #warning "This deadlocks"
481 #if 0
482 if (rec->flush_state == HAMMER_FST_FLUSH)
483 hammer_wait_mem_record(rec);
484 #endif
487 * The record may have been deleted while we were blocked.
489 if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
490 hammer_rel_mem_record(rec);
491 return(0);
495 * Set the matching record and stop the scan.
497 cursor->iprec = rec;
498 return(-1);
503 * Lookup an in-memory record given the key specified in the cursor. Works
504 * just like hammer_btree_lookup() but operates on an inode's in-memory
505 * record list.
507 * The lookup must fail if the record is marked for deferred deletion.
509 static
510 int
511 hammer_mem_lookup(hammer_cursor_t cursor)
513 int error;
515 KKASSERT(cursor->ip);
516 if (cursor->iprec) {
517 hammer_rel_mem_record(cursor->iprec);
518 cursor->iprec = NULL;
520 hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
521 hammer_rec_scan_callback, cursor);
523 if (cursor->iprec == NULL)
524 error = ENOENT;
525 else
526 error = 0;
527 return(error);
531 * hammer_mem_first() - locate the first in-memory record matching the
532 * cursor within the bounds of the key range.
534 static
535 int
536 hammer_mem_first(hammer_cursor_t cursor)
538 hammer_inode_t ip;
540 ip = cursor->ip;
541 KKASSERT(ip != NULL);
543 if (cursor->iprec) {
544 hammer_rel_mem_record(cursor->iprec);
545 cursor->iprec = NULL;
548 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
549 hammer_rec_scan_callback, cursor);
552 * Adjust scan.node and keep it linked into the RB-tree so we can
553 * hold the cursor through third party modifications of the RB-tree.
555 if (cursor->iprec)
556 return(0);
557 return(ENOENT);
560 void
561 hammer_mem_done(hammer_cursor_t cursor)
563 if (cursor->iprec) {
564 hammer_rel_mem_record(cursor->iprec);
565 cursor->iprec = NULL;
569 /************************************************************************
570 * HAMMER IN-MEMORY RECORD FUNCTIONS *
571 ************************************************************************
573 * These functions manipulate in-memory records. Such records typically
574 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
578 * Add a directory entry (dip,ncp) which references inode (ip).
580 * Note that the low 32 bits of the namekey are set temporarily to create
581 * a unique in-memory record, and may be modified a second time when the
582 * record is synchronized to disk. In particular, the low 32 bits cannot be
583 * all 0's when synching to disk, which is not handled here.
585 * NOTE: bytes does not include any terminating \0 on name, and name might
586 * not be terminated.
588 int
589 hammer_ip_add_directory(struct hammer_transaction *trans,
590 struct hammer_inode *dip, const char *name, int bytes,
591 struct hammer_inode *ip)
593 hammer_record_t record;
594 int error;
596 record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
597 if (++trans->hmp->namekey_iterator == 0)
598 ++trans->hmp->namekey_iterator;
600 record->type = HAMMER_MEM_RECORD_ADD;
601 record->leaf.base.localization = dip->obj_localization +
602 HAMMER_LOCALIZE_MISC;
603 record->leaf.base.obj_id = dip->obj_id;
604 record->leaf.base.key = hammer_directory_namekey(name, bytes);
605 record->leaf.base.key += trans->hmp->namekey_iterator;
606 record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
607 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
608 record->data->entry.obj_id = ip->obj_id;
609 record->data->entry.localization = ip->obj_localization;
610 bcopy(name, record->data->entry.name, bytes);
612 ++ip->ino_data.nlinks;
613 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
616 * The target inode and the directory entry are bound together.
618 record->target_ip = ip;
619 record->flush_state = HAMMER_FST_SETUP;
620 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
623 * The inode now has a dependency and must be taken out of the idle
624 * state. An inode not in an idle state is given an extra reference.
626 if (ip->flush_state == HAMMER_FST_IDLE) {
627 hammer_ref(&ip->lock);
628 ip->flush_state = HAMMER_FST_SETUP;
630 error = hammer_mem_add(record);
631 return(error);
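/*
 * Key construction example (illustrative): hammer_directory_namekey()
 * hashes the filename into the upper bits of the 64 bit key while the
 * per-mount namekey_iterator occupies the low 32 bits, so two entries
 * whose names hash identically still receive distinct in-memory keys.
 * On a collision the low 32 bits are masked off (key &= ~0xFFFFFFFFLL)
 * and re-iterated until an unused key is found, both in
 * hammer_mem_add() and again when the record is synced to the B-Tree.
 */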
635 * Delete the directory entry and update the inode link count. The
636 * cursor must be seeked to the directory entry record being deleted.
638 * The related inode should be share-locked by the caller. The caller is
639 * on the frontend.
641 * This function can return EDEADLK requiring the caller to terminate
642 * the cursor, any locks, wait on the returned record, and retry.
644 int
645 hammer_ip_del_directory(struct hammer_transaction *trans,
646 hammer_cursor_t cursor, struct hammer_inode *dip,
647 struct hammer_inode *ip)
649 hammer_record_t record;
650 int error;
652 if (hammer_cursor_inmem(cursor)) {
654 * In-memory (unsynchronized) records can simply be freed.
655 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
656 * by the backend, we must still avoid races against the
657 * backend potentially syncing the record to the media.
659 * We cannot call hammer_ip_delete_record(), that routine may
660 * only be called from the backend.
662 record = cursor->iprec;
663 if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
664 KKASSERT(cursor->deadlk_rec == NULL);
665 hammer_ref(&record->lock);
666 cursor->deadlk_rec = record;
667 error = EDEADLK;
668 } else {
669 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
670 record->flags |= HAMMER_RECF_DELETED_FE;
671 error = 0;
673 } else {
675 * If the record is on-disk we have to queue the deletion by
676 * the record's key. This also causes lookups to skip the
677 * record.
679 KKASSERT(dip->flags &
680 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
681 record = hammer_alloc_mem_record(dip, 0);
682 record->type = HAMMER_MEM_RECORD_DEL;
683 record->leaf.base = cursor->leaf->base;
685 record->target_ip = ip;
686 record->flush_state = HAMMER_FST_SETUP;
687 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
690 * The inode now has a dependency and must be taken out of
691 * the idle state. An inode not in an idle state is given
692 * an extra reference.
694 if (ip->flush_state == HAMMER_FST_IDLE) {
695 hammer_ref(&ip->lock);
696 ip->flush_state = HAMMER_FST_SETUP;
699 error = hammer_mem_add(record);
703 * One less link. The file may still be open in the OS even after
704 * all links have gone away.
706 * We have to terminate the cursor before syncing the inode to
707 * avoid deadlocking against ourselves. XXX this may no longer
708 * be true.
710 * If nlinks drops to zero and the vnode is inactive (or there is
711 * no vnode), call hammer_inode_unloadable_check() to zonk the
712 * inode. If we don't do this here the inode will not be destroyed
713 * on-media until we unmount.
715 if (error == 0) {
716 --ip->ino_data.nlinks;
717 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
718 if (ip->ino_data.nlinks == 0 &&
719 (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
720 hammer_done_cursor(cursor);
721 hammer_inode_unloadable_check(ip, 1);
722 hammer_flush_inode(ip, 0);
726 return(error);
730 * Add a record to an inode.
732 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
733 * initialize the following additional fields:
735 * The related inode should be share-locked by the caller. The caller is
736 * on the frontend.
738 * record->rec.entry.base.base.key
739 * record->rec.entry.base.base.rec_type
740 * record->rec.entry.base.base.data_len
741 * record->data (a copy will be kmalloc'd if it cannot be embedded)
743 int
744 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
746 hammer_inode_t ip = record->ip;
747 int error;
749 KKASSERT(record->leaf.base.localization != 0);
750 record->leaf.base.obj_id = ip->obj_id;
751 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
752 error = hammer_mem_add(record);
753 return(error);
757 * Locate a bulk record in-memory. Bulk records allow disk space to be
758 * reserved so the front-end can flush large data writes without having
759 * to queue the BIO to the flusher. Only the related record gets queued
760 * to the flusher.
762 static hammer_record_t
763 hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
765 hammer_record_t record;
766 struct hammer_btree_leaf_elm leaf;
768 bzero(&leaf, sizeof(leaf));
769 leaf.base.obj_id = ip->obj_id;
770 leaf.base.key = file_offset + bytes;
771 leaf.base.create_tid = 0;
772 leaf.base.delete_tid = 0;
773 leaf.base.rec_type = HAMMER_RECTYPE_DATA;
774 leaf.base.obj_type = 0; /* unused */
775 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; /* unused */
776 leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_MISC;
777 leaf.data_len = bytes;
779 record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
780 if (record)
781 hammer_ref(&record->lock);
782 return(record);
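/*
 * Example (illustrative): to locate the placemarker covering a 16384
 * byte write at file offset 32768 the lookup key is 32768 + 16384 ==
 * 49152, matching the (key = base_offset + data_len) convention for
 * DATA records noted above.
 */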
786 * Reserve blockmap space placemarked with an in-memory record.
788 * This routine is called by the frontend in order to be able to directly
789 * flush a buffer cache buffer. The frontend has locked the related buffer
790 * cache buffers and we should be able to manipulate any overlapping
791 * in-memory records.
793 hammer_record_t
794 hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
795 int *errorp)
797 hammer_record_t record;
798 hammer_record_t conflict;
799 int zone;
800 int flags;
803 * Deal with conflicting in-memory records. We cannot have multiple
804 * in-memory records for the same offset without seriously confusing
805 * the backend, including but not limited to the backend issuing
806 * delete-create-delete sequences and asserting on the delete_tid
807 * being the same as the create_tid.
809 * If we encounter a record with the backend interlock set we cannot
810 * immediately delete it without confusing the backend.
812 while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) !=NULL) {
813 if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
814 conflict->flags |= HAMMER_RECF_WANTED;
815 tsleep(conflict, 0, "hmrrc3", 0);
816 } else {
817 conflict->flags |= HAMMER_RECF_DELETED_FE;
819 hammer_rel_mem_record(conflict);
823 * Create a record to cover the direct write. This is called with
824 * the related BIO locked so there should be no possible conflict.
826 * The backend is responsible for finalizing the space reserved in
827 * this record.
829 * XXX bytes not aligned, depend on the reservation code to
830 * align the reservation.
832 record = hammer_alloc_mem_record(ip, 0);
833 zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
834 HAMMER_ZONE_SMALL_DATA_INDEX;
835 record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
836 &record->leaf.data_offset,
837 errorp);
838 if (record->resv == NULL) {
839 kprintf("hammer_ip_add_bulk: reservation failed\n");
840 hammer_rel_mem_record(record);
841 return(NULL);
843 record->type = HAMMER_MEM_RECORD_DATA;
844 record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
845 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
846 record->leaf.base.obj_id = ip->obj_id;
847 record->leaf.base.key = file_offset + bytes;
848 record->leaf.base.localization = ip->obj_localization +
849 HAMMER_LOCALIZE_MISC;
850 record->leaf.data_len = bytes;
851 hammer_crc_set_leaf(data, &record->leaf);
852 flags = record->flags;
854 hammer_ref(&record->lock); /* mem_add eats a reference */
855 *errorp = hammer_mem_add(record);
856 if (*errorp) {
857 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
858 kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
859 *errorp, conflict, file_offset, bytes);
860 if (conflict)
861 kprintf("conflict %lld %d\n", conflict->leaf.base.key, conflict->leaf.data_len);
862 if (conflict)
863 hammer_rel_mem_record(conflict);
865 KKASSERT(*errorp == 0);
866 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
867 if (conflict != record) {
868 kprintf("conflict mismatch %p %p %08x\n", conflict, record, record->flags);
869 if (conflict)
870 kprintf("conflict mismatch %lld/%d %lld/%d\n", conflict->leaf.base.key, conflict->leaf.data_len, record->leaf.base.key, record->leaf.data_len);
872 KKASSERT(conflict == record);
873 hammer_rel_mem_record(conflict);
875 return (record);
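/*
 * Flow summary: the frontend reserves media space here and issues the
 * data write directly; the placemarker record is synced later by
 * hammer_ip_sync_record_cursor(), which calls hammer_blockmap_finalize()
 * on the reserved offset rather than allocating fresh data space.
 */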
879 * Frontend truncation code. Scan in-memory records only. On-disk records
880 * and records in a flushing state are handled by the backend. The vnops
881 * setattr code will handle the block containing the truncation point.
883 * Partial blocks are not deleted.
885 int
886 hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
888 struct rec_trunc_info info;
890 switch(ip->ino_data.obj_type) {
891 case HAMMER_OBJTYPE_REGFILE:
892 info.rec_type = HAMMER_RECTYPE_DATA;
893 break;
894 case HAMMER_OBJTYPE_DBFILE:
895 info.rec_type = HAMMER_RECTYPE_DB;
896 break;
897 default:
898 return(EINVAL);
900 info.trunc_off = file_size;
901 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
902 hammer_rec_trunc_callback, &info);
903 return(0);
906 static int
907 hammer_rec_trunc_callback(hammer_record_t record, void *data __unused)
909 if (record->flags & HAMMER_RECF_DELETED_FE)
910 return(0);
911 if (record->flush_state == HAMMER_FST_FLUSH)
912 return(0);
913 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
914 hammer_ref(&record->lock);
915 record->flags |= HAMMER_RECF_DELETED_FE;
916 hammer_rel_mem_record(record);
917 return(0);
921 * Return 1 if the caller must check for and delete existing records
922 * before writing out a new data record.
924 * Return 0 if the caller can just insert the record into the B-Tree without
925 * checking.
927 static int
928 hammer_record_needs_overwrite_delete(hammer_record_t record)
930 hammer_inode_t ip = record->ip;
931 int64_t file_offset;
932 int r;
934 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
935 file_offset = record->leaf.base.key;
936 else
937 file_offset = record->leaf.base.key - record->leaf.data_len;
938 r = (file_offset < ip->sync_trunc_off);
939 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
940 if (ip->sync_trunc_off <= record->leaf.base.key)
941 ip->sync_trunc_off = record->leaf.base.key + 1;
942 } else {
943 if (ip->sync_trunc_off < record->leaf.base.key)
944 ip->sync_trunc_off = record->leaf.base.key;
946 return(r);
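/*
 * Example (illustrative, regular file): with sync_trunc_off at 65536,
 * a DATA record with key 81920 and data_len 16384 has base offset
 * 81920 - 16384 == 65536, which is not below the truncation point, so
 * 0 is returned and sync_trunc_off advances to 81920.  A record based
 * below 65536 would return 1, forcing the caller to delete the
 * overlapping on-disk range first.
 */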
950 * Backend code. Sync a record to the media.
952 int
953 hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
955 hammer_transaction_t trans = cursor->trans;
956 int64_t file_offset;
957 int bytes;
958 void *bdata;
959 int error;
961 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
962 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
963 KKASSERT(record->leaf.base.localization != 0);
966 * If this is a bulk-data record placemarker there may be an existing
967 * record on-disk, indicating a data overwrite. If there is the
968 * on-disk record must be deleted before we can insert our new record.
970 * We've synthesized this record and do not know what the create_tid
971 * on-disk is, nor how much data it represents.
973 * Keep in mind that (key) for data records is (base_offset + len),
974 * not (base_offset). Also, we only want to get rid of on-disk
975 * records since we are trying to sync our in-memory record, call
976 * hammer_ip_delete_range() with truncating set to 1 to make sure
977 * it skips in-memory records.
979 * It is ok for the lookup to return ENOENT.
981 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
982 * to call hammer_ip_delete_range() or not. This also means we must
983 * update sync_trunc_off as we write.
985 if (record->type == HAMMER_MEM_RECORD_DATA &&
986 hammer_record_needs_overwrite_delete(record)) {
987 file_offset = record->leaf.base.key - record->leaf.data_len;
988 bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
989 ~HAMMER_BUFMASK;
990 KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
991 error = hammer_ip_delete_range(
992 cursor, record->ip,
993 file_offset, file_offset + bytes - 1,
994 1);
995 if (error && error != ENOENT)
996 goto done;
1000 * Setup the cursor.
1002 hammer_normalize_cursor(cursor);
1003 cursor->key_beg = record->leaf.base;
1004 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1005 cursor->flags |= HAMMER_CURSOR_BACKEND;
1006 cursor->flags &= ~HAMMER_CURSOR_INSERT;
1009 * Records can wind up on-media before the inode itself is on-media.
1010 * Flag the case.
1012 record->ip->flags |= HAMMER_INODE_DONDISK;
1015 * If we are deleting a directory entry an exact match must be
1016 * found on-disk.
1018 if (record->type == HAMMER_MEM_RECORD_DEL) {
1019 error = hammer_btree_lookup(cursor);
1020 if (error == 0) {
1021 error = hammer_ip_delete_record(cursor, record->ip,
1022 trans->tid);
1023 if (error == 0) {
1024 record->flags |= HAMMER_RECF_DELETED_FE;
1025 record->flags |= HAMMER_RECF_DELETED_BE;
1028 goto done;
1032 * We are inserting.
1034 * Issue a lookup to position the cursor and locate the cluster. The
1035 * target key should not exist. If we are creating a directory entry
1036 * we may have to iterate the low 32 bits of the key to find an unused
1037 * key.
1039 cursor->flags |= HAMMER_CURSOR_INSERT;
1041 for (;;) {
1042 error = hammer_btree_lookup(cursor);
1043 if (hammer_debug_inode)
1044 kprintf("DOINSERT LOOKUP %d\n", error);
1045 if (error)
1046 break;
1047 if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
1048 kprintf("hammer_ip_sync_record: duplicate rec "
1049 "at (%016llx)\n", record->leaf.base.key);
1050 Debugger("duplicate record1");
1051 error = EIO;
1052 break;
1054 if (++trans->hmp->namekey_iterator == 0)
1055 ++trans->hmp->namekey_iterator;
1056 record->leaf.base.key &= ~(0xFFFFFFFFLL);
1057 record->leaf.base.key |= trans->hmp->namekey_iterator;
1058 cursor->key_beg.key = record->leaf.base.key;
1060 #if 0
1061 if (record->type == HAMMER_MEM_RECORD_DATA)
1062 kprintf("sync_record %016llx ---------------- %016llx %d\n",
1063 record->leaf.base.key - record->leaf.data_len,
1064 record->leaf.data_offset, error);
1065 #endif
1068 if (error != ENOENT)
1069 goto done;
1072 * Allocate the record and data. The result buffers will be
1073 * marked as being modified and further calls to
1074 * hammer_modify_buffer() will result in unneeded UNDO records.
1076 * Support zero-fill records (data == NULL and data_len != 0)
1078 if (record->type == HAMMER_MEM_RECORD_DATA) {
1080 * The data portion of a bulk-data record has already been
1081 * committed to disk; we need only adjust the layer2
1082 * statistics in the same transaction as our B-Tree insert.
1084 KKASSERT(record->leaf.data_offset != 0);
1085 hammer_blockmap_finalize(trans, record->leaf.data_offset,
1086 record->leaf.data_len);
1087 error = 0;
1088 } else if (record->data && record->leaf.data_len) {
1090 * Wholly cached record, with data. Allocate the data.
1092 bdata = hammer_alloc_data(trans, record->leaf.data_len,
1093 record->leaf.base.rec_type,
1094 &record->leaf.data_offset,
1095 &cursor->data_buffer, &error);
1096 if (bdata == NULL)
1097 goto done;
1098 hammer_crc_set_leaf(record->data, &record->leaf);
1099 hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
1100 bcopy(record->data, bdata, record->leaf.data_len);
1101 hammer_modify_buffer_done(cursor->data_buffer);
1102 } else {
1104 * Wholly cached record, without data.
1106 record->leaf.data_offset = 0;
1107 record->leaf.data_crc = 0;
1110 error = hammer_btree_insert(cursor, &record->leaf);
1111 if (hammer_debug_inode && error)
1112 kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);
1115 * Our record is on-disk, normally mark the in-memory version as
1116 * deleted. If the record represented a directory deletion but
1117 * we had to sync a valid directory entry to disk we must convert
1118 * the record to a covering delete so the frontend does not have
1119 * visibility on the synced entry.
1121 if (error == 0) {
1122 if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
1123 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
1124 record->flags &= ~HAMMER_RECF_DELETED_FE;
1125 record->type = HAMMER_MEM_RECORD_DEL;
1126 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1127 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1128 /* hammer_flush_record_done takes care of the rest */
1129 } else {
1130 record->flags |= HAMMER_RECF_DELETED_FE;
1131 record->flags |= HAMMER_RECF_DELETED_BE;
1133 } else {
1134 if (record->leaf.data_offset) {
1135 hammer_blockmap_free(trans, record->leaf.data_offset,
1136 record->leaf.data_len);
1140 done:
1141 return(error);
1145 * Add the record to the inode's rec_tree. The low 32 bits of a directory
1146 * entry's key is used to deal with hash collisions in the upper 32 bits.
1147 * A unique 64 bit key is generated in-memory and may be regenerated a
1148 * second time when the directory record is flushed to the on-disk B-Tree.
1150 * A referenced record is passed to this function. This function
1151 * eats the reference. If an error occurs the record will be deleted.
1153 * A copy of the temporary record->data pointer provided by the caller
1154 * will be made.
1156 static
1157 int
1158 hammer_mem_add(hammer_record_t record)
1160 hammer_mount_t hmp = record->ip->hmp;
1163 * Make a private copy of record->data
1165 if (record->data)
1166 KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);
1169 * Insert into the RB tree, find an unused iterator if this is
1170 * a directory entry.
1172 while (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
1173 if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY){
1174 record->flags |= HAMMER_RECF_DELETED_FE;
1175 hammer_rel_mem_record(record);
1176 return (EEXIST);
1178 if (++hmp->namekey_iterator == 0)
1179 ++hmp->namekey_iterator;
1180 record->leaf.base.key &= ~(0xFFFFFFFFLL);
1181 record->leaf.base.key |= hmp->namekey_iterator;
1183 ++hmp->count_newrecords;
1184 ++hmp->rsv_recs;
1185 ++record->ip->rsv_recs;
1186 record->ip->hmp->rsv_databytes += record->leaf.data_len;
1187 record->flags |= HAMMER_RECF_ONRBTREE;
1188 hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
1189 hammer_rel_mem_record(record);
1190 return(0);
1193 /************************************************************************
1194 * HAMMER INODE MERGED-RECORD FUNCTIONS *
1195 ************************************************************************
1197 * These functions augment the B-Tree scanning functions in hammer_btree.c
1198 * by merging in-memory records with on-disk records.
1202 * Locate a particular record either in-memory or on-disk.
1204 * NOTE: This is basically a standalone routine, hammer_ip_next() may
1205 * NOT be called to iterate results.
1207 int
1208 hammer_ip_lookup(hammer_cursor_t cursor)
1210 int error;
1213 * If the element is in-memory return it without searching the
1214 * on-disk B-Tree
1216 KKASSERT(cursor->ip);
1217 error = hammer_mem_lookup(cursor);
1218 if (error == 0) {
1219 cursor->leaf = &cursor->iprec->leaf;
1220 return(error);
1222 if (error != ENOENT)
1223 return(error);
1226 * If the inode has on-disk components search the on-disk B-Tree.
1228 if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
1229 return(error);
1230 error = hammer_btree_lookup(cursor);
1231 if (error == 0)
1232 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1233 return(error);
1237 * Locate the first record within the cursor's key_beg/key_end range,
1238 * restricted to a particular inode. 0 is returned on success, ENOENT
1239 * if no records matched the requested range, or some other error.
1241 * When 0 is returned hammer_ip_next() may be used to iterate additional
1242 * records within the requested range.
1244 * This function can return EDEADLK, requiring the caller to terminate
1245 * the cursor and try again.
1247 int
1248 hammer_ip_first(hammer_cursor_t cursor)
1250 hammer_inode_t ip = cursor->ip;
1251 int error;
1253 KKASSERT(ip != NULL);
1256 * Clean up fields and setup for merged scan
1258 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
1259 cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
1260 cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
1261 if (cursor->iprec) {
1262 hammer_rel_mem_record(cursor->iprec);
1263 cursor->iprec = NULL;
1267 * Search the on-disk B-Tree. hammer_btree_lookup() only does an
1268 * exact lookup so if we get ENOENT we have to call the iterate
1269 * function to validate the first record after the begin key.
1271 * The ATEDISK flag is used by hammer_btree_iterate to determine
1272 * whether it must index forwards or not. It is also used here
1273 * to select the next record from in-memory or on-disk.
1275 * EDEADLK can only occur if the lookup hit an empty internal
1276 * element and couldn't delete it. Since this could only occur
1277 * in-range, we can just iterate from the failure point.
1279 if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
1280 error = hammer_btree_lookup(cursor);
1281 if (error == ENOENT || error == EDEADLK) {
1282 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1283 if (hammer_debug_general & 0x2000)
1284 kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
1285 error = hammer_btree_iterate(cursor);
1287 if (error && error != ENOENT)
1288 return(error);
1289 if (error == 0) {
1290 cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
1291 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1292 } else {
1293 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1298 * Search the in-memory record list (Red-Black tree). Unlike the
1299 * B-Tree search, mem_first checks for records in the range.
1301 error = hammer_mem_first(cursor);
1302 if (error && error != ENOENT)
1303 return(error);
1304 if (error == 0) {
1305 cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
1306 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1307 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
1308 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1312 * This will return the first matching record.
1314 return(hammer_ip_next(cursor));
1318 * Retrieve the next record in a merged iteration within the bounds of the
1319 * cursor. This call may be made multiple times after the cursor has been
1320 * initially searched with hammer_ip_first().
1322 * 0 is returned on success, ENOENT if no further records match the
1323 * requested range, or some other error code is returned.
1325 int
1326 hammer_ip_next(hammer_cursor_t cursor)
1328 hammer_btree_elm_t elm;
1329 hammer_record_t rec, save;
1330 int error;
1331 int r;
1333 next_btree:
1335 * Load the current on-disk and in-memory record. If we ate any
1336 * records we have to get the next one.
1338 * If we deleted the last on-disk record we had scanned ATEDISK will
1339 * be clear and DELBTREE will be set, forcing a call to iterate. The
1340 * fact that ATEDISK is clear causes iterate to re-test the 'current'
1341 * element. If ATEDISK is set, iterate will skip the 'current'
1342 * element.
1344 * Get the next on-disk record
1346 if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
1347 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1348 error = hammer_btree_iterate(cursor);
1349 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
1350 if (error == 0) {
1351 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1352 hammer_cache_node(&cursor->ip->cache[1],
1353 cursor->node);
1354 } else {
1355 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1356 HAMMER_CURSOR_ATEDISK;
1361 next_memory:
1363 * Get the next in-memory record. The record can be ripped out
1364 * of the RB tree so we maintain a scan_info structure to track
1365 * the next node.
1367 * hammer_rec_scan_cmp: Is the record still in our general range,
1368 * (non-inclusive of snapshot exclusions)?
1369 * hammer_rec_scan_callback: Is the record in our snapshot?
1371 if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1372 if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1373 save = cursor->iprec;
1374 cursor->iprec = NULL;
1375 rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
1376 while (rec) {
1377 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1378 break;
1379 if (hammer_rec_scan_callback(rec, cursor) != 0)
1380 break;
1381 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1383 if (save)
1384 hammer_rel_mem_record(save);
1385 if (cursor->iprec) {
1386 KKASSERT(cursor->iprec == rec);
1387 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1388 } else {
1389 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1395 * The memory record may have become stale while being held in
1396 * cursor->iprec. We are interlocked against the backend with
1397 * regard to B-Tree entries.
1399 if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
1400 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
1401 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1402 goto next_memory;
1407 * Extract either the disk or memory record depending on their
1408 * relative position.
1410 error = 0;
1411 switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1412 case 0:
1414 * Both entries valid. Compare the entries and nominally
1415 * return the first one in the sort order. Numerous cases
1416 * require special attention, however.
1418 elm = &cursor->node->ondisk->elms[cursor->index];
1419 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1422 * If the two entries differ only by their key (-2/2) or
1423 * create_tid (-1/1), and are DATA records, we may have a
1424 * nominal match. We have to calculate the base file
1425 * offset of the data.
1427 if (r <= 2 && r >= -2 && r != 0 &&
1428 cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1429 cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1430 int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1431 int64_t base2 = cursor->iprec->leaf.base.key -
1432 cursor->iprec->leaf.data_len;
1433 if (base1 == base2)
1434 r = 0;
1437 if (r < 0) {
1438 error = hammer_btree_extract(cursor,
1439 HAMMER_CURSOR_GET_LEAF);
1440 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1441 break;
1445 * If the entries match exactly the memory entry is either
1446 * an on-disk directory entry deletion or a bulk data
1447 * overwrite. If it is a directory entry deletion we eat
1448 * both entries.
1450 * For the bulk-data overwrite case it is possible to have
1451 * visibility into both, which simply means the syncer
1452 * hasn't gotten around to doing the delete+insert sequence
1453 * on the B-Tree. Use the memory entry and throw away the
1454 * on-disk entry.
1456 * If the in-memory record is not either of these we
1457 * probably caught the syncer while it was syncing it to
1458 * the media. Since we hold a shared lock on the cursor,
1459 * the in-memory record had better be marked deleted at
1460 * this point.
1462 if (r == 0) {
1463 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1464 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1465 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1466 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1467 goto next_btree;
1469 } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1470 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1471 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1473 /* fall through to memory entry */
1474 } else {
1475 panic("hammer_ip_next: duplicate mem/b-tree entry");
1476 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1477 goto next_memory;
1480 /* fall through to the memory entry */
1481 case HAMMER_CURSOR_ATEDISK:
1483 * Only the memory entry is valid.
1485 cursor->leaf = &cursor->iprec->leaf;
1486 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1489 * If the memory entry is an on-disk deletion we should have
1490 * also found a B-Tree record. If the backend beat us to
1491 * it, it would have interlocked the cursor and we should
1492 * have seen the in-memory record marked DELETED_FE.
1494 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1495 (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1496 panic("hammer_ip_next: del-on-disk with no b-tree entry");
1498 break;
1499 case HAMMER_CURSOR_ATEMEM:
1501 * Only the disk entry is valid
1503 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1504 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1505 break;
1506 default:
1508 * Neither entry is valid
1510 * XXX error not set properly
1512 cursor->leaf = NULL;
1513 error = ENOENT;
1514 break;
1516 return(error);
1520 * Resolve the cursor->data pointer for the current cursor position in
1521 * a merged iteration.
1523 int
1524 hammer_ip_resolve_data(hammer_cursor_t cursor)
1526 hammer_record_t record;
1527 int error;
1529 if (hammer_cursor_inmem(cursor)) {
1531 * The data associated with an in-memory record is usually
1532 * kmalloced, but reserve-ahead data records will have an
1533 * on-disk reference.
1535 * NOTE: Reserve-ahead data records must be handled in the
1536 * context of the related high level buffer cache buffer
1537 * to interlock against async writes.
1539 record = cursor->iprec;
1540 cursor->data = record->data;
1541 error = 0;
1542 if (cursor->data == NULL) {
1543 KKASSERT(record->leaf.base.rec_type ==
1544 HAMMER_RECTYPE_DATA);
1545 cursor->data = hammer_bread_ext(cursor->trans->hmp,
1546 record->leaf.data_offset,
1547 record->leaf.data_len,
1548 &error,
1549 &cursor->data_buffer);
1551 } else {
1552 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1553 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1555 return(error);
1559 * Backend truncation / record replacement - delete records in range.
1561 * Delete all records within the specified range for inode ip. In-memory
1562 * records still associated with the frontend are ignored.
1564 * NOTE: An unaligned range will cause new records to be added to cover
1565 * the edge cases. (XXX not implemented yet).
1567 * NOTE: Replacement via reservations (see hammer_ip_sync_record_cursor())
1568 * also do not deal with unaligned ranges.
1570 * NOTE: ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1572 * NOTE: Record keys for regular file data have to be special-cased since
1573 * they indicate the end of the range (key = base + bytes).
1575 int
1576 hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1577 int64_t ran_beg, int64_t ran_end, int truncating)
1579 hammer_transaction_t trans = cursor->trans;
1580 hammer_btree_leaf_elm_t leaf;
1581 int error;
1582 int64_t off;
1584 #if 0
1585 kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1586 #endif
1588 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1589 retry:
1590 hammer_normalize_cursor(cursor);
1591 cursor->key_beg.localization = ip->obj_localization +
1592 HAMMER_LOCALIZE_MISC;
1593 cursor->key_beg.obj_id = ip->obj_id;
1594 cursor->key_beg.create_tid = 0;
1595 cursor->key_beg.delete_tid = 0;
1596 cursor->key_beg.obj_type = 0;
1597 cursor->asof = ip->obj_asof;
1598 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1599 cursor->flags |= HAMMER_CURSOR_ASOF;
1600 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1601 cursor->flags |= HAMMER_CURSOR_BACKEND;
1603 cursor->key_end = cursor->key_beg;
1604 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1605 cursor->key_beg.key = ran_beg;
1606 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1607 cursor->key_end.rec_type = HAMMER_RECTYPE_DB;
1608 cursor->key_end.key = ran_end;
1609 } else {
1611 * The key in the B-Tree is (base+bytes), so the first possible
1612 * matching key is ran_beg + 1.
1614 int64_t tmp64;
1616 cursor->key_beg.key = ran_beg + 1;
1617 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1618 cursor->key_end.rec_type = HAMMER_RECTYPE_DATA;
1620 tmp64 = ran_end + MAXPHYS + 1; /* work around GCC-4 bug */
1621 if (tmp64 < ran_end)
1622 cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1623 else
1624 cursor->key_end.key = ran_end + MAXPHYS + 1;
1626 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
1628 error = hammer_ip_first(cursor);
1631 * Iterate through matching records and mark them as deleted.
1633 while (error == 0) {
1634 leaf = cursor->leaf;
1636 KKASSERT(leaf->base.delete_tid == 0);
1639 * There may be overlap cases for regular file data. Also
1640 * remember the key for a regular file record is (base + len),
1641 * NOT (base).
1643 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
1644 off = leaf->base.key - leaf->data_len;
1646 * Check the left edge case. We currently do not
1647 * split existing records.
1649 if (off < ran_beg) {
1650 panic("hammer left edge case %016llx %d\n",
1651 leaf->base.key, leaf->data_len);
1655 * Check the right edge case. Note that the
1656 * record can be completely out of bounds, which
1657 * terminates the search.
1659 * base->key is exclusive of the right edge while
1660 * ran_end is inclusive of the right edge. The
1661 * (key - data_len) left boundary is inclusive.
1663 * XXX theory-check this test at some point, are
1664 * we missing a + 1 somewhere? Note that ran_end
1665 * could overflow.
1667 if (leaf->base.key - 1 > ran_end) {
1668 if (leaf->base.key - leaf->data_len > ran_end)
1669 break;
1670 panic("hammer right edge case\n");
1675 * Delete the record. When truncating we do not delete
1676 * in-memory (data) records because they represent data
1677 * written after the truncation.
1679 * This will also physically destroy the B-Tree entry and
1680 * data if the retention policy dictates. The function
1681 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1682 * uses to perform a fixup.
1684 if (truncating == 0 || hammer_cursor_ondisk(cursor))
1685 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1686 if (error)
1687 break;
1688 error = hammer_ip_next(cursor);
1690 if (cursor->node)
1691 hammer_cache_node(&ip->cache[1], cursor->node);
1693 if (error == EDEADLK) {
1694 hammer_done_cursor(cursor);
1695 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1696 if (error == 0)
1697 goto retry;
1699 if (error == ENOENT)
1700 error = 0;
1701 return(error);
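/*
 * Key range example (illustrative, regular file): deleting byte range
 * [0..65535] sets key_beg.key to 1 and key_end.key to 65535 + MAXPHYS
 * + 1, because a DATA record is indexed by its ending offset
 * (key = base_offset + data_len).  Records extending past the right
 * edge are caught by the explicit edge checks in the loop above rather
 * than by the key range itself.
 */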
1705 * Backend truncation - delete all records.
1707 * Delete all user records associated with an inode except the inode record
1708 * itself. Directory entries are not deleted (they must be properly disposed
1709 * of or nlinks would get upset).
1711 int
1712 hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
1713 int *countp)
1715 hammer_transaction_t trans = cursor->trans;
1716 hammer_btree_leaf_elm_t leaf;
1717 int error;
1719 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1720 retry:
1721 hammer_normalize_cursor(cursor);
1722 cursor->key_beg.localization = ip->obj_localization +
1723 HAMMER_LOCALIZE_MISC;
1724 cursor->key_beg.obj_id = ip->obj_id;
1725 cursor->key_beg.create_tid = 0;
1726 cursor->key_beg.delete_tid = 0;
1727 cursor->key_beg.obj_type = 0;
1728 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1729 cursor->key_beg.key = HAMMER_MIN_KEY;
1731 cursor->key_end = cursor->key_beg;
1732 cursor->key_end.rec_type = 0xFFFF;
1733 cursor->key_end.key = HAMMER_MAX_KEY;
1735 cursor->asof = ip->obj_asof;
1736 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1737 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1738 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1739 cursor->flags |= HAMMER_CURSOR_BACKEND;
1741 error = hammer_ip_first(cursor);
1744 * Iterate through matching records and mark them as deleted.
1746 while (error == 0) {
1747 leaf = cursor->leaf;
1749 KKASSERT(leaf->base.delete_tid == 0);
1752 * Mark the record and B-Tree entry as deleted. This will
1753 * also physically delete the B-Tree entry, record, and
1754 * data if the retention policy dictates. The function
1755 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1756 * uses to perform a fixup.
1758 * Directory entries (and delete-on-disk directory entries)
1759 * must be synced and cannot be deleted.
1761 if (leaf->base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
1762 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1763 ++*countp;
1765 if (error)
1766 break;
1767 error = hammer_ip_next(cursor);
1769 if (cursor->node)
1770 hammer_cache_node(&ip->cache[1], cursor->node);
1771 if (error == EDEADLK) {
1772 hammer_done_cursor(cursor);
1773 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1774 if (error == 0)
1775 goto retry;
1777 if (error == ENOENT)
1778 error = 0;
1779 return(error);
1783 * Delete the record at the current cursor. On success the cursor will
1784 * be positioned appropriately for an iteration but may no longer be at
1785 * a leaf node.
1787 * This routine is only called from the backend.
1789 * NOTE: This can return EDEADLK, requiring the caller to terminate the
1790 * cursor and retry.
1792 int
1793 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1794 hammer_tid_t tid)
1796 hammer_off_t zone2_offset;
1797 hammer_record_t iprec;
1798 hammer_btree_elm_t elm;
1799 hammer_mount_t hmp;
1800 int error;
1801 int dodelete;
1803 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1804 KKASSERT(tid != 0);
1805 hmp = cursor->node->hmp;
1808 * In-memory (unsynchronized) records can simply be freed. This
1809 * only occurs in range iterations since all other records are
1810 * individually synchronized. Thus there should be no confusion with
1811 * the interlock.
1813 * An in-memory record may be deleted before being committed to disk,
1814 * but could have been accessed in the meantime. The backing store
1815 * may never have been marked allocated and so hammer_blockmap_free() may
1816 * never get called on it. Because of this we have to make sure that
1817 * we've gotten rid of any related hammer_buffer or buffer cache
1818 * buffer.
1820 if (hammer_cursor_inmem(cursor)) {
1821 iprec = cursor->iprec;
1822 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) ==0);
1823 iprec->flags |= HAMMER_RECF_DELETED_FE;
1824 iprec->flags |= HAMMER_RECF_DELETED_BE;
1826 if (iprec->leaf.data_offset && iprec->leaf.data_len) {
1827 zone2_offset = hammer_blockmap_lookup(hmp, iprec->leaf.data_offset, &error);
1828 KKASSERT(error == 0);
1829 hammer_del_buffers(hmp,
1830 iprec->leaf.data_offset,
1831 zone2_offset,
1832 iprec->leaf.data_len);
1834 return(0);
1838 * On-disk records are marked as deleted by updating their delete_tid.
1839 * This does not affect their position in the B-Tree (which is based
1840 * on their create_tid).
1842 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1843 elm = NULL;
1846 * If we were mounted with the nohistory option, we physically
1847 * delete the record.
1849 dodelete = hammer_nohistory(ip);
1851 if (error == 0) {
1852 error = hammer_cursor_upgrade(cursor);
1853 if (error == 0) {
1854 elm = &cursor->node->ondisk->elms[cursor->index];
1855 hammer_modify_node(cursor->trans, cursor->node,
1856 elm, sizeof(*elm));
1857 elm->leaf.base.delete_tid = tid;
1858 elm->leaf.delete_ts = cursor->trans->time32;
1859 hammer_modify_node_done(cursor->node);
1862 * An on-disk record cannot have the same delete_tid
1863 * as its create_tid. In a chain of record updates
1864 * this could result in a duplicate record.
1866 KKASSERT(elm->leaf.base.delete_tid != elm->leaf.base.create_tid);
1870 if (error == 0 && dodelete) {
1871 error = hammer_delete_at_cursor(cursor, NULL);
1872 if (error) {
1873 panic("hammer_ip_delete_record: unable to physically delete the record!\n");
1874 error = 0;
1877 return(error);
1880 int
1881 hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
1883 hammer_btree_elm_t elm;
1884 hammer_off_t data_offset;
1885 int32_t data_len;
1886 u_int16_t rec_type;
1887 int error;
1889 elm = &cursor->node->ondisk->elms[cursor->index];
1890 KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
1892 data_offset = elm->leaf.data_offset;
1893 data_len = elm->leaf.data_len;
1894 rec_type = elm->leaf.base.rec_type;
1896 error = hammer_btree_delete(cursor);
1897 if (error == 0) {
1899 * This forces a fixup for the iteration because
1900 * the cursor is now either sitting at the 'next'
1901 * element or sitting at the end of a leaf.
1903 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1904 cursor->flags |= HAMMER_CURSOR_DELBTREE;
1905 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1908 if (error == 0) {
1909 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
1910 case HAMMER_ZONE_LARGE_DATA:
1911 case HAMMER_ZONE_SMALL_DATA:
1912 case HAMMER_ZONE_META:
1913 hammer_blockmap_free(cursor->trans,
1914 data_offset, data_len);
1915 break;
1916 default:
1917 break;
1920 return (error);
1924 * Determine whether we can remove a directory. This routine checks whether
1925 * a directory is empty or not and enforces flush connectivity.
1927 * Flush connectivity requires that we block if the target directory is
1928 * currently flushing, otherwise it may not end up in the same flush group.
1930 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
1932 int
1933 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
1935 struct hammer_cursor cursor;
1936 int error;
1939 * Check directory empty
1941 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
1943 cursor.key_beg.localization = ip->obj_localization +
1944 HAMMER_LOCALIZE_MISC;
1945 cursor.key_beg.obj_id = ip->obj_id;
1946 cursor.key_beg.create_tid = 0;
1947 cursor.key_beg.delete_tid = 0;
1948 cursor.key_beg.obj_type = 0;
1949 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1950 cursor.key_beg.key = HAMMER_MIN_KEY;
1952 cursor.key_end = cursor.key_beg;
1953 cursor.key_end.rec_type = 0xFFFF;
1954 cursor.key_end.key = HAMMER_MAX_KEY;
1956 cursor.asof = ip->obj_asof;
1957 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1959 error = hammer_ip_first(&cursor);
1960 if (error == ENOENT)
1961 error = 0;
1962 else if (error == 0)
1963 error = ENOTEMPTY;
1964 hammer_done_cursor(&cursor);
1965 return(error);